/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kasan.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added to
 *   or removed from the lists, nor may the number of partial slabs be
 *   modified. (Note that the total number of slabs is an atomic value
 *   that may be modified without taking the list lock.)
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. E.g.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive 		The slab is frozen and exempt from list processing.
 * 			This means that the slab is dedicated to a purpose
 * 			such as satisfying allocations for a specific
 * 			processor. Objects may be freed in the slab while
 * 			it is frozen but slab_free will then skip the usual
 * 			list operations. It is up to the processor holding
 * 			the slab to integrate the slab into the slab lists
 * 			when the slab is no longer needed.
 *
 * 			One use of this flag is to mark slabs that are
 * 			used for allocations. Then such a slab becomes a cpu
 * 			slab. The cpu slab may be equipped with an additional
 * 			freelist that allows lockless access to
 * 			free objects in addition to the regular freelist
 * 			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 * 			options set. This moves slab handling out of
 * 			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

void *fixup_red_left(struct kmem_cache *s, void *p)
{
	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE)
		p += s->red_left_pad;

	return p;
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_CONSISTENCY_CHECKS | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * These debug flags cannot use CMPXCHG because there might be consistency
 * issues when checking or reading debug information
 */
#define SLAB_NO_CMPXCHG (SLAB_CONSISTENCY_CHECKS | SLAB_STORE_USER | \
				SLAB_TRACE)


/*
 * Debugging flags that require metadata to be stored in the slab.  These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	void *p;

	if (!debug_pagealloc_enabled())
		return get_freepointer(s, object);

	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
	return p;
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

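/*
 * Illustrative note (added commentary, not in the original source): the
 * freelist is a singly linked list threaded through the free objects
 * themselves. Each free object stores the address of the next free
 * object at offset s->offset within itself. With two free objects A
 * and B:
 *
 *	set_freepointer(s, A, B)	-> *(void **)(A + s->offset) = B
 *	get_freepointer(s, A)		-> returns B
 *
 * No external metadata is needed, so a fully allocated slab consumes
 * no freelist storage at all.
 */
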
/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr); \
		__p < (__addr) + (__objects) * (__s)->size; \
		__p += (__s)->size)

#define for_each_object_idx(__p, __idx, __s, __addr, __objects) \
	for (__p = fixup_red_left(__s, __addr), __idx = 1; \
		__idx <= __objects; \
		__p += (__s)->size, __idx++)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

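/*
 * Example (added commentary, assuming s->size == 256): the object at
 * addr + 512 has slab_index() == 512 / 256 == 2, i.e. it is the third
 * object in the slab page starting at addr.
 */
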
static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}
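
/*
 * Worked example (added commentary, assuming PAGE_SIZE == 4096 and
 * reserved == 0): for an order-3 slab of 256-byte objects,
 * order_objects(3, 256, 0) == 32768 / 256 == 128, so oo_make(3, 256, 0)
 * packs x.x == (3 << OO_SHIFT) + 128. Decoding gives oo_order(x) == 3
 * and oo_objects(x) == 128.
 */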

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	VM_BUG_ON_PAGE(PageTail(page), page);
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_refcount.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_refcount, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return true;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return true;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return true;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	pr_info("%s %s: cmpxchg double redo ", n, s->name);
#endif

	return false;
}

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * The node's list_lock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}

static inline int size_from_object(struct kmem_cache *s)
{
	if (s->flags & SLAB_RED_ZONE)
		return s->size - s->red_left_pad;

	return s->size;
}

static inline void *restore_red_left(struct kmem_cache *s, void *p)
{
	if (s->flags & SLAB_RED_ZONE)
		p -= s->red_left_pad;

	return p;
}

/*
 * Debug settings:
 */
#if defined(CONFIG_SLUB_DEBUG_ON)
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * slub is about to manipulate internal object metadata.  This memory lies
 * outside the range of the allocated object, so accessing it would normally
 * be reported by kasan as a bounds error.  metadata_access_enable() is used
 * to tell kasan that these accesses are OK.
 */
static inline void metadata_access_enable(void)
{
	kasan_disable_current();
}

static inline void metadata_access_disable(void)
{
	kasan_enable_current();
}

/*
 * Object debugging
 */

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	object = restore_red_left(s, object);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static void print_section(char *level, char *text, u8 *addr,
			  unsigned int length)
{
	metadata_access_enable();
	print_hex_dump(level, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
	metadata_access_disable();
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}
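
/*
 * Note (added commentary): the two tracks live back to back after the
 * object's in-use region (or after the out-of-object free pointer, if
 * one exists). Since p has type struct track *, "p + alloc" selects the
 * TRACK_ALLOC (0) or TRACK_FREE (1) slot in whole-struct strides.
 */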

static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		metadata_access_enable();
		save_stack_trace(&trace);
		metadata_access_disable();

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	pr_err("INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
	       s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				pr_err("\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	pr_err("INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("=============================================================================\n");
	pr_err("BUG %s (%s): %pV\n", s->name, print_tainted(), &vaf);
	pr_err("-----------------------------------------------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
	va_end(args);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;
	pr_err("FIX %s: %pV\n", s->name, &vaf);
	va_end(args);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	pr_err("INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
	       p, p - addr, get_freepointer(s, p));

	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p - s->red_left_pad,
			      s->red_left_pad);
	else if (p > addr + 16)
		print_section(KERN_ERR, "Bytes b4 ", p - 16, 16);

	print_section(KERN_ERR, "Object ", p,
		      min_t(unsigned long, s->object_size, PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section(KERN_ERR, "Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (off != size_from_object(s))
		/* Beginning of the filler is the free pointer */
		print_section(KERN_ERR, "Padding ", p + off,
			      size_from_object(s) - off);

	dump_stack();
}

void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & SLAB_RED_ZONE)
		memset(p - s->red_left_pad, val, s->red_left_pad);

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	metadata_access_enable();
	fault = memchr_inv(start, value, bytes);
	metadata_access_disable();
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	pr_err("INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
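
/*
 * Compact sketch of the layout above (added commentary; red zoning,
 * poisoning and user tracking enabled, free pointer kept outside the
 * object):
 *
 *	object - red_left_pad	left red zone
 *	object			object payload (POISON_FREE when free)
 *	object + object_size	right red zone up to s->inuse
 *	object + inuse		free pointer, then two struct track
 *	...			POISON_INUSE padding up to s->size
 */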

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	off += kasan_metadata_size(s);

	if (size_from_object(s) == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
			p + off, POISON_INUSE, size_from_object(s) - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	metadata_access_enable();
	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	metadata_access_disable();
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section(KERN_ERR, "Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			object - s->red_left_pad, val, s->red_left_pad))
			return 0;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, page->objects);
		return 0;
	}
	/* slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	int max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but should be %d",
			 page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but counted were %d",
			 page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		pr_info("TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section(KERN_INFO, "Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static inline int alloc_consistency_checks(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		return 0;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		return 0;

	return 1;
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!alloc_consistency_checks(s, page, object, addr))
			goto bad;
	}

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static inline int free_consistency_checks(struct kmem_cache *s,
		struct page *page, void *object, unsigned long addr)
{
	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		return 0;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		return 0;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		return 0;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) outside of slab",
				 object);
		} else if (!page->slab_cache) {
			pr_err("SLUB <none>: no slab for object 0x%p.\n",
			       object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		return 0;
	}
	return 1;
}

/* Supports checking bulk free of a constructed freelist */
static noinline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
	void *object = head;
	int cnt = 0;
	unsigned long uninitialized_var(flags);
	int ret = 0;

	spin_lock_irqsave(&n->list_lock, flags);
	slab_lock(page);

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!check_slab(s, page))
			goto out;
	}

next_object:
	cnt++;

	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
		if (!free_consistency_checks(s, page, object, addr))
			goto out;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	/* Freepointer not overwritten by init_object(), SLAB_POISON moved it */
	init_object(s, object, SLUB_RED_INACTIVE);

	/* Reached end of constructed freelist yet? */
	if (object != tail) {
		object = get_freepointer(s, object);
		goto next_object;
	}
	ret = 1;

out:
	if (cnt != bulk_cnt)
		slab_err(s, page, "Bulk freelist count(%d) invalid(%d)\n",
			 bulk_cnt, cnt);

	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, flags);
	if (!ret)
		slab_fix(s, "Object at 0x%p not freed", object);
	return ret;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_CONSISTENCY_CHECKS;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		case 'o':
			/*
			 * Avoid enabling debugging on caches if its minimum
			 * order would increase as a result.
			 */
			disable_higher_order_debug = 1;
			break;
		default:
			pr_err("slub_debug option '%c' unknown. skipped\n",
			       *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}
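
/*
 * Usage example (added commentary, following the parser above): booting
 * with "slub_debug" enables the default debug flags for all caches,
 * "slub_debug=FZP" enables consistency checks, red zoning and poisoning
 * globally, and "slub_debug=,dentry" restricts full debugging to caches
 * whose names match "dentry".
 */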

__setup("slub_debug", setup_slub_debug);

unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs || (name &&
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
		flags |= slub_debug;

	return flags;
}
#else /* !CONFIG_SLUB_DEBUG */
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline int free_debug_processing(
	struct kmem_cache *s, struct page *page,
	void *head, void *tail, int bulk_cnt,
	unsigned long addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

#endif /* CONFIG_SLUB_DEBUG */

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
	kasan_kmalloc_large(ptr, size, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
	kasan_kfree_large(x);
}

static inline void *slab_free_hook(struct kmem_cache *s, void *x)
{
	void *freeptr;

	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path, so in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
	{
		unsigned long flags;

		local_irq_save(flags);
		kmemcheck_slab_free(s, x, s->object_size);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);

	freeptr = get_freepointer(s, x);
	/*
	 * kasan_slab_free() may put x into memory quarantine, delaying its
	 * reuse. In this case the object's freelist pointer is changed.
	 */
	kasan_slab_free(s, x);
	return freeptr;
}

static inline void slab_free_freelist_hook(struct kmem_cache *s,
					   void *head, void *tail)
{
/*
 * Compiler cannot detect this function can be removed if slab_free_hook()
 * evaluates to nothing.  Thus, catch all relevant config debug options here.
 */
#if defined(CONFIG_KMEMCHECK) ||		\
	defined(CONFIG_LOCKDEP)	||		\
	defined(CONFIG_DEBUG_KMEMLEAK) ||	\
	defined(CONFIG_DEBUG_OBJECTS_FREE) ||	\
	defined(CONFIG_KASAN)

	void *object = head;
	void *tail_obj = tail ? : head;
	void *freeptr;

	do {
		freeptr = slab_free_hook(s, object);
	} while ((object != tail_obj) && (object = freeptr));
#endif
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	kasan_init_slab_obj(s, object);
	if (unlikely(s->ctor)) {
		kasan_unpoison_object_data(s, object);
		s->ctor(object);
		kasan_poison_object_data(s, object);
	}
}

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(struct kmem_cache *s,
		gfp_t flags, int node, struct kmem_cache_order_objects oo)
{
	struct page *page;
	int order = oo_order(oo);

	flags |= __GFP_NOTRACK;

	if (node == NUMA_NO_NODE)
		page = alloc_pages(flags, order);
	else
		page = __alloc_pages_node(node, flags, order);

	if (page && memcg_charge_slab(page, flags, order, s)) {
		__free_pages(page, order);
		page = NULL;
	}

	return page;
}

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Pre-initialize the random sequence cache */
static int init_cache_random_seq(struct kmem_cache *s)
{
	int err;
	unsigned long i, count = oo_objects(s->oo);

	/* Bailout if already initialised */
	if (s->random_seq)
		return 0;

	err = cache_random_seq_create(s, count, GFP_KERNEL);
	if (err) {
		pr_err("SLUB: Unable to initialize free list for %s\n",
			s->name);
		return err;
	}

	/* Transform to an offset on the set of pages */
	if (s->random_seq) {
		for (i = 0; i < count; i++)
			s->random_seq[i] *= s->size;
	}
	return 0;
}

/* Initialize each random sequence freelist per cache */
static void __init init_freelist_randomization(void)
{
	struct kmem_cache *s;

	mutex_lock(&slab_mutex);

	list_for_each_entry(s, &slab_caches, list)
		init_cache_random_seq(s);

	mutex_unlock(&slab_mutex);
}

/* Get the next entry on the pre-computed freelist randomized */
static void *next_freelist_entry(struct kmem_cache *s, struct page *page,
				unsigned long *pos, void *start,
				unsigned long page_limit,
				unsigned long freelist_count)
{
	unsigned int idx;

	/*
	 * If the target page allocation failed, the number of objects on the
	 * page might be smaller than the usual size defined by the cache.
	 */
	do {
		idx = s->random_seq[*pos];
		*pos += 1;
		if (*pos >= freelist_count)
			*pos = 0;
	} while (unlikely(idx >= page_limit));

	return (char *)start + idx;
}

/* Shuffle the single linked freelist based on a random pre-computed sequence */
static bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	void *start;
	void *cur;
	void *next;
	unsigned long idx, pos, page_limit, freelist_count;

	if (page->objects < 2 || !s->random_seq)
		return false;

	freelist_count = oo_objects(s->oo);
	pos = get_random_int() % freelist_count;

	page_limit = page->objects * s->size;
	start = fixup_red_left(s, page_address(page));

	/* First entry is used as the base of the freelist */
	cur = next_freelist_entry(s, page, &pos, start, page_limit,
				freelist_count);
	page->freelist = cur;

	for (idx = 1; idx < page->objects; idx++) {
		setup_object(s, page, cur);
		next = next_freelist_entry(s, page, &pos, start, page_limit,
			freelist_count);
		set_freepointer(s, cur, next);
		cur = next;
	}
	setup_object(s, page, cur);
	set_freepointer(s, cur, NULL);

	return true;
}
#else
static inline int init_cache_random_seq(struct kmem_cache *s)
{
	return 0;
}
static inline void init_freelist_randomization(void) { }
static inline bool shuffle_freelist(struct kmem_cache *s, struct page *page)
{
	return false;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;
	void *start, *p;
	int idx, order;
	bool shuffle;

	flags &= gfp_allowed_mask;

	if (gfpflags_allow_blocking(flags))
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;
	if ((alloc_gfp & __GFP_DIRECT_RECLAIM) && oo_order(oo) > oo_order(s->min))
		alloc_gfp = (alloc_gfp | __GFP_NOMEMALLOC) & ~(__GFP_RECLAIM|__GFP_NOFAIL);

	page = alloc_slab_page(s, alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(s, alloc_gfp, node, oo);
		if (unlikely(!page))
			goto out;
		stat(s, ORDER_FALLBACK);
	}

	if (kmemcheck_enabled &&
	    !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
		int pages = 1 << oo_order(oo);

		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);

		/*
		 * Objects from caches that have a constructor don't get
		 * cleared when they're allocated, so we need to do it here.
		 */
		if (s->ctor)
			kmemcheck_mark_uninitialized_pages(page, pages);
		else
			kmemcheck_mark_unallocated_pages(page, pages);
	}

	page->objects = oo_objects(oo);

	order = compound_order(page);
	page->slab_cache = s;
	__SetPageSlab(page);
	if (page_is_pfmemalloc(page))
		SetPageSlabPfmemalloc(page);

	start = page_address(page);

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << order);

	kasan_poison_slab(page);

	shuffle = shuffle_freelist(s, page);

	if (!shuffle) {
		for_each_object_idx(p, idx, s, start, page->objects) {
			setup_object(s, page, p);
			if (likely(idx < page->objects))
				set_freepointer(s, p, p + s->size);
			else
				set_freepointer(s, p, NULL);
		}
		page->freelist = fixup_red_left(s, start);
	}

	page->inuse = page->objects;
	page->frozen = 1;

out:
	if (gfpflags_allow_blocking(flags))
		local_irq_disable();
	if (!page)
		return NULL;

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	inc_slabs_node(s, page_to_nid(page), page->objects);

	return page;
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	if (unlikely(flags & GFP_SLAB_BUG_MASK)) {
		gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
		flags &= ~GFP_SLAB_BUG_MASK;
		pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
				invalid_mask, &invalid_mask, flags, &flags);
	}

	return allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
}
1638 
__free_slab(struct kmem_cache * s,struct page * page)1639 static void __free_slab(struct kmem_cache *s, struct page *page)
1640 {
1641 	int order = compound_order(page);
1642 	int pages = 1 << order;
1643 
1644 	if (s->flags & SLAB_CONSISTENCY_CHECKS) {
1645 		void *p;
1646 
1647 		slab_pad_check(s, page);
1648 		for_each_object(p, s, page_address(page),
1649 						page->objects)
1650 			check_object(s, page, p, SLUB_RED_INACTIVE);
1651 	}
1652 
1653 	kmemcheck_free_shadow(page, compound_order(page));
1654 
1655 	mod_zone_page_state(page_zone(page),
1656 		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
1657 		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1658 		-pages);
1659 
1660 	__ClearPageSlabPfmemalloc(page);
1661 	__ClearPageSlab(page);
1662 
1663 	page_mapcount_reset(page);
1664 	if (current->reclaim_state)
1665 		current->reclaim_state->reclaimed_slab += pages;
1666 	memcg_uncharge_slab(page, order, s);
1667 	__free_pages(page, order);
1668 }
1669 
1670 #define need_reserve_slab_rcu						\
1671 	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))
1672 
rcu_free_slab(struct rcu_head * h)1673 static void rcu_free_slab(struct rcu_head *h)
1674 {
1675 	struct page *page;
1676 
1677 	if (need_reserve_slab_rcu)
1678 		page = virt_to_head_page(h);
1679 	else
1680 		page = container_of((struct list_head *)h, struct page, lru);
1681 
1682 	__free_slab(page->slab_cache, page);
1683 }
1684 
free_slab(struct kmem_cache * s,struct page * page)1685 static void free_slab(struct kmem_cache *s, struct page *page)
1686 {
1687 	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1688 		struct rcu_head *head;
1689 
1690 		if (need_reserve_slab_rcu) {
1691 			int order = compound_order(page);
1692 			int offset = (PAGE_SIZE << order) - s->reserved;
1693 
1694 			VM_BUG_ON(s->reserved != sizeof(*head));
1695 			head = page_address(page) + offset;
1696 		} else {
1697 			head = &page->rcu_head;
1698 		}
1699 
1700 		call_rcu(head, rcu_free_slab);
1701 	} else
1702 		__free_slab(s, page);
1703 }
1704 
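/*
 * Editor's note: when need_reserve_slab_rcu is true above, the rcu_head
 * cannot live in page->lru and is instead placed in the reserved tail of
 * the slab itself. For example, on a hypothetical 64-bit config with an
 * order-1 slab and s->reserved == sizeof(struct rcu_head) == 16, the head
 * would sit at page_address(page) + 8192 - 16.
 */
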
1705 static void discard_slab(struct kmem_cache *s, struct page *page)
1706 {
1707 	dec_slabs_node(s, page_to_nid(page), page->objects);
1708 	free_slab(s, page);
1709 }
1710 
1711 /*
1712  * Management of partially allocated slabs.
1713  */
1714 static inline void
1715 __add_partial(struct kmem_cache_node *n, struct page *page, int tail)
1716 {
1717 	n->nr_partial++;
1718 	if (tail == DEACTIVATE_TO_TAIL)
1719 		list_add_tail(&page->lru, &n->partial);
1720 	else
1721 		list_add(&page->lru, &n->partial);
1722 }
1723 
1724 static inline void add_partial(struct kmem_cache_node *n,
1725 				struct page *page, int tail)
1726 {
1727 	lockdep_assert_held(&n->list_lock);
1728 	__add_partial(n, page, tail);
1729 }
1730 
1731 static inline void remove_partial(struct kmem_cache_node *n,
1732 					struct page *page)
1733 {
1734 	lockdep_assert_held(&n->list_lock);
1735 	list_del(&page->lru);
1736 	n->nr_partial--;
1737 }
1738 
1739 /*
1740  * Remove slab from the partial list, freeze it and
1741  * return the pointer to the freelist.
1742  *
1743  * Returns a list of objects or NULL if it fails.
1744  */
1745 static inline void *acquire_slab(struct kmem_cache *s,
1746 		struct kmem_cache_node *n, struct page *page,
1747 		int mode, int *objects)
1748 {
1749 	void *freelist;
1750 	unsigned long counters;
1751 	struct page new;
1752 
1753 	lockdep_assert_held(&n->list_lock);
1754 
1755 	/*
1756 	 * Zap the freelist and set the frozen bit.
1757 	 * The old freelist is the list of objects for the
1758 	 * per cpu allocation list.
1759 	 */
1760 	freelist = page->freelist;
1761 	counters = page->counters;
1762 	new.counters = counters;
1763 	*objects = new.objects - new.inuse;
1764 	if (mode) {
1765 		new.inuse = page->objects;
1766 		new.freelist = NULL;
1767 	} else {
1768 		new.freelist = freelist;
1769 	}
1770 
1771 	VM_BUG_ON(new.frozen);
1772 	new.frozen = 1;
1773 
1774 	if (!__cmpxchg_double_slab(s, page,
1775 			freelist, counters,
1776 			new.freelist, new.counters,
1777 			"acquire_slab"))
1778 		return NULL;
1779 
1780 	remove_partial(n, page);
1781 	WARN_ON(!freelist);
1782 	return freelist;
1783 }
1784 
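/*
 * Editor's sketch (not kernel code): the freeze step in acquire_slab()
 * modeled in userspace with a single 64-bit atomic standing in for the
 * combined page->freelist/page->counters double word. Names prefixed
 * "toy_" are illustrative.
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool toy_freeze(_Atomic uint64_t *counters, uint32_t objects)
{
        uint64_t old = atomic_load(counters);
        /* frozen = 1 in the high word, inuse = objects in the low word */
        uint64_t new = ((uint64_t)1 << 32) | objects;

        /*
         * Single attempt, like acquire_slab(): if another cpu changed the
         * slab in the meantime, give up and let the caller try elsewhere.
         */
        return atomic_compare_exchange_strong(counters, &old, new);
}
#endif
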
1785 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
1786 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);
1787 
1788 /*
1789  * Try to allocate a partial slab from a specific node.
1790  */
1791 static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
1792 				struct kmem_cache_cpu *c, gfp_t flags)
1793 {
1794 	struct page *page, *page2;
1795 	void *object = NULL;
1796 	int available = 0;
1797 	int objects;
1798 
1799 	/*
1800 	 * Racy check. If we mistakenly see no partial slabs then we
1801 	 * just allocate an empty slab. If we mistakenly try to get a
1802 	 * partial slab and there is none available then get_partial_node()
1803 	 * will return NULL.
1804 	 */
1805 	if (!n || !n->nr_partial)
1806 		return NULL;
1807 
1808 	spin_lock(&n->list_lock);
1809 	list_for_each_entry_safe(page, page2, &n->partial, lru) {
1810 		void *t;
1811 
1812 		if (!pfmemalloc_match(page, flags))
1813 			continue;
1814 
1815 		t = acquire_slab(s, n, page, object == NULL, &objects);
1816 		if (!t)
1817 			break;
1818 
1819 		available += objects;
1820 		if (!object) {
1821 			c->page = page;
1822 			stat(s, ALLOC_FROM_PARTIAL);
1823 			object = t;
1824 		} else {
1825 			put_cpu_partial(s, page, 0);
1826 			stat(s, CPU_PARTIAL_NODE);
1827 		}
1828 		if (!kmem_cache_has_cpu_partial(s)
1829 			|| available > s->cpu_partial / 2)
1830 			break;
1831 
1832 	}
1833 	spin_unlock(&n->list_lock);
1834 	return object;
1835 }
1836 
1837 /*
1838  * Get a page from somewhere. Search in increasing NUMA distances.
1839  */
1840 static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
1841 		struct kmem_cache_cpu *c)
1842 {
1843 #ifdef CONFIG_NUMA
1844 	struct zonelist *zonelist;
1845 	struct zoneref *z;
1846 	struct zone *zone;
1847 	enum zone_type high_zoneidx = gfp_zone(flags);
1848 	void *object;
1849 	unsigned int cpuset_mems_cookie;
1850 
1851 	/*
1852 	 * The defrag ratio allows a configuration of the tradeoffs between
1853 	 * inter node defragmentation and node local allocations. A lower
1854 	 * defrag_ratio increases the tendency to do local allocations
1855 	 * instead of attempting to obtain partial slabs from other nodes.
1856 	 *
1857 	 * If the defrag_ratio is set to 0 then kmalloc() always
1858 	 * returns node local objects. If the ratio is higher then kmalloc()
1859 	 * may return off node objects because partial slabs are obtained
1860 	 * from other nodes and filled up.
1861 	 *
1862 	 * If /sys/kernel/slab/xx/remote_node_defrag_ratio is set to 100
1863 	 * (which makes defrag_ratio = 1000) then every (well almost)
1864 	 * allocation will first attempt to defrag slab caches on other nodes.
1865 	 * This means scanning over all nodes to look for partial slabs which
1866 	 * may be expensive if we do it every time we are trying to find a slab
1867 	 * with available objects.
1868 	 */
1869 	if (!s->remote_node_defrag_ratio ||
1870 			get_cycles() % 1024 > s->remote_node_defrag_ratio)
1871 		return NULL;
1872 
1873 	do {
1874 		cpuset_mems_cookie = read_mems_allowed_begin();
1875 		zonelist = node_zonelist(mempolicy_slab_node(), flags);
1876 		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
1877 			struct kmem_cache_node *n;
1878 
1879 			n = get_node(s, zone_to_nid(zone));
1880 
1881 			if (n && cpuset_zone_allowed(zone, flags) &&
1882 					n->nr_partial > s->min_partial) {
1883 				object = get_partial_node(s, n, c, flags);
1884 				if (object) {
1885 					/*
1886 					 * Don't check read_mems_allowed_retry()
1887 					 * here - if mems_allowed was updated in
1888 					 * parallel, that was a harmless race
1889 					 * between allocation and the cpuset
1890 					 * update
1891 					 */
1892 					return object;
1893 				}
1894 			}
1895 		}
1896 	} while (read_mems_allowed_retry(cpuset_mems_cookie));
1897 #endif
1898 	return NULL;
1899 }
1900 
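/*
 * Editor's sketch: the remote_node_defrag_ratio test above acts as a
 * cheap probabilistic gate. With the sysfs value v (0..100) stored
 * internally as ratio = 10 * v, a remote scan proceeds with probability
 * of roughly ratio/1024:
 */
#if 0
#include <stdbool.h>
#include <stdlib.h>

static bool toy_try_remote_defrag(unsigned int ratio)
{
        /* rand() stands in for get_cycles() as a cheap entropy source */
        return ratio && ((unsigned int)rand() % 1024 <= ratio);
}
#endif
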
1901 /*
1902  * Get a partial page, lock it and return it.
1903  */
1904 static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
1905 		struct kmem_cache_cpu *c)
1906 {
1907 	void *object;
1908 	int searchnode = node;
1909 
1910 	if (node == NUMA_NO_NODE)
1911 		searchnode = numa_mem_id();
1912 	else if (!node_present_pages(node))
1913 		searchnode = node_to_mem_node(node);
1914 
1915 	object = get_partial_node(s, get_node(s, searchnode), c, flags);
1916 	if (object || node != NUMA_NO_NODE)
1917 		return object;
1918 
1919 	return get_any_partial(s, flags, c);
1920 }
1921 
1922 #ifdef CONFIG_PREEMPT
1923 /*
1924  * Calculate the next globally unique transaction for disambiguation
1925  * during cmpxchg. The transactions start with the cpu number and are then
1926  * incremented by CONFIG_NR_CPUS.
1927  */
1928 #define TID_STEP  roundup_pow_of_two(CONFIG_NR_CPUS)
1929 #else
1930 /*
1931  * No preemption supported therefore also no need to check for
1932  * different cpus.
1933  */
1934 #define TID_STEP 1
1935 #endif
1936 
1937 static inline unsigned long next_tid(unsigned long tid)
1938 {
1939 	return tid + TID_STEP;
1940 }
1941 
1942 static inline unsigned int tid_to_cpu(unsigned long tid)
1943 {
1944 	return tid % TID_STEP;
1945 }
1946 
1947 static inline unsigned long tid_to_event(unsigned long tid)
1948 {
1949 	return tid / TID_STEP;
1950 }
1951 
1952 static inline unsigned int init_tid(int cpu)
1953 {
1954 	return cpu;
1955 }
1956 
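/*
 * Editor's sketch: with CONFIG_NR_CPUS rounded up to, say, TID_STEP = 64,
 * the low bits of a tid identify the cpu and the upper bits count
 * operations, so both can always be recovered:
 */
#if 0
#include <assert.h>

#define TOY_TID_STEP 64UL

int main(void)
{
        unsigned long tid = 5;                  /* init_tid() on cpu 5 */

        tid += TOY_TID_STEP;                    /* next_tid(): one operation */
        tid += TOY_TID_STEP;                    /* next_tid(): another one */

        assert(tid % TOY_TID_STEP == 5);        /* tid_to_cpu() */
        assert(tid / TOY_TID_STEP == 2);        /* tid_to_event() */
        return 0;
}
#endif
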
1957 static inline void note_cmpxchg_failure(const char *n,
1958 		const struct kmem_cache *s, unsigned long tid)
1959 {
1960 #ifdef SLUB_DEBUG_CMPXCHG
1961 	unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1962 
1963 	pr_info("%s %s: cmpxchg redo ", n, s->name);
1964 
1965 #ifdef CONFIG_PREEMPT
1966 	if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1967 		pr_warn("due to cpu change %d -> %d\n",
1968 			tid_to_cpu(tid), tid_to_cpu(actual_tid));
1969 	else
1970 #endif
1971 	if (tid_to_event(tid) != tid_to_event(actual_tid))
1972 		pr_warn("due to cpu running other code. Event %ld->%ld\n",
1973 			tid_to_event(tid), tid_to_event(actual_tid));
1974 	else
1975 		pr_warn("for unknown reason: actual=%lx was=%lx target=%lx\n",
1976 			actual_tid, tid, next_tid(tid));
1977 #endif
1978 	stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
1979 }
1980 
1981 static void init_kmem_cache_cpus(struct kmem_cache *s)
1982 {
1983 	int cpu;
1984 
1985 	for_each_possible_cpu(cpu)
1986 		per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
1987 }
1988 
1989 /*
1990  * Remove the cpu slab
1991  */
1992 static void deactivate_slab(struct kmem_cache *s, struct page *page,
1993 				void *freelist)
1994 {
1995 	enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
1996 	struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1997 	int lock = 0;
1998 	enum slab_modes l = M_NONE, m = M_NONE;
1999 	void *nextfree;
2000 	int tail = DEACTIVATE_TO_HEAD;
2001 	struct page new;
2002 	struct page old;
2003 
2004 	if (page->freelist) {
2005 		stat(s, DEACTIVATE_REMOTE_FREES);
2006 		tail = DEACTIVATE_TO_TAIL;
2007 	}
2008 
2009 	/*
2010 	 * Stage one: Free all available per cpu objects back
2011 	 * to the page freelist while it is still frozen. Leave the
2012 	 * last one.
2013 	 *
2014 	 * There is no need to take the list->lock because the page
2015 	 * is still frozen.
2016 	 */
2017 	while (freelist && (nextfree = get_freepointer(s, freelist))) {
2018 		void *prior;
2019 		unsigned long counters;
2020 
2021 		do {
2022 			prior = page->freelist;
2023 			counters = page->counters;
2024 			set_freepointer(s, freelist, prior);
2025 			new.counters = counters;
2026 			new.inuse--;
2027 			VM_BUG_ON(!new.frozen);
2028 
2029 		} while (!__cmpxchg_double_slab(s, page,
2030 			prior, counters,
2031 			freelist, new.counters,
2032 			"drain percpu freelist"));
2033 
2034 		freelist = nextfree;
2035 	}
2036 
2037 	/*
2038 	 * Stage two: Ensure that the page is unfrozen while the
2039 	 * list presence reflects the actual number of objects
2040 	 * during unfreeze.
2041 	 *
2042 	 * We set up the list membership and then perform a cmpxchg
2043 	 * with the count. If there is a mismatch then the page
2044 	 * is not unfrozen but the page is on the wrong list.
2045 	 *
2046 	 * Then we restart the process which may have to remove
2047 	 * the page from the list that we just put it on again
2048 	 * because the number of objects in the slab may have
2049 	 * changed.
2050 	 */
2051 redo:
2052 
2053 	old.freelist = page->freelist;
2054 	old.counters = page->counters;
2055 	VM_BUG_ON(!old.frozen);
2056 
2057 	/* Determine target state of the slab */
2058 	new.counters = old.counters;
2059 	if (freelist) {
2060 		new.inuse--;
2061 		set_freepointer(s, freelist, old.freelist);
2062 		new.freelist = freelist;
2063 	} else
2064 		new.freelist = old.freelist;
2065 
2066 	new.frozen = 0;
2067 
2068 	if (!new.inuse && n->nr_partial >= s->min_partial)
2069 		m = M_FREE;
2070 	else if (new.freelist) {
2071 		m = M_PARTIAL;
2072 		if (!lock) {
2073 			lock = 1;
2074 			/*
2075 			 * Taking the spinlock removes the possibility
2076 			 * that acquire_slab() will see a slab page that
2077 			 * is frozen
2078 			 */
2079 			spin_lock(&n->list_lock);
2080 		}
2081 	} else {
2082 		m = M_FULL;
2083 		if (kmem_cache_debug(s) && !lock) {
2084 			lock = 1;
2085 			/*
2086 			 * This also ensures that the scanning of full
2087 			 * slabs from diagnostic functions will not see
2088 			 * any frozen slabs.
2089 			 */
2090 			spin_lock(&n->list_lock);
2091 		}
2092 	}
2093 
2094 	if (l != m) {
2095 
2096 		if (l == M_PARTIAL)
2097 
2098 			remove_partial(n, page);
2099 
2100 		else if (l == M_FULL)
2101 
2102 			remove_full(s, n, page);
2103 
2104 		if (m == M_PARTIAL) {
2105 
2106 			add_partial(n, page, tail);
2107 			stat(s, tail);
2108 
2109 		} else if (m == M_FULL) {
2110 
2111 			stat(s, DEACTIVATE_FULL);
2112 			add_full(s, n, page);
2113 
2114 		}
2115 	}
2116 
2117 	l = m;
2118 	if (!__cmpxchg_double_slab(s, page,
2119 				old.freelist, old.counters,
2120 				new.freelist, new.counters,
2121 				"unfreezing slab"))
2122 		goto redo;
2123 
2124 	if (lock)
2125 		spin_unlock(&n->list_lock);
2126 
2127 	if (m == M_FREE) {
2128 		stat(s, DEACTIVATE_EMPTY);
2129 		discard_slab(s, page);
2130 		stat(s, FREE_SLAB);
2131 	}
2132 }
2133 
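/*
 * Editor's sketch: the target-state decision in stage two of
 * deactivate_slab(), written as a pure function (names are illustrative,
 * not kernel API):
 */
#if 0
#include <stdbool.h>

enum toy_mode { TOY_FREE, TOY_PARTIAL, TOY_FULL };

static enum toy_mode toy_target_state(unsigned int inuse, bool has_free,
                                      unsigned long nr_partial,
                                      unsigned long min_partial)
{
        if (!inuse && nr_partial >= min_partial)
                return TOY_FREE;        /* empty and enough partials: discard */
        if (has_free)
                return TOY_PARTIAL;     /* free objects left: partial list */
        return TOY_FULL;                /* fully allocated: full list */
}
#endif
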
2134 /*
2135  * Unfreeze all the cpu partial slabs.
2136  *
2137  * This function must be called with interrupts disabled
2138  * for the cpu using c (or some other mechanism must guarantee
2139  * that there are no concurrent accesses).
2140  */
2141 static void unfreeze_partials(struct kmem_cache *s,
2142 		struct kmem_cache_cpu *c)
2143 {
2144 #ifdef CONFIG_SLUB_CPU_PARTIAL
2145 	struct kmem_cache_node *n = NULL, *n2 = NULL;
2146 	struct page *page, *discard_page = NULL;
2147 
2148 	while ((page = c->partial)) {
2149 		struct page new;
2150 		struct page old;
2151 
2152 		c->partial = page->next;
2153 
2154 		n2 = get_node(s, page_to_nid(page));
2155 		if (n != n2) {
2156 			if (n)
2157 				spin_unlock(&n->list_lock);
2158 
2159 			n = n2;
2160 			spin_lock(&n->list_lock);
2161 		}
2162 
2163 		do {
2164 
2165 			old.freelist = page->freelist;
2166 			old.counters = page->counters;
2167 			VM_BUG_ON(!old.frozen);
2168 
2169 			new.counters = old.counters;
2170 			new.freelist = old.freelist;
2171 
2172 			new.frozen = 0;
2173 
2174 		} while (!__cmpxchg_double_slab(s, page,
2175 				old.freelist, old.counters,
2176 				new.freelist, new.counters,
2177 				"unfreezing slab"));
2178 
2179 		if (unlikely(!new.inuse && n->nr_partial >= s->min_partial)) {
2180 			page->next = discard_page;
2181 			discard_page = page;
2182 		} else {
2183 			add_partial(n, page, DEACTIVATE_TO_TAIL);
2184 			stat(s, FREE_ADD_PARTIAL);
2185 		}
2186 	}
2187 
2188 	if (n)
2189 		spin_unlock(&n->list_lock);
2190 
2191 	while (discard_page) {
2192 		page = discard_page;
2193 		discard_page = discard_page->next;
2194 
2195 		stat(s, DEACTIVATE_EMPTY);
2196 		discard_slab(s, page);
2197 		stat(s, FREE_SLAB);
2198 	}
2199 #endif
2200 }
2201 
2202 /*
2203  * Put a page that was just frozen (in __slab_free) into a partial page
2204  * slot if available. This is done without interrupts disabled (preemption
2205  * is disabled only around the per cpu update below). The cmpxchg is racy
2206  * and may put the partial page onto a random cpu's partial slot.
2207  *
2208  * If we did not find a slot then simply move all the partials to the
2209  * per node partial list.
2210  */
2211 static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
2212 {
2213 #ifdef CONFIG_SLUB_CPU_PARTIAL
2214 	struct page *oldpage;
2215 	int pages;
2216 	int pobjects;
2217 
2218 	preempt_disable();
2219 	do {
2220 		pages = 0;
2221 		pobjects = 0;
2222 		oldpage = this_cpu_read(s->cpu_slab->partial);
2223 
2224 		if (oldpage) {
2225 			pobjects = oldpage->pobjects;
2226 			pages = oldpage->pages;
2227 			if (drain && pobjects > s->cpu_partial) {
2228 				unsigned long flags;
2229 				/*
2230 				 * partial array is full. Move the existing
2231 				 * set to the per node partial list.
2232 				 */
2233 				local_irq_save(flags);
2234 				unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2235 				local_irq_restore(flags);
2236 				oldpage = NULL;
2237 				pobjects = 0;
2238 				pages = 0;
2239 				stat(s, CPU_PARTIAL_DRAIN);
2240 			}
2241 		}
2242 
2243 		pages++;
2244 		pobjects += page->objects - page->inuse;
2245 
2246 		page->pages = pages;
2247 		page->pobjects = pobjects;
2248 		page->next = oldpage;
2249 
2250 	} while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2251 								!= oldpage);
2252 	if (unlikely(!s->cpu_partial)) {
2253 		unsigned long flags;
2254 
2255 		local_irq_save(flags);
2256 		unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
2257 		local_irq_restore(flags);
2258 	}
2259 	preempt_enable();
2260 #endif
2261 }
2262 
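/*
 * Editor's sketch: the cmpxchg loop in put_cpu_partial() is essentially a
 * lock-free stack push onto the per cpu partial list (the kernel version
 * additionally drains to the node list when pobjects overflows). In
 * userspace terms:
 */
#if 0
#include <stdatomic.h>

struct toy_page { struct toy_page *next; };

static void toy_push_partial(_Atomic(struct toy_page *) *top,
                             struct toy_page *page)
{
        struct toy_page *old;

        do {
                old = atomic_load(top);
                page->next = old;       /* link before publishing */
        } while (!atomic_compare_exchange_weak(top, &old, page));
}
#endif
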
2263 static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
2264 {
2265 	stat(s, CPUSLAB_FLUSH);
2266 	deactivate_slab(s, c->page, c->freelist);
2267 
2268 	c->tid = next_tid(c->tid);
2269 	c->page = NULL;
2270 	c->freelist = NULL;
2271 }
2272 
2273 /*
2274  * Flush cpu slab.
2275  *
2276  * Called from IPI handler with interrupts disabled.
2277  */
2278 static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
2279 {
2280 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2281 
2282 	if (likely(c)) {
2283 		if (c->page)
2284 			flush_slab(s, c);
2285 
2286 		unfreeze_partials(s, c);
2287 	}
2288 }
2289 
2290 static void flush_cpu_slab(void *d)
2291 {
2292 	struct kmem_cache *s = d;
2293 
2294 	__flush_cpu_slab(s, smp_processor_id());
2295 }
2296 
2297 static bool has_cpu_slab(int cpu, void *info)
2298 {
2299 	struct kmem_cache *s = info;
2300 	struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2301 
2302 	return c->page || c->partial;
2303 }
2304 
2305 static void flush_all(struct kmem_cache *s)
2306 {
2307 	on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
2308 }
2309 
2310 /*
2311  * Use the cpu notifier to ensure that the cpu slabs are flushed when
2312  * necessary.
2313  */
2314 static int slub_cpu_dead(unsigned int cpu)
2315 {
2316 	struct kmem_cache *s;
2317 	unsigned long flags;
2318 
2319 	mutex_lock(&slab_mutex);
2320 	list_for_each_entry(s, &slab_caches, list) {
2321 		local_irq_save(flags);
2322 		__flush_cpu_slab(s, cpu);
2323 		local_irq_restore(flags);
2324 	}
2325 	mutex_unlock(&slab_mutex);
2326 	return 0;
2327 }
2328 
2329 /*
2330  * Check if the objects in a per cpu structure fit numa
2331  * locality expectations.
2332  */
2333 static inline int node_match(struct page *page, int node)
2334 {
2335 #ifdef CONFIG_NUMA
2336 	if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
2337 		return 0;
2338 #endif
2339 	return 1;
2340 }
2341 
2342 #ifdef CONFIG_SLUB_DEBUG
2343 static int count_free(struct page *page)
2344 {
2345 	return page->objects - page->inuse;
2346 }
2347 
2348 static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2349 {
2350 	return atomic_long_read(&n->total_objects);
2351 }
2352 #endif /* CONFIG_SLUB_DEBUG */
2353 
2354 #if defined(CONFIG_SLUB_DEBUG) || defined(CONFIG_SYSFS)
2355 static unsigned long count_partial(struct kmem_cache_node *n,
2356 					int (*get_count)(struct page *))
2357 {
2358 	unsigned long flags;
2359 	unsigned long x = 0;
2360 	struct page *page;
2361 
2362 	spin_lock_irqsave(&n->list_lock, flags);
2363 	list_for_each_entry(page, &n->partial, lru)
2364 		x += get_count(page);
2365 	spin_unlock_irqrestore(&n->list_lock, flags);
2366 	return x;
2367 }
2368 #endif /* CONFIG_SLUB_DEBUG || CONFIG_SYSFS */
2369 
2370 static noinline void
2371 slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2372 {
2373 #ifdef CONFIG_SLUB_DEBUG
2374 	static DEFINE_RATELIMIT_STATE(slub_oom_rs, DEFAULT_RATELIMIT_INTERVAL,
2375 				      DEFAULT_RATELIMIT_BURST);
2376 	int node;
2377 	struct kmem_cache_node *n;
2378 
2379 	if ((gfpflags & __GFP_NOWARN) || !__ratelimit(&slub_oom_rs))
2380 		return;
2381 
2382 	pr_warn("SLUB: Unable to allocate memory on node %d, gfp=%#x(%pGg)\n",
2383 		nid, gfpflags, &gfpflags);
2384 	pr_warn("  cache: %s, object size: %d, buffer size: %d, default order: %d, min order: %d\n",
2385 		s->name, s->object_size, s->size, oo_order(s->oo),
2386 		oo_order(s->min));
2387 
2388 	if (oo_order(s->min) > get_order(s->object_size))
2389 		pr_warn("  %s debugging increased min order, use slub_debug=O to disable.\n",
2390 			s->name);
2391 
2392 	for_each_kmem_cache_node(s, node, n) {
2393 		unsigned long nr_slabs;
2394 		unsigned long nr_objs;
2395 		unsigned long nr_free;
2396 
2397 		nr_free  = count_partial(n, count_free);
2398 		nr_slabs = node_nr_slabs(n);
2399 		nr_objs  = node_nr_objs(n);
2400 
2401 		pr_warn("  node %d: slabs: %ld, objs: %ld, free: %ld\n",
2402 			node, nr_slabs, nr_objs, nr_free);
2403 	}
2404 #endif
2405 }
2406 
2407 static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2408 			int node, struct kmem_cache_cpu **pc)
2409 {
2410 	void *freelist;
2411 	struct kmem_cache_cpu *c = *pc;
2412 	struct page *page;
2413 
2414 	freelist = get_partial(s, flags, node, c);
2415 
2416 	if (freelist)
2417 		return freelist;
2418 
2419 	page = new_slab(s, flags, node);
2420 	if (page) {
2421 		c = raw_cpu_ptr(s->cpu_slab);
2422 		if (c->page)
2423 			flush_slab(s, c);
2424 
2425 		/*
2426 		 * No other reference to the page yet so we can
2427 		 * muck around with it freely without cmpxchg
2428 		 */
2429 		freelist = page->freelist;
2430 		page->freelist = NULL;
2431 
2432 		stat(s, ALLOC_SLAB);
2433 		c->page = page;
2434 		*pc = c;
2435 	} else
2436 		freelist = NULL;
2437 
2438 	return freelist;
2439 }
2440 
2441 static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2442 {
2443 	if (unlikely(PageSlabPfmemalloc(page)))
2444 		return gfp_pfmemalloc_allowed(gfpflags);
2445 
2446 	return true;
2447 }
2448 
2449 /*
2450  * Check the page->freelist of a page and either transfer the freelist to the
2451  * per cpu freelist or deactivate the page.
2452  *
2453  * The page is still frozen if the return value is not NULL.
2454  *
2455  * If this function returns NULL then the page has been unfrozen.
2456  *
2457  * This function must be called with interrupts disabled.
2458  */
2459 static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2460 {
2461 	struct page new;
2462 	unsigned long counters;
2463 	void *freelist;
2464 
2465 	do {
2466 		freelist = page->freelist;
2467 		counters = page->counters;
2468 
2469 		new.counters = counters;
2470 		VM_BUG_ON(!new.frozen);
2471 
2472 		new.inuse = page->objects;
2473 		new.frozen = freelist != NULL;
2474 
2475 	} while (!__cmpxchg_double_slab(s, page,
2476 		freelist, counters,
2477 		NULL, new.counters,
2478 		"get_freelist"));
2479 
2480 	return freelist;
2481 }
2482 
2483 /*
2484  * Slow path. The lockless freelist is empty or we need to perform
2485  * debugging duties.
2486  *
2487  * Processing is still very fast if new objects have been freed to the
2488  * regular freelist. In that case we simply take over the regular freelist
2489  * as the lockless freelist and zap the regular freelist.
2490  *
2491  * If that is not working then we fall back to the partial lists. We take the
2492  * first element of the freelist as the object to allocate now and move the
2493  * rest of the freelist to the lockless freelist.
2494  *
2495  * And if we were unable to get a new slab from the partial slab lists then
2496  * we need to allocate a new slab. This is the slowest path since it involves
2497  * a call to the page allocator and the setup of a new slab.
2498  *
2499  * Version of __slab_alloc to use when we know that interrupts are
2500  * already disabled (which is the case for bulk allocation).
2501  */
2502 static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2503 			  unsigned long addr, struct kmem_cache_cpu *c)
2504 {
2505 	void *freelist;
2506 	struct page *page;
2507 
2508 	page = c->page;
2509 	if (!page)
2510 		goto new_slab;
2511 redo:
2512 
2513 	if (unlikely(!node_match(page, node))) {
2514 		int searchnode = node;
2515 
2516 		if (node != NUMA_NO_NODE && !node_present_pages(node))
2517 			searchnode = node_to_mem_node(node);
2518 
2519 		if (unlikely(!node_match(page, searchnode))) {
2520 			stat(s, ALLOC_NODE_MISMATCH);
2521 			deactivate_slab(s, page, c->freelist);
2522 			c->page = NULL;
2523 			c->freelist = NULL;
2524 			goto new_slab;
2525 		}
2526 	}
2527 
2528 	/*
2529 	 * By rights, we should be searching for a slab page that was
2530 	 * PFMEMALLOC but right now, we are losing the pfmemalloc
2531 	 * information when the page leaves the per-cpu allocator
2532 	 */
2533 	if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2534 		deactivate_slab(s, page, c->freelist);
2535 		c->page = NULL;
2536 		c->freelist = NULL;
2537 		goto new_slab;
2538 	}
2539 
2540 	/* must check again c->freelist in case of cpu migration or IRQ */
2541 	freelist = c->freelist;
2542 	if (freelist)
2543 		goto load_freelist;
2544 
2545 	freelist = get_freelist(s, page);
2546 
2547 	if (!freelist) {
2548 		c->page = NULL;
2549 		stat(s, DEACTIVATE_BYPASS);
2550 		goto new_slab;
2551 	}
2552 
2553 	stat(s, ALLOC_REFILL);
2554 
2555 load_freelist:
2556 	/*
2557 	 * freelist is pointing to the list of objects to be used.
2558 	 * page is pointing to the page from which the objects are obtained.
2559 	 * That page must be frozen for per cpu allocations to work.
2560 	 */
2561 	VM_BUG_ON(!c->page->frozen);
2562 	c->freelist = get_freepointer(s, freelist);
2563 	c->tid = next_tid(c->tid);
2564 	return freelist;
2565 
2566 new_slab:
2567 
2568 	if (c->partial) {
2569 		page = c->page = c->partial;
2570 		c->partial = page->next;
2571 		stat(s, CPU_PARTIAL_ALLOC);
2572 		c->freelist = NULL;
2573 		goto redo;
2574 	}
2575 
2576 	freelist = new_slab_objects(s, gfpflags, node, &c);
2577 
2578 	if (unlikely(!freelist)) {
2579 		slab_out_of_memory(s, gfpflags, node);
2580 		return NULL;
2581 	}
2582 
2583 	page = c->page;
2584 	if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
2585 		goto load_freelist;
2586 
2587 	/* Only entered in the debug case */
2588 	if (kmem_cache_debug(s) &&
2589 			!alloc_debug_processing(s, page, freelist, addr))
2590 		goto new_slab;	/* Slab failed checks. Next slab needed */
2591 
2592 	deactivate_slab(s, page, get_freepointer(s, freelist));
2593 	c->page = NULL;
2594 	c->freelist = NULL;
2595 	return freelist;
2596 }
2597 
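/*
 * Editor's note: ___slab_alloc() thus tries, in order: (1) the current
 * page's freelist via get_freelist(), (2) the cpu partial list, (3) the
 * node partial lists via new_slab_objects() -> get_partial(), and only
 * then (4) a fresh slab from the page allocator via new_slab().
 */
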
2598 /*
2599  * Another variant that disables interrupts and compensates for possible
2600  * cpu changes by refetching the per cpu area pointer.
2601  */
2602 static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2603 			  unsigned long addr, struct kmem_cache_cpu *c)
2604 {
2605 	void *p;
2606 	unsigned long flags;
2607 
2608 	local_irq_save(flags);
2609 #ifdef CONFIG_PREEMPT
2610 	/*
2611 	 * We may have been preempted and rescheduled on a different
2612 	 * cpu before disabling interrupts. Need to reload cpu area
2613 	 * pointer.
2614 	 */
2615 	c = this_cpu_ptr(s->cpu_slab);
2616 #endif
2617 
2618 	p = ___slab_alloc(s, gfpflags, node, addr, c);
2619 	local_irq_restore(flags);
2620 	return p;
2621 }
2622 
2623 /*
2624  * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2625  * have the fastpath folded into their functions. So no function call
2626  * overhead for requests that can be satisfied on the fastpath.
2627  *
2628  * The fastpath works by first checking if the lockless freelist can be used.
2629  * If not then __slab_alloc is called for slow processing.
2630  *
2631  * Otherwise we can simply pick the next object from the lockless free list.
2632  */
2633 static __always_inline void *slab_alloc_node(struct kmem_cache *s,
2634 		gfp_t gfpflags, int node, unsigned long addr)
2635 {
2636 	void *object;
2637 	struct kmem_cache_cpu *c;
2638 	struct page *page;
2639 	unsigned long tid;
2640 
2641 	s = slab_pre_alloc_hook(s, gfpflags);
2642 	if (!s)
2643 		return NULL;
2644 redo:
2645 	/*
2646 	 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2647 	 * enabled. We may switch back and forth between cpus while
2648 	 * reading from one cpu area. That does not matter as long
2649 	 * as we end up on the original cpu again when doing the cmpxchg.
2650 	 *
2651 	 * We should guarantee that tid and kmem_cache are retrieved on
2652 	 * the same cpu. They could differ under CONFIG_PREEMPT, so we need
2653 	 * to check that they match.
2654 	 */
2655 	do {
2656 		tid = this_cpu_read(s->cpu_slab->tid);
2657 		c = raw_cpu_ptr(s->cpu_slab);
2658 	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2659 		 unlikely(tid != READ_ONCE(c->tid)));
2660 
2661 	/*
2662 	 * Irqless object alloc/free algorithm used here depends on sequence
2663 	 * of fetching cpu_slab's data. tid should be fetched before anything
2664 	 * on c to guarantee that object and page associated with previous tid
2665 	 * won't be used with current tid. If we fetch tid first, object and
2666 	 * page could be one associated with next tid and our alloc/free
2667 	 * request will fail. In this case we simply retry, so there is no problem.
2668 	 */
2669 	barrier();
2670 
2671 	/*
2672 	 * The transaction ids are globally unique per cpu and per operation on
2673 	 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2674 	 * occurs on the right processor and that there was no operation on the
2675 	 * linked list in between.
2676 	 */
2677 
2678 	object = c->freelist;
2679 	page = c->page;
2680 	if (unlikely(!object || !node_match(page, node))) {
2681 		object = __slab_alloc(s, gfpflags, node, addr, c);
2682 		stat(s, ALLOC_SLOWPATH);
2683 	} else {
2684 		void *next_object = get_freepointer_safe(s, object);
2685 
2686 		/*
2687 		 * The cmpxchg will only match if there was no additional
2688 		 * operation and if we are on the right processor.
2689 		 *
2690 		 * The cmpxchg does the following atomically (without lock
2691 		 * semantics!)
2692 		 * 1. Relocate first pointer to the current per cpu area.
2693 		 * 2. Verify that tid and freelist have not been changed
2694 		 * 3. If they were not changed replace tid and freelist
2695 		 *
2696 		 * Since this is without lock semantics the protection is only
2697 		 * against code executing on this cpu *not* from access by
2698 		 * other cpus.
2699 		 */
2700 		if (unlikely(!this_cpu_cmpxchg_double(
2701 				s->cpu_slab->freelist, s->cpu_slab->tid,
2702 				object, tid,
2703 				next_object, next_tid(tid)))) {
2704 
2705 			note_cmpxchg_failure("slab_alloc", s, tid);
2706 			goto redo;
2707 		}
2708 		prefetch_freepointer(s, next_object);
2709 		stat(s, ALLOC_FASTPATH);
2710 	}
2711 
2712 	if (unlikely(gfpflags & __GFP_ZERO) && object)
2713 		memset(object, 0, s->object_size);
2714 
2715 	slab_post_alloc_hook(s, gfpflags, 1, &object);
2716 
2717 	return object;
2718 }
2719 
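/*
 * Editor's sketch: the tid is what makes the lockless cmpxchg ABA-safe.
 * A simplified single-word analogue packs a 16-bit freelist index with a
 * 48-bit transaction counter; the real fastpath does the same across two
 * words with this_cpu_cmpxchg_double():
 */
#if 0
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

static bool toy_fastpath_pop(_Atomic uint64_t *slot, uint16_t next_idx)
{
        uint64_t old = atomic_load(slot);               /* (tid << 16) | idx */
        uint64_t new = (((old >> 16) + 1) << 16) | next_idx;

        /* Fails if any other alloc/free ran on this slot in between. */
        return atomic_compare_exchange_strong(slot, &old, new);
}
#endif
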
2720 static __always_inline void *slab_alloc(struct kmem_cache *s,
2721 		gfp_t gfpflags, unsigned long addr)
2722 {
2723 	return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2724 }
2725 
2726 void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2727 {
2728 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2729 
2730 	trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2731 				s->size, gfpflags);
2732 
2733 	return ret;
2734 }
2735 EXPORT_SYMBOL(kmem_cache_alloc);
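
/*
 * Editor's note: typical usage of the interface exported above, e.g.
 * from a driver (the "demo" names are illustrative):
 *
 *	demo_cache = kmem_cache_create("demo", sizeof(struct demo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(demo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(demo_cache, obj);
 *	kmem_cache_destroy(demo_cache);
 */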
2736 
2737 #ifdef CONFIG_TRACING
2738 void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2739 {
2740 	void *ret = slab_alloc(s, gfpflags, _RET_IP_);
2741 	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2742 	kasan_kmalloc(s, ret, size, gfpflags);
2743 	return ret;
2744 }
2745 EXPORT_SYMBOL(kmem_cache_alloc_trace);
2746 #endif
2747 
2748 #ifdef CONFIG_NUMA
2749 void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2750 {
2751 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2752 
2753 	trace_kmem_cache_alloc_node(_RET_IP_, ret,
2754 				    s->object_size, s->size, gfpflags, node);
2755 
2756 	return ret;
2757 }
2758 EXPORT_SYMBOL(kmem_cache_alloc_node);
2759 
2760 #ifdef CONFIG_TRACING
2761 void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
2762 				    gfp_t gfpflags,
2763 				    int node, size_t size)
2764 {
2765 	void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
2766 
2767 	trace_kmalloc_node(_RET_IP_, ret,
2768 			   size, s->size, gfpflags, node);
2769 
2770 	kasan_kmalloc(s, ret, size, gfpflags);
2771 	return ret;
2772 }
2773 EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
2774 #endif
2775 #endif
2776 
2777 /*
2778  * Slow path handling. This may still be called frequently since objects
2779  * have a longer lifetime than the cpu slabs in most processing loads.
2780  *
2781  * So we still attempt to reduce cache line usage. Just take the slab
2782  * lock and free the item. If there is no additional partial page
2783  * handling required then we can return immediately.
2784  */
2785 static void __slab_free(struct kmem_cache *s, struct page *page,
2786 			void *head, void *tail, int cnt,
2787 			unsigned long addr)
2788 
2789 {
2790 	void *prior;
2791 	int was_frozen;
2792 	struct page new;
2793 	unsigned long counters;
2794 	struct kmem_cache_node *n = NULL;
2795 	unsigned long uninitialized_var(flags);
2796 
2797 	stat(s, FREE_SLOWPATH);
2798 
2799 	if (kmem_cache_debug(s) &&
2800 	    !free_debug_processing(s, page, head, tail, cnt, addr))
2801 		return;
2802 
2803 	do {
2804 		if (unlikely(n)) {
2805 			spin_unlock_irqrestore(&n->list_lock, flags);
2806 			n = NULL;
2807 		}
2808 		prior = page->freelist;
2809 		counters = page->counters;
2810 		set_freepointer(s, tail, prior);
2811 		new.counters = counters;
2812 		was_frozen = new.frozen;
2813 		new.inuse -= cnt;
2814 		if ((!new.inuse || !prior) && !was_frozen) {
2815 
2816 			if (kmem_cache_has_cpu_partial(s) && !prior) {
2817 
2818 				/*
2819 				 * Slab was on no list before and will be
2820 				 * partially empty
2821 				 * We can defer the list move and instead
2822 				 * freeze it.
2823 				 */
2824 				new.frozen = 1;
2825 
2826 			} else { /* Needs to be taken off a list */
2827 
2828 				n = get_node(s, page_to_nid(page));
2829 				/*
2830 				 * Speculatively acquire the list_lock.
2831 				 * If the cmpxchg does not succeed then we may
2832 				 * drop the list_lock without any processing.
2833 				 *
2834 				 * Otherwise the list_lock will synchronize with
2835 				 * other processors updating the list of slabs.
2836 				 */
2837 				spin_lock_irqsave(&n->list_lock, flags);
2838 
2839 			}
2840 		}
2841 
2842 	} while (!cmpxchg_double_slab(s, page,
2843 		prior, counters,
2844 		head, new.counters,
2845 		"__slab_free"));
2846 
2847 	if (likely(!n)) {
2848 
2849 		/*
2850 		 * If we just froze the page then put it onto the
2851 		 * per cpu partial list.
2852 		 */
2853 		if (new.frozen && !was_frozen) {
2854 			put_cpu_partial(s, page, 1);
2855 			stat(s, CPU_PARTIAL_FREE);
2856 		}
2857 		/*
2858 		 * The list lock was not taken therefore no list
2859 		 * activity can be necessary.
2860 		 */
2861 		if (was_frozen)
2862 			stat(s, FREE_FROZEN);
2863 		return;
2864 	}
2865 
2866 	if (unlikely(!new.inuse && n->nr_partial >= s->min_partial))
2867 		goto slab_empty;
2868 
2869 	/*
2870 	 * Objects left in the slab. If it was not on the partial list before
2871 	 * then add it.
2872 	 */
2873 	if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2874 		if (kmem_cache_debug(s))
2875 			remove_full(s, n, page);
2876 		add_partial(n, page, DEACTIVATE_TO_TAIL);
2877 		stat(s, FREE_ADD_PARTIAL);
2878 	}
2879 	spin_unlock_irqrestore(&n->list_lock, flags);
2880 	return;
2881 
2882 slab_empty:
2883 	if (prior) {
2884 		/*
2885 		 * Slab on the partial list.
2886 		 */
2887 		remove_partial(n, page);
2888 		stat(s, FREE_REMOVE_PARTIAL);
2889 	} else {
2890 		/* Slab must be on the full list */
2891 		remove_full(s, n, page);
2892 	}
2893 
2894 	spin_unlock_irqrestore(&n->list_lock, flags);
2895 	stat(s, FREE_SLAB);
2896 	discard_slab(s, page);
2897 }
2898 
2899 /*
2900  * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2901  * can perform fastpath freeing without additional function calls.
2902  *
2903  * The fastpath is only possible if we are freeing to the current cpu slab
2904  * of this processor. This is typically the case if we have just allocated
2905  * the item before.
2906  *
2907  * If fastpath is not possible then fall back to __slab_free where we deal
2908  * with all sorts of special processing.
2909  *
2910  * Bulk free of a freelist with several objects (all pointing to the
2911  * same page) is possible by specifying head and tail pointers, plus an object
2912  * count (cnt). Bulk free is indicated by the tail pointer being set.
2913  */
2914 static __always_inline void do_slab_free(struct kmem_cache *s,
2915 				struct page *page, void *head, void *tail,
2916 				int cnt, unsigned long addr)
2917 {
2918 	void *tail_obj = tail ? : head;
2919 	struct kmem_cache_cpu *c;
2920 	unsigned long tid;
2921 redo:
2922 	/*
2923 	 * Determine the current cpu's per cpu slab.
2924 	 * The cpu may change afterward. However that does not matter since
2925 	 * data is retrieved via this pointer. If we are on the same cpu
2926 	 * during the cmpxchg then the free will succeed.
2927 	 */
2928 	do {
2929 		tid = this_cpu_read(s->cpu_slab->tid);
2930 		c = raw_cpu_ptr(s->cpu_slab);
2931 	} while (IS_ENABLED(CONFIG_PREEMPT) &&
2932 		 unlikely(tid != READ_ONCE(c->tid)));
2933 
2934 	/* Same with comment on barrier() in slab_alloc_node() */
2935 	barrier();
2936 
2937 	if (likely(page == c->page)) {
2938 		set_freepointer(s, tail_obj, c->freelist);
2939 
2940 		if (unlikely(!this_cpu_cmpxchg_double(
2941 				s->cpu_slab->freelist, s->cpu_slab->tid,
2942 				c->freelist, tid,
2943 				head, next_tid(tid)))) {
2944 
2945 			note_cmpxchg_failure("slab_free", s, tid);
2946 			goto redo;
2947 		}
2948 		stat(s, FREE_FASTPATH);
2949 	} else
2950 		__slab_free(s, page, head, tail_obj, cnt, addr);
2951 
2952 }
2953 
2954 static __always_inline void slab_free(struct kmem_cache *s, struct page *page,
2955 				      void *head, void *tail, int cnt,
2956 				      unsigned long addr)
2957 {
2958 	slab_free_freelist_hook(s, head, tail);
2959 	/*
2960 	 * slab_free_freelist_hook() could have put the items into quarantine.
2961 	 * If so, no need to free them.
2962 	 */
2963 	if (s->flags & SLAB_KASAN && !(s->flags & SLAB_DESTROY_BY_RCU))
2964 		return;
2965 	do_slab_free(s, page, head, tail, cnt, addr);
2966 }
2967 
2968 #ifdef CONFIG_KASAN
2969 void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr)
2970 {
2971 	do_slab_free(cache, virt_to_head_page(x), x, NULL, 1, addr);
2972 }
2973 #endif
2974 
2975 void kmem_cache_free(struct kmem_cache *s, void *x)
2976 {
2977 	s = cache_from_obj(s, x);
2978 	if (!s)
2979 		return;
2980 	slab_free(s, virt_to_head_page(x), x, NULL, 1, _RET_IP_);
2981 	trace_kmem_cache_free(_RET_IP_, x);
2982 }
2983 EXPORT_SYMBOL(kmem_cache_free);
2984 
2985 struct detached_freelist {
2986 	struct page *page;
2987 	void *tail;
2988 	void *freelist;
2989 	int cnt;
2990 	struct kmem_cache *s;
2991 };
2992 
2993 /*
2994  * This function progressively scans the array of free objects (with
2995  * a limited look ahead) and extracts objects belonging to the same
2996  * page.  It builds a detached freelist directly within the given
2997  * page/objects.  This can happen without any need for
2998  * synchronization, because the objects are owned by the running process.
2999  * The freelist is built up as a singly linked list in the objects.
3000  * The idea is that this detached freelist can then be bulk
3001  * transferred to the real freelist(s), but only requiring a single
3002  * synchronization primitive.  Look ahead in the array is limited due
3003  * to performance reasons.
3004  */
3005 static inline
3006 int build_detached_freelist(struct kmem_cache *s, size_t size,
3007 			    void **p, struct detached_freelist *df)
3008 {
3009 	size_t first_skipped_index = 0;
3010 	int lookahead = 3;
3011 	void *object;
3012 	struct page *page;
3013 
3014 	/* Always re-init detached_freelist */
3015 	df->page = NULL;
3016 
3017 	do {
3018 		object = p[--size];
3019 		/* Do we need !ZERO_OR_NULL_PTR(object) here? (for kfree) */
3020 	} while (!object && size);
3021 
3022 	if (!object)
3023 		return 0;
3024 
3025 	page = virt_to_head_page(object);
3026 	if (!s) {
3027 		/* Handle kmalloc'ed objects */
3028 		if (unlikely(!PageSlab(page))) {
3029 			BUG_ON(!PageCompound(page));
3030 			kfree_hook(object);
3031 			__free_pages(page, compound_order(page));
3032 			p[size] = NULL; /* mark object processed */
3033 			return size;
3034 		}
3035 		/* Derive kmem_cache from object */
3036 		df->s = page->slab_cache;
3037 	} else {
3038 		df->s = cache_from_obj(s, object); /* Support for memcg */
3039 	}
3040 
3041 	/* Start new detached freelist */
3042 	df->page = page;
3043 	set_freepointer(df->s, object, NULL);
3044 	df->tail = object;
3045 	df->freelist = object;
3046 	p[size] = NULL; /* mark object processed */
3047 	df->cnt = 1;
3048 
3049 	while (size) {
3050 		object = p[--size];
3051 		if (!object)
3052 			continue; /* Skip processed objects */
3053 
3054 		/* df->page is always set at this point */
3055 		if (df->page == virt_to_head_page(object)) {
3056 			/* Opportunistically build the freelist */
3057 			set_freepointer(df->s, object, df->freelist);
3058 			df->freelist = object;
3059 			df->cnt++;
3060 			p[size] = NULL; /* mark object processed */
3061 
3062 			continue;
3063 		}
3064 
3065 		/* Limit look ahead search */
3066 		if (!--lookahead)
3067 			break;
3068 
3069 		if (!first_skipped_index)
3070 			first_skipped_index = size + 1;
3071 	}
3072 
3073 	return first_skipped_index;
3074 }
3075 
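/*
 * Editor's sketch: the grouping strategy above in userspace terms.
 * Objects are counted into a run as long as they share a page; a few
 * foreign objects are tolerated (lookahead) before scanning stops:
 */
#if 0
#include <stddef.h>
#include <stdint.h>

#define TOY_PAGE_SHIFT 12

/* Count trailing array entries on the same page as p[size - 1]. */
static size_t toy_same_page_run(void **p, size_t size)
{
        uintptr_t page = (uintptr_t)p[size - 1] >> TOY_PAGE_SHIFT;
        size_t n = 0, lookahead = 3;

        while (size--) {
                if ((uintptr_t)p[size] >> TOY_PAGE_SHIFT == page)
                        n++;            /* same page: would join the freelist */
                else if (!--lookahead)
                        break;          /* too many foreign objects: stop */
        }
        return n;
}
#endif
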
3076 /* Note that interrupts must be enabled when calling this function. */
3077 void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p)
3078 {
3079 	if (WARN_ON(!size))
3080 		return;
3081 
3082 	do {
3083 		struct detached_freelist df;
3084 
3085 		size = build_detached_freelist(s, size, p, &df);
3086 		if (unlikely(!df.page))
3087 			continue;
3088 
3089 		slab_free(df.s, df.page, df.freelist, df.tail, df.cnt,_RET_IP_);
3090 	} while (likely(size));
3091 }
3092 EXPORT_SYMBOL(kmem_cache_free_bulk);
3093 
3094 /* Note that interrupts must be enabled when calling this function. */
3095 int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size,
3096 			  void **p)
3097 {
3098 	struct kmem_cache_cpu *c;
3099 	int i;
3100 
3101 	/* memcg and kmem_cache debug support */
3102 	s = slab_pre_alloc_hook(s, flags);
3103 	if (unlikely(!s))
3104 		return false;
3105 	/*
3106 	 * Drain objects in the per cpu slab, while disabling local
3107 	 * IRQs, which protects against PREEMPT and interrupt
3108 	 * handlers invoking the normal fastpath.
3109 	 */
3110 	local_irq_disable();
3111 	c = this_cpu_ptr(s->cpu_slab);
3112 
3113 	for (i = 0; i < size; i++) {
3114 		void *object = c->freelist;
3115 
3116 		if (unlikely(!object)) {
3117 			/*
3118 			 * Invoking the slow path likely has the side-effect
3119 			 * of re-populating the per CPU c->freelist
3120 			 */
3121 			p[i] = ___slab_alloc(s, flags, NUMA_NO_NODE,
3122 					    _RET_IP_, c);
3123 			if (unlikely(!p[i]))
3124 				goto error;
3125 
3126 			c = this_cpu_ptr(s->cpu_slab);
3127 			continue; /* goto for-loop */
3128 		}
3129 		c->freelist = get_freepointer(s, object);
3130 		p[i] = object;
3131 	}
3132 	c->tid = next_tid(c->tid);
3133 	local_irq_enable();
3134 
3135 	/* Clear memory outside IRQ disabled fastpath loop */
3136 	if (unlikely(flags & __GFP_ZERO)) {
3137 		int j;
3138 
3139 		for (j = 0; j < i; j++)
3140 			memset(p[j], 0, s->object_size);
3141 	}
3142 
3143 	/* memcg and kmem_cache debug support */
3144 	slab_post_alloc_hook(s, flags, size, p);
3145 	return i;
3146 error:
3147 	local_irq_enable();
3148 	slab_post_alloc_hook(s, flags, i, p);
3149 	__kmem_cache_free_bulk(s, i, p);
3150 	return 0;
3151 }
3152 EXPORT_SYMBOL(kmem_cache_alloc_bulk);
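
/*
 * Editor's note: typical bulk usage (error handling elided; "demo_cache"
 * is illustrative). kmem_cache_alloc_bulk() is all-or-nothing: it returns
 * size on success and 0 on failure:
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(demo_cache, GFP_KERNEL, 16, objs)) {
 *		...
 *		kmem_cache_free_bulk(demo_cache, 16, objs);
 *	}
 */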
3153 
3154 
3155 /*
3156  * Object placement in a slab is made very easy because we always start at
3157  * offset 0. If we tune the size of the object to the alignment then we can
3158  * get the required alignment by putting one properly sized object after
3159  * another.
3160  *
3161  * Notice that the allocation order determines the sizes of the per cpu
3162  * caches. Each processor has always one slab available for allocations.
3163  * Increasing the allocation order reduces the number of times that slabs
3164  * must be moved on and off the partial lists and is therefore a factor in
3165  * locking overhead.
3166  */
3167 
3168 /*
3169  * Minimum / Maximum order of slab pages. This influences locking overhead
3170  * and slab fragmentation. A higher order reduces the number of partial slabs
3171  * and increases the number of allocations possible without having to
3172  * take the list_lock.
3173  */
3174 static int slub_min_order;
3175 static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
3176 static int slub_min_objects;
3177 
3178 /*
3179  * Calculate the order of allocation given a slab object size.
3180  *
3181  * The order of allocation has significant impact on performance and other
3182  * system components. Generally order 0 allocations should be preferred since
3183  * order 0 does not cause fragmentation in the page allocator. Larger objects
3184  * can be problematic to put into order 0 slabs because there may be too much
3185  * unused space left. We go to a higher order if more than 1/16th of the slab
3186  * would be wasted.
3187  *
3188  * In order to reach satisfactory performance we must ensure that a minimum
3189  * number of objects is in one slab. Otherwise we may generate too much
3190  * activity on the partial lists which requires taking the list_lock. This is
3191  * less a concern for large slabs though which are rarely used.
3192  *
3193  * slub_max_order specifies the order where we begin to stop considering the
3194  * number of objects in a slab as critical. If we reach slub_max_order then
3195  * we try to keep the page order as low as possible. So we accept more waste
3196  * of space in favor of a small page order.
3197  *
3198  * Higher order allocations also allow the placement of more objects in a
3199  * slab and thereby reduce object handling overhead. If the user has
3200  * requested a higher minimum order then we start with that one instead of
3201  * the smallest order which will fit the object.
3202  */
3203 static inline int slab_order(int size, int min_objects,
3204 				int max_order, int fract_leftover, int reserved)
3205 {
3206 	int order;
3207 	int rem;
3208 	int min_order = slub_min_order;
3209 
3210 	if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
3211 		return get_order(size * MAX_OBJS_PER_PAGE) - 1;
3212 
3213 	for (order = max(min_order, get_order(min_objects * size + reserved));
3214 			order <= max_order; order++) {
3215 
3216 		unsigned long slab_size = PAGE_SIZE << order;
3217 
3218 		rem = (slab_size - reserved) % size;
3219 
3220 		if (rem <= slab_size / fract_leftover)
3221 			break;
3222 	}
3223 
3224 	return order;
3225 }
3226 
3227 static inline int calculate_order(int size, int reserved)
3228 {
3229 	int order;
3230 	int min_objects;
3231 	int fraction;
3232 	int max_objects;
3233 
3234 	/*
3235 	 * Attempt to find best configuration for a slab. This
3236 	 * works by first attempting to generate a layout with
3237 	 * the best configuration and backing off gradually.
3238 	 *
3239 	 * First we increase the acceptable waste in a slab. Then
3240 	 * we reduce the minimum objects required in a slab.
3241 	 */
3242 	min_objects = slub_min_objects;
3243 	if (!min_objects)
3244 		min_objects = 4 * (fls(nr_cpu_ids) + 1);
3245 	max_objects = order_objects(slub_max_order, size, reserved);
3246 	min_objects = min(min_objects, max_objects);
3247 
3248 	while (min_objects > 1) {
3249 		fraction = 16;
3250 		while (fraction >= 4) {
3251 			order = slab_order(size, min_objects,
3252 					slub_max_order, fraction, reserved);
3253 			if (order <= slub_max_order)
3254 				return order;
3255 			fraction /= 2;
3256 		}
3257 		min_objects--;
3258 	}
3259 
3260 	/*
3261 	 * We were unable to place multiple objects in a slab. Now
3262 	 * lets see if we can place a single object there.
3263 	 */
3264 	order = slab_order(size, 1, slub_max_order, 1, reserved);
3265 	if (order <= slub_max_order)
3266 		return order;
3267 
3268 	/*
3269 	 * Doh this slab cannot be placed using slub_max_order.
3270 	 */
3271 	order = slab_order(size, 1, MAX_ORDER, 1, reserved);
3272 	if (order < MAX_ORDER)
3273 		return order;
3274 	return -ENOSYS;
3275 }
3276 
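/*
 * Editor's note, a worked example of the search above (ignoring the
 * min_objects lower bound on the starting order): for size = 704 and
 * reserved = 0, order 0 fits 4096 / 704 = 5 objects leaving 576 bytes,
 * and 576 > 4096 / 16 = 256, so fraction 16 rejects order 0. Order 1
 * fits 11 objects leaving 448 bytes, and 448 <= 8192 / 16 = 512, so
 * slab_order() settles on order 1.
 */
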
3277 static void
3278 init_kmem_cache_node(struct kmem_cache_node *n)
3279 {
3280 	n->nr_partial = 0;
3281 	spin_lock_init(&n->list_lock);
3282 	INIT_LIST_HEAD(&n->partial);
3283 #ifdef CONFIG_SLUB_DEBUG
3284 	atomic_long_set(&n->nr_slabs, 0);
3285 	atomic_long_set(&n->total_objects, 0);
3286 	INIT_LIST_HEAD(&n->full);
3287 #endif
3288 }
3289 
3290 static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
3291 {
3292 	BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
3293 			KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
3294 
3295 	/*
3296 	 * Must align to double word boundary for the double cmpxchg
3297 	 * instructions to work; see __pcpu_double_call_return_bool().
3298 	 */
3299 	s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
3300 				     2 * sizeof(void *));
3301 
3302 	if (!s->cpu_slab)
3303 		return 0;
3304 
3305 	init_kmem_cache_cpus(s);
3306 
3307 	return 1;
3308 }
3309 
3310 static struct kmem_cache *kmem_cache_node;
3311 
3312 /*
3313  * No kmalloc_node yet so do it by hand. We know that this is the first
3314  * slab on the node for this slabcache. There are no concurrent accesses
3315  * possible.
3316  *
3317  * Note that this function only works on the kmem_cache_node
3318  * when allocating for the kmem_cache_node. This is used for bootstrapping
3319  * memory on a fresh node that has no slab structures yet.
3320  */
3321 static void early_kmem_cache_node_alloc(int node)
3322 {
3323 	struct page *page;
3324 	struct kmem_cache_node *n;
3325 
3326 	BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
3327 
3328 	page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
3329 
3330 	BUG_ON(!page);
3331 	if (page_to_nid(page) != node) {
3332 		pr_err("SLUB: Unable to allocate memory from node %d\n", node);
3333 		pr_err("SLUB: Allocating a useless per node structure in order to be able to continue\n");
3334 	}
3335 
3336 	n = page->freelist;
3337 	BUG_ON(!n);
3338 	page->freelist = get_freepointer(kmem_cache_node, n);
3339 	page->inuse = 1;
3340 	page->frozen = 0;
3341 	kmem_cache_node->node[node] = n;
3342 #ifdef CONFIG_SLUB_DEBUG
3343 	init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
3344 	init_tracking(kmem_cache_node, n);
3345 #endif
3346 	kasan_kmalloc(kmem_cache_node, n, sizeof(struct kmem_cache_node),
3347 		      GFP_KERNEL);
3348 	init_kmem_cache_node(n);
3349 	inc_slabs_node(kmem_cache_node, node, page->objects);
3350 
3351 	/*
3352 	 * No locks need to be taken here as it has just been
3353 	 * initialized and there is no concurrent access.
3354 	 */
3355 	__add_partial(n, page, DEACTIVATE_TO_HEAD);
3356 }
3357 
3358 static void free_kmem_cache_nodes(struct kmem_cache *s)
3359 {
3360 	int node;
3361 	struct kmem_cache_node *n;
3362 
3363 	for_each_kmem_cache_node(s, node, n) {
3364 		kmem_cache_free(kmem_cache_node, n);
3365 		s->node[node] = NULL;
3366 	}
3367 }
3368 
3369 void __kmem_cache_release(struct kmem_cache *s)
3370 {
3371 	cache_random_seq_destroy(s);
3372 	free_percpu(s->cpu_slab);
3373 	free_kmem_cache_nodes(s);
3374 }
3375 
3376 static int init_kmem_cache_nodes(struct kmem_cache *s)
3377 {
3378 	int node;
3379 
3380 	for_each_node_state(node, N_NORMAL_MEMORY) {
3381 		struct kmem_cache_node *n;
3382 
3383 		if (slab_state == DOWN) {
3384 			early_kmem_cache_node_alloc(node);
3385 			continue;
3386 		}
3387 		n = kmem_cache_alloc_node(kmem_cache_node,
3388 						GFP_KERNEL, node);
3389 
3390 		if (!n) {
3391 			free_kmem_cache_nodes(s);
3392 			return 0;
3393 		}
3394 
3395 		s->node[node] = n;
3396 		init_kmem_cache_node(n);
3397 	}
3398 	return 1;
3399 }
3400 
3401 static void set_min_partial(struct kmem_cache *s, unsigned long min)
3402 {
3403 	if (min < MIN_PARTIAL)
3404 		min = MIN_PARTIAL;
3405 	else if (min > MAX_PARTIAL)
3406 		min = MAX_PARTIAL;
3407 	s->min_partial = min;
3408 }
3409 
3410 /*
3411  * calculate_sizes() determines the order and the distribution of data within
3412  * a slab object.
3413  */
3414 static int calculate_sizes(struct kmem_cache *s, int forced_order)
3415 {
3416 	unsigned long flags = s->flags;
3417 	size_t size = s->object_size;
3418 	int order;
3419 
3420 	/*
3421 	 * Round up object size to the next word boundary. We can only
3422 	 * place the free pointer at word boundaries and this determines
3423 	 * the possible location of the free pointer.
3424 	 */
3425 	size = ALIGN(size, sizeof(void *));
3426 
3427 #ifdef CONFIG_SLUB_DEBUG
3428 	/*
3429 	 * Determine if we can poison the object itself. If the user of
3430 	 * the slab may touch the object after free or before allocation
3431 	 * then we should never poison the object itself.
3432 	 */
3433 	if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
3434 			!s->ctor)
3435 		s->flags |= __OBJECT_POISON;
3436 	else
3437 		s->flags &= ~__OBJECT_POISON;
3438 
3439 
3440 	/*
3441 	 * If we are Redzoning then check if there is some space between the
3442 	 * end of the object and the free pointer. If not then add an
3443 	 * additional word to have some bytes to store Redzone information.
3444 	 */
3445 	if ((flags & SLAB_RED_ZONE) && size == s->object_size)
3446 		size += sizeof(void *);
3447 #endif
3448 
3449 	/*
3450 	 * With that we have determined the number of bytes in actual use
3451 	 * by the object. This is the potential offset to the free pointer.
3452 	 */
3453 	s->inuse = size;
3454 
3455 	if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
3456 		s->ctor)) {
3457 		/*
3458 		 * Relocate free pointer after the object if it is not
3459 		 * permitted to overwrite the first word of the object on
3460 		 * kmem_cache_free.
3461 		 *
3462 		 * This is the case if we do RCU, have a constructor or
3463 		 * destructor or are poisoning the objects.
3464 		 */
3465 		s->offset = size;
3466 		size += sizeof(void *);
3467 	}
3468 
3469 #ifdef CONFIG_SLUB_DEBUG
3470 	if (flags & SLAB_STORE_USER)
3471 		/*
3472 		 * Need to store information about allocs and frees after
3473 		 * the object.
3474 		 */
3475 		size += 2 * sizeof(struct track);
3476 #endif
3477 
3478 	kasan_cache_create(s, &size, &s->flags);
3479 #ifdef CONFIG_SLUB_DEBUG
3480 	if (flags & SLAB_RED_ZONE) {
3481 		/*
3482 		 * Add some empty padding so that we can catch
3483 		 * overwrites from earlier objects rather than let
3484 		 * tracking information or the free pointer be
3485 		 * corrupted if a user writes before the start
3486 		 * of the object.
3487 		 */
3488 		size += sizeof(void *);
3489 
3490 		s->red_left_pad = sizeof(void *);
3491 		s->red_left_pad = ALIGN(s->red_left_pad, s->align);
3492 		size += s->red_left_pad;
3493 	}
3494 #endif
3495 
3496 	/*
3497 	 * SLUB stores one object immediately after another beginning from
3498 	 * offset 0. In order to align the objects we have to simply size
3499 	 * each object to conform to the alignment.
3500 	 */
3501 	size = ALIGN(size, s->align);
3502 	s->size = size;
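	/*
	 * Worked example (a sketch, assuming 64-bit pointers and no debug
	 * flags): object_size = 20 is rounded up to 24. Without RCU, a
	 * constructor or poisoning, the free pointer overlays the start of
	 * the object, so s->offset stays 0. With SLAB_POISON set instead,
	 * the free pointer is relocated behind the object: s->offset = 24
	 * and the stored size grows by another sizeof(void *).
	 */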
3503 	if (forced_order >= 0)
3504 		order = forced_order;
3505 	else
3506 		order = calculate_order(size, s->reserved);
3507 
3508 	if (order < 0)
3509 		return 0;
3510 
3511 	s->allocflags = 0;
3512 	if (order)
3513 		s->allocflags |= __GFP_COMP;
3514 
3515 	if (s->flags & SLAB_CACHE_DMA)
3516 		s->allocflags |= GFP_DMA;
3517 
3518 	if (s->flags & SLAB_RECLAIM_ACCOUNT)
3519 		s->allocflags |= __GFP_RECLAIMABLE;
3520 
3521 	/*
3522 	 * Determine the number of objects per slab
3523 	 */
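	/*
	 * E.g. (a sketch, assuming 4K pages and no reserved bytes): order 1
	 * and size 256 encode 8192 / 256 = 32 objects per slab in s->oo,
	 * while s->min falls back to the smallest order that still fits a
	 * single object.
	 */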
3524 	s->oo = oo_make(order, size, s->reserved);
3525 	s->min = oo_make(get_order(size), size, s->reserved);
3526 	if (oo_objects(s->oo) > oo_objects(s->max))
3527 		s->max = s->oo;
3528 
3529 	return !!oo_objects(s->oo);
3530 }
3531 
3532 static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
3533 {
3534 	s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
3535 	s->reserved = 0;
3536 
3537 	if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3538 		s->reserved = sizeof(struct rcu_head);
3539 
3540 	if (!calculate_sizes(s, -1))
3541 		goto error;
3542 	if (disable_higher_order_debug) {
3543 		/*
3544 		 * Disable debugging flags that store metadata if the min slab
3545 		 * order increased.
3546 		 */
3547 		if (get_order(s->size) > get_order(s->object_size)) {
3548 			s->flags &= ~DEBUG_METADATA_FLAGS;
3549 			s->offset = 0;
3550 			if (!calculate_sizes(s, -1))
3551 				goto error;
3552 		}
3553 	}
3554 
3555 #if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3556     defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
3557 	if (system_has_cmpxchg_double() && (s->flags & SLAB_NO_CMPXCHG) == 0)
3558 		/* Enable fast mode */
3559 		s->flags |= __CMPXCHG_DOUBLE;
3560 #endif
3561 
3562 	/*
3563 	 * The larger the object size is, the more pages we want on the partial
3564 	 * list to avoid pounding the page allocator excessively.
3565 	 */
3566 	set_min_partial(s, ilog2(s->size) / 2);
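	/*
	 * Illustrative numbers (assuming MIN_PARTIAL = 5 and MAX_PARTIAL =
	 * 10): a 64-byte cache yields ilog2(64) / 2 = 3, clamped up to 5,
	 * while a 4096-byte cache yields ilog2(4096) / 2 = 6, kept as-is.
	 */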
3567 
3568 	/*
3569 	 * cpu_partial determines the maximum number of objects kept in the
3570 	 * per cpu partial lists of a processor.
3571 	 *
3572 	 * Per cpu partial lists mainly contain slabs that just have one
3573 	 * object freed. If they are used for allocation then they can be
3574 	 * filled up again with minimal effort. The slab will never hit the
3575 	 * per node partial lists and therefore no locking will be required.
3576 	 *
3577 	 * This setting also determines
3578 	 *
3579 	 * A) The number of objects from per cpu partial slabs dumped to the
3580 	 *    per node list when we reach the limit.
3581 	 * B) The number of objects in cpu partial slabs to extract from the
3582 	 *    per node list when we run out of per cpu objects. We only fetch
3583 	 *    50% to keep some capacity around for frees.
3584 	 */
3585 	if (!kmem_cache_has_cpu_partial(s))
3586 		s->cpu_partial = 0;
3587 	else if (s->size >= PAGE_SIZE)
3588 		s->cpu_partial = 2;
3589 	else if (s->size >= 1024)
3590 		s->cpu_partial = 6;
3591 	else if (s->size >= 256)
3592 		s->cpu_partial = 13;
3593 	else
3594 		s->cpu_partial = 30;
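	/*
	 * Example: a 512-byte cache falls into the ">= 256" bucket and gets
	 * cpu_partial = 13, a 2048-byte one gets 6. Per note B) above, a
	 * refill from the node partial list then aims at roughly half of
	 * that count.
	 */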
3595 
3596 #ifdef CONFIG_NUMA
3597 	s->remote_node_defrag_ratio = 1000;
3598 #endif
3599 
3600 	/* Initialize the pre-computed randomized freelist if slab is up */
3601 	if (slab_state >= UP) {
3602 		if (init_cache_random_seq(s))
3603 			goto error;
3604 	}
3605 
3606 	if (!init_kmem_cache_nodes(s))
3607 		goto error;
3608 
3609 	if (alloc_kmem_cache_cpus(s))
3610 		return 0;
3611 
3612 	free_kmem_cache_nodes(s);
3613 error:
3614 	if (flags & SLAB_PANIC)
3615 		panic("Cannot create slab %s size=%lu realsize=%u order=%u offset=%u flags=%lx\n",
3616 		      s->name, (unsigned long)s->size, s->size,
3617 		      oo_order(s->oo), s->offset, flags);
3618 	return -EINVAL;
3619 }
3620 
3621 static void list_slab_objects(struct kmem_cache *s, struct page *page,
3622 							const char *text)
3623 {
3624 #ifdef CONFIG_SLUB_DEBUG
3625 	void *addr = page_address(page);
3626 	void *p;
3627 	unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3628 				     sizeof(long), GFP_ATOMIC);
3629 	if (!map)
3630 		return;
3631 	slab_err(s, page, text, s->name);
3632 	slab_lock(page);
3633 
3634 	get_map(s, page, map);
3635 	for_each_object(p, s, addr, page->objects) {
3636 
3637 		if (!test_bit(slab_index(p, s, addr), map)) {
3638 			pr_err("INFO: Object 0x%p @offset=%tu\n", p, p - addr);
3639 			print_tracking(s, p);
3640 		}
3641 	}
3642 	slab_unlock(page);
3643 	kfree(map);
3644 #endif
3645 }
3646 
3647 /*
3648  * Attempt to free all partial slabs on a node.
3649  * This is called from __kmem_cache_shutdown(). We must take list_lock
3650  * because a sysfs file might still access the partial list after shutdown.
3651  */
3652 static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
3653 {
3654 	LIST_HEAD(discard);
3655 	struct page *page, *h;
3656 
3657 	BUG_ON(irqs_disabled());
3658 	spin_lock_irq(&n->list_lock);
3659 	list_for_each_entry_safe(page, h, &n->partial, lru) {
3660 		if (!page->inuse) {
3661 			remove_partial(n, page);
3662 			list_add(&page->lru, &discard);
3663 		} else {
3664 			list_slab_objects(s, page,
3665 			"Objects remaining in %s on __kmem_cache_shutdown()");
3666 		}
3667 	}
3668 	spin_unlock_irq(&n->list_lock);
3669 
3670 	list_for_each_entry_safe(page, h, &discard, lru)
3671 		discard_slab(s, page);
3672 }
3673 
3674 /*
3675  * Release all resources used by a slab cache.
3676  */
3677 int __kmem_cache_shutdown(struct kmem_cache *s)
3678 {
3679 	int node;
3680 	struct kmem_cache_node *n;
3681 
3682 	flush_all(s);
3683 	/* Attempt to free all objects */
3684 	for_each_kmem_cache_node(s, node, n) {
3685 		free_partial(s, n);
3686 		if (n->nr_partial || slabs_node(s, node))
3687 			return 1;
3688 	}
3689 	return 0;
3690 }
3691 
3692 /********************************************************************
3693  *		Kmalloc subsystem
3694  *******************************************************************/
3695 
3696 static int __init setup_slub_min_order(char *str)
3697 {
3698 	get_option(&str, &slub_min_order);
3699 
3700 	return 1;
3701 }
3702 
3703 __setup("slub_min_order=", setup_slub_min_order);
3704 
3705 static int __init setup_slub_max_order(char *str)
3706 {
3707 	get_option(&str, &slub_max_order);
3708 	slub_max_order = min(slub_max_order, MAX_ORDER - 1);
3709 
3710 	return 1;
3711 }
3712 
3713 __setup("slub_max_order=", setup_slub_max_order);
3714 
3715 static int __init setup_slub_min_objects(char *str)
3716 {
3717 	get_option(&str, &slub_min_objects);
3718 
3719 	return 1;
3720 }
3721 
3722 __setup("slub_min_objects=", setup_slub_min_objects);
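/*
 * The three handlers above wire up the usual SLUB boot parameters, e.g.
 * on the kernel command line (illustrative values):
 *
 *	slub_min_order=1 slub_max_order=3 slub_min_objects=16
 *
 * slub_max_order is additionally capped at MAX_ORDER - 1.
 */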
3723 
3724 void *__kmalloc(size_t size, gfp_t flags)
3725 {
3726 	struct kmem_cache *s;
3727 	void *ret;
3728 
3729 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
3730 		return kmalloc_large(size, flags);
3731 
3732 	s = kmalloc_slab(size, flags);
3733 
3734 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3735 		return s;
3736 
3737 	ret = slab_alloc(s, flags, _RET_IP_);
3738 
3739 	trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
3740 
3741 	kasan_kmalloc(s, ret, size, flags);
3742 
3743 	return ret;
3744 }
3745 EXPORT_SYMBOL(__kmalloc);
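/*
 * Size routing sketch: with a typical KMALLOC_MAX_CACHE_SIZE of two
 * pages, kmalloc(100, GFP_KERNEL) is served from the kmalloc-128 cache
 * via slab_alloc(), while kmalloc(16384, GFP_KERNEL) bypasses the slab
 * caches through kmalloc_large().
 */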
3746 
3747 #ifdef CONFIG_NUMA
3748 static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3749 {
3750 	struct page *page;
3751 	void *ptr = NULL;
3752 
3753 	flags |= __GFP_COMP | __GFP_NOTRACK;
3754 	page = alloc_pages_node(node, flags, get_order(size));
3755 	if (page)
3756 		ptr = page_address(page);
3757 
3758 	kmalloc_large_node_hook(ptr, size, flags);
3759 	return ptr;
3760 }
3761 
3762 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3763 {
3764 	struct kmem_cache *s;
3765 	void *ret;
3766 
3767 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
3768 		ret = kmalloc_large_node(size, flags, node);
3769 
3770 		trace_kmalloc_node(_RET_IP_, ret,
3771 				   size, PAGE_SIZE << get_order(size),
3772 				   flags, node);
3773 
3774 		return ret;
3775 	}
3776 
3777 	s = kmalloc_slab(size, flags);
3778 
3779 	if (unlikely(ZERO_OR_NULL_PTR(s)))
3780 		return s;
3781 
3782 	ret = slab_alloc_node(s, flags, node, _RET_IP_);
3783 
3784 	trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
3785 
3786 	kasan_kmalloc(s, ret, size, flags);
3787 
3788 	return ret;
3789 }
3790 EXPORT_SYMBOL(__kmalloc_node);
3791 #endif
3792 
3793 #ifdef CONFIG_HARDENED_USERCOPY
3794 /*
3795  * Rejects objects that are incorrectly sized.
3796  *
3797  * Returns NULL if check passes, otherwise const char * to name of cache
3798  * to indicate an error.
3799  */
3800 const char *__check_heap_object(const void *ptr, unsigned long n,
3801 				struct page *page)
3802 {
3803 	struct kmem_cache *s;
3804 	unsigned long offset;
3805 	size_t object_size;
3806 
3807 	/* Find object and usable object size. */
3808 	s = page->slab_cache;
3809 	object_size = slab_ksize(s);
3810 
3811 	/* Reject impossible pointers. */
3812 	if (ptr < page_address(page))
3813 		return s->name;
3814 
3815 	/* Find offset within object. */
3816 	offset = (ptr - page_address(page)) % s->size;
3817 
3818 	/* Adjust for redzone and reject if within the redzone. */
3819 	if (kmem_cache_debug(s) && s->flags & SLAB_RED_ZONE) {
3820 		if (offset < s->red_left_pad)
3821 			return s->name;
3822 		offset -= s->red_left_pad;
3823 	}
3824 
3825 	/* Allow address range falling entirely within object size. */
3826 	if (offset <= object_size && n <= object_size - offset)
3827 		return NULL;
3828 
3829 	return s->name;
3830 }
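/*
 * Worked example (a sketch): for a cache with s->size = 128 and a
 * usable object_size of 100, a pointer 40 bytes into an object may be
 * copied for up to 60 bytes; asking for 70 fails the
 * "n <= object_size - offset" test above and the cache name is
 * returned so the caller can report the violation.
 */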
3831 #endif /* CONFIG_HARDENED_USERCOPY */
3832 
3833 static size_t __ksize(const void *object)
3834 {
3835 	struct page *page;
3836 
3837 	if (unlikely(object == ZERO_SIZE_PTR))
3838 		return 0;
3839 
3840 	page = virt_to_head_page(object);
3841 
3842 	if (unlikely(!PageSlab(page))) {
3843 		WARN_ON(!PageCompound(page));
3844 		return PAGE_SIZE << compound_order(page);
3845 	}
3846 
3847 	return slab_ksize(page->slab_cache);
3848 }
3849 
3850 size_t ksize(const void *object)
3851 {
3852 	size_t size = __ksize(object);
3853 	/* We assume that ksize callers could use the whole allocated area,
3854 	 * so we need to unpoison this area.
3855 	 */
3856 	kasan_unpoison_shadow(object, size);
3857 	return size;
3858 }
3859 EXPORT_SYMBOL(ksize);
3860 
3861 void kfree(const void *x)
3862 {
3863 	struct page *page;
3864 	void *object = (void *)x;
3865 
3866 	trace_kfree(_RET_IP_, x);
3867 
3868 	if (unlikely(ZERO_OR_NULL_PTR(x)))
3869 		return;
3870 
3871 	page = virt_to_head_page(x);
3872 	if (unlikely(!PageSlab(page))) {
3873 		BUG_ON(!PageCompound(page));
3874 		kfree_hook(x);
3875 		__free_pages(page, compound_order(page));
3876 		return;
3877 	}
3878 	slab_free(page->slab_cache, page, object, NULL, 1, _RET_IP_);
3879 }
3880 EXPORT_SYMBOL(kfree);
3881 
3882 #define SHRINK_PROMOTE_MAX 32
3883 
3884 /*
3885  * kmem_cache_shrink discards empty slabs and promotes the slabs filled
3886  * up most to the head of the partial lists. New allocations will then
3887  * fill those up and thus they can be removed from the partial lists.
3888  *
3889  * The slabs with the least items are placed last. This results in them
3890  * The slabs with the least items are placed last. This results in them
3891  * being allocated from last, increasing the chance that the remaining
3892  * objects in them are freed as well.
 */
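/*
 * For example, with SHRINK_PROMOTE_MAX = 32 a partial slab with three
 * free objects ends up on promote[2]; splicing the lists from index 31
 * down to 0 then puts the fullest slabs at the head of n->partial,
 * while completely empty slabs are discarded outright.
 */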
3893 int __kmem_cache_shrink(struct kmem_cache *s)
3894 {
3895 	int node;
3896 	int i;
3897 	struct kmem_cache_node *n;
3898 	struct page *page;
3899 	struct page *t;
3900 	struct list_head discard;
3901 	struct list_head promote[SHRINK_PROMOTE_MAX];
3902 	unsigned long flags;
3903 	int ret = 0;
3904 
3905 	flush_all(s);
3906 	for_each_kmem_cache_node(s, node, n) {
3907 		INIT_LIST_HEAD(&discard);
3908 		for (i = 0; i < SHRINK_PROMOTE_MAX; i++)
3909 			INIT_LIST_HEAD(promote + i);
3910 
3911 		spin_lock_irqsave(&n->list_lock, flags);
3912 
3913 		/*
3914 		 * Build lists of slabs to discard or promote.
3915 		 *
3916 		 * Note that concurrent frees may occur while we hold the
3917 		 * list_lock. page->inuse here is the upper limit.
3918 		 */
3919 		list_for_each_entry_safe(page, t, &n->partial, lru) {
3920 			int free = page->objects - page->inuse;
3921 
3922 			/* Do not reread page->inuse */
3923 			barrier();
3924 
3925 			/* We do not keep full slabs on the list */
3926 			BUG_ON(free <= 0);
3927 
3928 			if (free == page->objects) {
3929 				list_move(&page->lru, &discard);
3930 				n->nr_partial--;
3931 			} else if (free <= SHRINK_PROMOTE_MAX)
3932 				list_move(&page->lru, promote + free - 1);
3933 		}
3934 
3935 		/*
3936 		 * Promote the slabs filled up most to the head of the
3937 		 * partial list.
3938 		 */
3939 		for (i = SHRINK_PROMOTE_MAX - 1; i >= 0; i--)
3940 			list_splice(promote + i, &n->partial);
3941 
3942 		spin_unlock_irqrestore(&n->list_lock, flags);
3943 
3944 		/* Release empty slabs */
3945 		list_for_each_entry_safe(page, t, &discard, lru)
3946 			discard_slab(s, page);
3947 
3948 		if (slabs_node(s, node))
3949 			ret = 1;
3950 	}
3951 
3952 	return ret;
3953 }
3954 
3955 static int slab_mem_going_offline_callback(void *arg)
3956 {
3957 	struct kmem_cache *s;
3958 
3959 	mutex_lock(&slab_mutex);
3960 	list_for_each_entry(s, &slab_caches, list)
3961 		__kmem_cache_shrink(s);
3962 	mutex_unlock(&slab_mutex);
3963 
3964 	return 0;
3965 }
3966 
3967 static void slab_mem_offline_callback(void *arg)
3968 {
3969 	struct kmem_cache_node *n;
3970 	struct kmem_cache *s;
3971 	struct memory_notify *marg = arg;
3972 	int offline_node;
3973 
3974 	offline_node = marg->status_change_nid_normal;
3975 
3976 	/*
3977 	 * If the node still has available memory, we still need the
3978 	 * kmem_cache_node structure for it, so there is nothing to do.
3979 	 */
3980 	if (offline_node < 0)
3981 		return;
3982 
3983 	mutex_lock(&slab_mutex);
3984 	list_for_each_entry(s, &slab_caches, list) {
3985 		n = get_node(s, offline_node);
3986 		if (n) {
3987 			/*
3988 			 * if n->nr_slabs > 0, slabs still exist on the node
3989 			 * that is going down. We were unable to free them,
3990 			 * and offline_pages() should not have called this
3991 			 * callback in that case. So, we must fail.
3992 			 */
3993 			BUG_ON(slabs_node(s, offline_node));
3994 
3995 			s->node[offline_node] = NULL;
3996 			kmem_cache_free(kmem_cache_node, n);
3997 		}
3998 	}
3999 	mutex_unlock(&slab_mutex);
4000 }
4001 
4002 static int slab_mem_going_online_callback(void *arg)
4003 {
4004 	struct kmem_cache_node *n;
4005 	struct kmem_cache *s;
4006 	struct memory_notify *marg = arg;
4007 	int nid = marg->status_change_nid_normal;
4008 	int ret = 0;
4009 
4010 	/*
4011 	 * If the node's memory is already available, then kmem_cache_node is
4012 	 * already created. Nothing to do.
4013 	 */
4014 	if (nid < 0)
4015 		return 0;
4016 
4017 	/*
4018 	 * We are bringing a node online. No memory is available yet. We must
4019 	 * allocate a kmem_cache_node structure in order to bring the node
4020 	 * online.
4021 	 */
4022 	mutex_lock(&slab_mutex);
4023 	list_for_each_entry(s, &slab_caches, list) {
4024 		/*
4025 		 * XXX: kmem_cache_alloc_node will fall back to other nodes
4026 		 *      since memory is not yet available from the node that
4027 		 *      is brought up.
4028 		 */
4029 		n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
4030 		if (!n) {
4031 			ret = -ENOMEM;
4032 			goto out;
4033 		}
4034 		init_kmem_cache_node(n);
4035 		s->node[nid] = n;
4036 	}
4037 out:
4038 	mutex_unlock(&slab_mutex);
4039 	return ret;
4040 }
4041 
4042 static int slab_memory_callback(struct notifier_block *self,
4043 				unsigned long action, void *arg)
4044 {
4045 	int ret = 0;
4046 
4047 	switch (action) {
4048 	case MEM_GOING_ONLINE:
4049 		ret = slab_mem_going_online_callback(arg);
4050 		break;
4051 	case MEM_GOING_OFFLINE:
4052 		ret = slab_mem_going_offline_callback(arg);
4053 		break;
4054 	case MEM_OFFLINE:
4055 	case MEM_CANCEL_ONLINE:
4056 		slab_mem_offline_callback(arg);
4057 		break;
4058 	case MEM_ONLINE:
4059 	case MEM_CANCEL_OFFLINE:
4060 		break;
4061 	}
4062 	if (ret)
4063 		ret = notifier_from_errno(ret);
4064 	else
4065 		ret = NOTIFY_OK;
4066 	return ret;
4067 }
4068 
4069 static struct notifier_block slab_memory_callback_nb = {
4070 	.notifier_call = slab_memory_callback,
4071 	.priority = SLAB_CALLBACK_PRI,
4072 };
4073 
4074 /********************************************************************
4075  *			Basic setup of slabs
4076  *******************************************************************/
4077 
4078 /*
4079  * Used for early kmem_cache structures that were allocated using
4080  * the page allocator. Allocate them properly then fix up the pointers
4081  * that may be pointing to the wrong kmem_cache structure.
4082  */
4083 
4084 static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
4085 {
4086 	int node;
4087 	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
4088 	struct kmem_cache_node *n;
4089 
4090 	memcpy(s, static_cache, kmem_cache->object_size);
4091 
4092 	/*
4093 	 * This runs very early, and only the boot processor is supposed to be
4094 	 * up.  Even if it weren't true, IRQs are not up so we couldn't fire
4095 	 * IPIs around.
4096 	 */
4097 	__flush_cpu_slab(s, smp_processor_id());
4098 	for_each_kmem_cache_node(s, node, n) {
4099 		struct page *p;
4100 
4101 		list_for_each_entry(p, &n->partial, lru)
4102 			p->slab_cache = s;
4103 
4104 #ifdef CONFIG_SLUB_DEBUG
4105 		list_for_each_entry(p, &n->full, lru)
4106 			p->slab_cache = s;
4107 #endif
4108 	}
4109 	slab_init_memcg_params(s);
4110 	list_add(&s->list, &slab_caches);
4111 	return s;
4112 }
4113 
4114 void __init kmem_cache_init(void)
4115 {
4116 	static __initdata struct kmem_cache boot_kmem_cache,
4117 		boot_kmem_cache_node;
4118 
4119 	if (debug_guardpage_minorder())
4120 		slub_max_order = 0;
4121 
4122 	kmem_cache_node = &boot_kmem_cache_node;
4123 	kmem_cache = &boot_kmem_cache;
4124 
4125 	create_boot_cache(kmem_cache_node, "kmem_cache_node",
4126 		sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
4127 
4128 	register_hotmemory_notifier(&slab_memory_callback_nb);
4129 
4130 	/* Able to allocate the per node structures */
4131 	slab_state = PARTIAL;
4132 
4133 	create_boot_cache(kmem_cache, "kmem_cache",
4134 			offsetof(struct kmem_cache, node) +
4135 				nr_node_ids * sizeof(struct kmem_cache_node *),
4136 		       SLAB_HWCACHE_ALIGN);
4137 
4138 	kmem_cache = bootstrap(&boot_kmem_cache);
4139 
4140 	/*
4141 	 * Allocate kmem_cache_node properly from the kmem_cache slab.
4142 	 * kmem_cache_node is separately allocated so no need to
4143 	 * update any list pointers.
4144 	 */
4145 	kmem_cache_node = bootstrap(&boot_kmem_cache_node);
4146 
4147 	/* Now we can use the kmem_cache to allocate kmalloc slabs */
4148 	setup_kmalloc_cache_index_table();
4149 	create_kmalloc_caches(0);
4150 
4151 	/* Setup random freelists for each cache */
4152 	init_freelist_randomization();
4153 
4154 	cpuhp_setup_state_nocalls(CPUHP_SLUB_DEAD, "slub:dead", NULL,
4155 				  slub_cpu_dead);
4156 
4157 	pr_info("SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d, CPUs=%d, Nodes=%d\n",
4158 		cache_line_size(),
4159 		slub_min_order, slub_max_order, slub_min_objects,
4160 		nr_cpu_ids, nr_node_ids);
4161 }
4162 
4163 void __init kmem_cache_init_late(void)
4164 {
4165 }
4166 
4167 struct kmem_cache *
4168 __kmem_cache_alias(const char *name, size_t size, size_t align,
4169 		   unsigned long flags, void (*ctor)(void *))
4170 {
4171 	struct kmem_cache *s, *c;
4172 
4173 	s = find_mergeable(size, align, flags, name, ctor);
4174 	if (s) {
4175 		s->refcount++;
4176 
4177 		/*
4178 		 * Adjust the object sizes so that we clear
4179 		 * the complete object on kzalloc.
4180 		 */
4181 		s->object_size = max(s->object_size, (int)size);
4182 		s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
4183 
4184 		for_each_memcg_cache(c, s) {
4185 			c->object_size = s->object_size;
4186 			c->inuse = max_t(int, c->inuse,
4187 					 ALIGN(size, sizeof(void *)));
4188 		}
4189 
4190 		if (sysfs_slab_alias(s, name)) {
4191 			s->refcount--;
4192 			s = NULL;
4193 		}
4194 	}
4195 
4196 	return s;
4197 }
4198 
4199 int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
4200 {
4201 	int err;
4202 
4203 	err = kmem_cache_open(s, flags);
4204 	if (err)
4205 		return err;
4206 
4207 	/* Mutex is not taken during early boot */
4208 	if (slab_state <= UP)
4209 		return 0;
4210 
4211 	memcg_propagate_slab_attrs(s);
4212 	err = sysfs_slab_add(s);
4213 	if (err)
4214 		__kmem_cache_release(s);
4215 
4216 	return err;
4217 }
4218 
4219 void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
4220 {
4221 	struct kmem_cache *s;
4222 	void *ret;
4223 
4224 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
4225 		return kmalloc_large(size, gfpflags);
4226 
4227 	s = kmalloc_slab(size, gfpflags);
4228 
4229 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4230 		return s;
4231 
4232 	ret = slab_alloc(s, gfpflags, caller);
4233 
4234 	/* Honor the call site pointer we received. */
4235 	trace_kmalloc(caller, ret, size, s->size, gfpflags);
4236 
4237 	return ret;
4238 }
4239 
4240 #ifdef CONFIG_NUMA
4241 void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
4242 					int node, unsigned long caller)
4243 {
4244 	struct kmem_cache *s;
4245 	void *ret;
4246 
4247 	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
4248 		ret = kmalloc_large_node(size, gfpflags, node);
4249 
4250 		trace_kmalloc_node(caller, ret,
4251 				   size, PAGE_SIZE << get_order(size),
4252 				   gfpflags, node);
4253 
4254 		return ret;
4255 	}
4256 
4257 	s = kmalloc_slab(size, gfpflags);
4258 
4259 	if (unlikely(ZERO_OR_NULL_PTR(s)))
4260 		return s;
4261 
4262 	ret = slab_alloc_node(s, gfpflags, node, caller);
4263 
4264 	/* Honor the call site pointer we received. */
4265 	trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
4266 
4267 	return ret;
4268 }
4269 #endif
4270 
4271 #ifdef CONFIG_SYSFS
4272 static int count_inuse(struct page *page)
4273 {
4274 	return page->inuse;
4275 }
4276 
4277 static int count_total(struct page *page)
4278 {
4279 	return page->objects;
4280 }
4281 #endif
4282 
4283 #ifdef CONFIG_SLUB_DEBUG
4284 static int validate_slab(struct kmem_cache *s, struct page *page,
4285 						unsigned long *map)
4286 {
4287 	void *p;
4288 	void *addr = page_address(page);
4289 
4290 	if (!check_slab(s, page) ||
4291 			!on_freelist(s, page, NULL))
4292 		return 0;
4293 
4294 	/* Now we know that a valid freelist exists */
4295 	bitmap_zero(map, page->objects);
4296 
4297 	get_map(s, page, map);
4298 	for_each_object(p, s, addr, page->objects) {
4299 		if (test_bit(slab_index(p, s, addr), map))
4300 			if (!check_object(s, page, p, SLUB_RED_INACTIVE))
4301 				return 0;
4302 	}
4303 
4304 	for_each_object(p, s, addr, page->objects)
4305 		if (!test_bit(slab_index(p, s, addr), map))
4306 			if (!check_object(s, page, p, SLUB_RED_ACTIVE))
4307 				return 0;
4308 	return 1;
4309 }
4310 
4311 static void validate_slab_slab(struct kmem_cache *s, struct page *page,
4312 						unsigned long *map)
4313 {
4314 	slab_lock(page);
4315 	validate_slab(s, page, map);
4316 	slab_unlock(page);
4317 }
4318 
4319 static int validate_slab_node(struct kmem_cache *s,
4320 		struct kmem_cache_node *n, unsigned long *map)
4321 {
4322 	unsigned long count = 0;
4323 	struct page *page;
4324 	unsigned long flags;
4325 
4326 	spin_lock_irqsave(&n->list_lock, flags);
4327 
4328 	list_for_each_entry(page, &n->partial, lru) {
4329 		validate_slab_slab(s, page, map);
4330 		count++;
4331 	}
4332 	if (count != n->nr_partial)
4333 		pr_err("SLUB %s: %ld partial slabs counted but counter=%ld\n",
4334 		       s->name, count, n->nr_partial);
4335 
4336 	if (!(s->flags & SLAB_STORE_USER))
4337 		goto out;
4338 
4339 	list_for_each_entry(page, &n->full, lru) {
4340 		validate_slab_slab(s, page, map);
4341 		count++;
4342 	}
4343 	if (count != atomic_long_read(&n->nr_slabs))
4344 		pr_err("SLUB: %s %ld slabs counted but counter=%ld\n",
4345 		       s->name, count, atomic_long_read(&n->nr_slabs));
4346 
4347 out:
4348 	spin_unlock_irqrestore(&n->list_lock, flags);
4349 	return count;
4350 }
4351 
4352 static long validate_slab_cache(struct kmem_cache *s)
4353 {
4354 	int node;
4355 	unsigned long count = 0;
4356 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4357 				sizeof(unsigned long), GFP_KERNEL);
4358 	struct kmem_cache_node *n;
4359 
4360 	if (!map)
4361 		return -ENOMEM;
4362 
4363 	flush_all(s);
4364 	for_each_kmem_cache_node(s, node, n)
4365 		count += validate_slab_node(s, n, map);
4366 	kfree(map);
4367 	return count;
4368 }
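/*
 * Validation is normally triggered through sysfs, e.g.:
 *
 *	echo 1 > /sys/kernel/slab/kmalloc-64/validate
 *
 * which reaches this function via validate_store() further below.
 */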
4369 /*
4370  * Generate lists of code addresses where slabcache objects are allocated
4371  * and freed.
4372  */
4373 
4374 struct location {
4375 	unsigned long count;
4376 	unsigned long addr;
4377 	long long sum_time;
4378 	long min_time;
4379 	long max_time;
4380 	long min_pid;
4381 	long max_pid;
4382 	DECLARE_BITMAP(cpus, NR_CPUS);
4383 	nodemask_t nodes;
4384 };
4385 
4386 struct loc_track {
4387 	unsigned long max;
4388 	unsigned long count;
4389 	struct location *loc;
4390 };
4391 
4392 static void free_loc_track(struct loc_track *t)
4393 {
4394 	if (t->max)
4395 		free_pages((unsigned long)t->loc,
4396 			get_order(sizeof(struct location) * t->max));
4397 }
4398 
4399 static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
4400 {
4401 	struct location *l;
4402 	int order;
4403 
4404 	order = get_order(sizeof(struct location) * max);
4405 
4406 	l = (void *)__get_free_pages(flags, order);
4407 	if (!l)
4408 		return 0;
4409 
4410 	if (t->count) {
4411 		memcpy(l, t->loc, sizeof(struct location) * t->count);
4412 		free_loc_track(t);
4413 	}
4414 	t->max = max;
4415 	t->loc = l;
4416 	return 1;
4417 }
4418 
4419 static int add_location(struct loc_track *t, struct kmem_cache *s,
4420 				const struct track *track)
4421 {
4422 	long start, end, pos;
4423 	struct location *l;
4424 	unsigned long caddr;
4425 	unsigned long age = jiffies - track->when;
4426 
4427 	start = -1;
4428 	end = t->count;
4429 
4430 	for ( ; ; ) {
4431 		pos = start + (end - start + 1) / 2;
4432 
4433 		/*
4434 		 * There is nothing at "end". If we end up there
4435 		 * we need to insert the new element before "end".
4436 		 */
4437 		if (pos == end)
4438 			break;
4439 
4440 		caddr = t->loc[pos].addr;
4441 		if (track->addr == caddr) {
4442 
4443 			l = &t->loc[pos];
4444 			l->count++;
4445 			if (track->when) {
4446 				l->sum_time += age;
4447 				if (age < l->min_time)
4448 					l->min_time = age;
4449 				if (age > l->max_time)
4450 					l->max_time = age;
4451 
4452 				if (track->pid < l->min_pid)
4453 					l->min_pid = track->pid;
4454 				if (track->pid > l->max_pid)
4455 					l->max_pid = track->pid;
4456 
4457 				cpumask_set_cpu(track->cpu,
4458 						to_cpumask(l->cpus));
4459 			}
4460 			node_set(page_to_nid(virt_to_page(track)), l->nodes);
4461 			return 1;
4462 		}
4463 
4464 		if (track->addr < caddr)
4465 			end = pos;
4466 		else
4467 			start = pos;
4468 	}
4469 
4470 	/*
4471 	 * Not found. Insert new tracking element.
4472 	 */
4473 	if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
4474 		return 0;
4475 
4476 	l = t->loc + pos;
4477 	if (pos < t->count)
4478 		memmove(l + 1, l,
4479 			(t->count - pos) * sizeof(struct location));
4480 	t->count++;
4481 	l->count = 1;
4482 	l->addr = track->addr;
4483 	l->sum_time = age;
4484 	l->min_time = age;
4485 	l->max_time = age;
4486 	l->min_pid = track->pid;
4487 	l->max_pid = track->pid;
4488 	cpumask_clear(to_cpumask(l->cpus));
4489 	cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
4490 	nodes_clear(l->nodes);
4491 	node_set(page_to_nid(virt_to_page(track)), l->nodes);
4492 	return 1;
4493 }
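/*
 * t->loc stays sorted by address: the binary search above either bumps
 * the counters of an existing entry or leaves "pos" at the insertion
 * point. E.g. adding address B to [A, C, D] memmoves C and D up by one
 * slot and inserts B at index 1.
 */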
4494 
4495 static void process_slab(struct loc_track *t, struct kmem_cache *s,
4496 		struct page *page, enum track_item alloc,
4497 		unsigned long *map)
4498 {
4499 	void *addr = page_address(page);
4500 	void *p;
4501 
4502 	bitmap_zero(map, page->objects);
4503 	get_map(s, page, map);
4504 
4505 	for_each_object(p, s, addr, page->objects)
4506 		if (!test_bit(slab_index(p, s, addr), map))
4507 			add_location(t, s, get_track(s, p, alloc));
4508 }
4509 
4510 static int list_locations(struct kmem_cache *s, char *buf,
4511 					enum track_item alloc)
4512 {
4513 	int len = 0;
4514 	unsigned long i;
4515 	struct loc_track t = { 0, 0, NULL };
4516 	int node;
4517 	unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4518 				     sizeof(unsigned long), GFP_KERNEL);
4519 	struct kmem_cache_node *n;
4520 
4521 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4522 				     GFP_TEMPORARY)) {
4523 		kfree(map);
4524 		return sprintf(buf, "Out of memory\n");
4525 	}
4526 	/* Push back cpu slabs */
4527 	flush_all(s);
4528 
4529 	for_each_kmem_cache_node(s, node, n) {
4530 		unsigned long flags;
4531 		struct page *page;
4532 
4533 		if (!atomic_long_read(&n->nr_slabs))
4534 			continue;
4535 
4536 		spin_lock_irqsave(&n->list_lock, flags);
4537 		list_for_each_entry(page, &n->partial, lru)
4538 			process_slab(&t, s, page, alloc, map);
4539 		list_for_each_entry(page, &n->full, lru)
4540 			process_slab(&t, s, page, alloc, map);
4541 		spin_unlock_irqrestore(&n->list_lock, flags);
4542 	}
4543 
4544 	for (i = 0; i < t.count; i++) {
4545 		struct location *l = &t.loc[i];
4546 
4547 		if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
4548 			break;
4549 		len += sprintf(buf + len, "%7ld ", l->count);
4550 
4551 		if (l->addr)
4552 			len += sprintf(buf + len, "%pS", (void *)l->addr);
4553 		else
4554 			len += sprintf(buf + len, "<not-available>");
4555 
4556 		if (l->sum_time != l->min_time) {
4557 			len += sprintf(buf + len, " age=%ld/%ld/%ld",
4558 				l->min_time,
4559 				(long)div_u64(l->sum_time, l->count),
4560 				l->max_time);
4561 		} else
4562 			len += sprintf(buf + len, " age=%ld",
4563 				l->min_time);
4564 
4565 		if (l->min_pid != l->max_pid)
4566 			len += sprintf(buf + len, " pid=%ld-%ld",
4567 				l->min_pid, l->max_pid);
4568 		else
4569 			len += sprintf(buf + len, " pid=%ld",
4570 				l->min_pid);
4571 
4572 		if (num_online_cpus() > 1 &&
4573 				!cpumask_empty(to_cpumask(l->cpus)) &&
4574 				len < PAGE_SIZE - 60)
4575 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4576 					 " cpus=%*pbl",
4577 					 cpumask_pr_args(to_cpumask(l->cpus)));
4578 
4579 		if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
4580 				len < PAGE_SIZE - 60)
4581 			len += scnprintf(buf + len, PAGE_SIZE - len - 50,
4582 					 " nodes=%*pbl",
4583 					 nodemask_pr_args(&l->nodes));
4584 
4585 		len += sprintf(buf + len, "\n");
4586 	}
4587 
4588 	free_loc_track(&t);
4589 	kfree(map);
4590 	if (!t.count)
4591 		len += sprintf(buf, "No data\n");
4592 	return len;
4593 }
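/*
 * Each emitted line looks roughly like (illustrative):
 *
 *	   4895 kmem_cache_alloc+0x1a/0x80 age=12/3400/8912 pid=1-1423 cpus=0-3 nodes=0
 *
 * i.e. one entry per unique call site, as exposed through the
 * alloc_calls and free_calls sysfs files.
 */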
4594 #endif
4595 
4596 #ifdef SLUB_RESILIENCY_TEST
4597 static void __init resiliency_test(void)
4598 {
4599 	u8 *p;
4600 
4601 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
4602 
4603 	pr_err("SLUB resiliency testing\n");
4604 	pr_err("-----------------------\n");
4605 	pr_err("A. Corruption after allocation\n");
4606 
4607 	p = kzalloc(16, GFP_KERNEL);
4608 	p[16] = 0x12;
4609 	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
4610 	       p + 16);
4611 
4612 	validate_slab_cache(kmalloc_caches[4]);
4613 
4614 	/* Hmmm... The next two are dangerous */
4615 	p = kzalloc(32, GFP_KERNEL);
4616 	p[32 + sizeof(void *)] = 0x34;
4617 	pr_err("\n2. kmalloc-32: Clobber next pointer/next slab 0x34 -> 0x%p\n",
4618 	       p);
4619 	pr_err("If allocated object is overwritten then not detectable\n\n");
4620 
4621 	validate_slab_cache(kmalloc_caches[5]);
4622 	p = kzalloc(64, GFP_KERNEL);
4623 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4624 	*p = 0x56;
4625 	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4626 	       p);
4627 	pr_err("If allocated object is overwritten then not detectable\n\n");
4628 	validate_slab_cache(kmalloc_caches[6]);
4629 
4630 	pr_err("\nB. Corruption after free\n");
4631 	p = kzalloc(128, GFP_KERNEL);
4632 	kfree(p);
4633 	*p = 0x78;
4634 	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4635 	validate_slab_cache(kmalloc_caches[7]);
4636 
4637 	p = kzalloc(256, GFP_KERNEL);
4638 	kfree(p);
4639 	p[50] = 0x9a;
4640 	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
4641 	validate_slab_cache(kmalloc_caches[8]);
4642 
4643 	p = kzalloc(512, GFP_KERNEL);
4644 	kfree(p);
4645 	p[512] = 0xab;
4646 	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4647 	validate_slab_cache(kmalloc_caches[9]);
4648 }
4649 #else
4650 #ifdef CONFIG_SYSFS
4651 static void resiliency_test(void) {};
4652 #endif
4653 #endif
4654 
4655 #ifdef CONFIG_SYSFS
4656 enum slab_stat_type {
4657 	SL_ALL,			/* All slabs */
4658 	SL_PARTIAL,		/* Only partially allocated slabs */
4659 	SL_CPU,			/* Only slabs used for cpu caches */
4660 	SL_OBJECTS,		/* Determine allocated objects not slabs */
4661 	SL_TOTAL		/* Determine object capacity not slabs */
4662 };
4663 
4664 #define SO_ALL		(1 << SL_ALL)
4665 #define SO_PARTIAL	(1 << SL_PARTIAL)
4666 #define SO_CPU		(1 << SL_CPU)
4667 #define SO_OBJECTS	(1 << SL_OBJECTS)
4668 #define SO_TOTAL	(1 << SL_TOTAL)
4669 
4670 static ssize_t show_slab_objects(struct kmem_cache *s,
4671 			    char *buf, unsigned long flags)
4672 {
4673 	unsigned long total = 0;
4674 	int node;
4675 	int x;
4676 	unsigned long *nodes;
4677 
4678 	nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
4679 	if (!nodes)
4680 		return -ENOMEM;
4681 
4682 	if (flags & SO_CPU) {
4683 		int cpu;
4684 
4685 		for_each_possible_cpu(cpu) {
4686 			struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4687 							       cpu);
4688 			int node;
4689 			struct page *page;
4690 
4691 			page = READ_ONCE(c->page);
4692 			if (!page)
4693 				continue;
4694 
4695 			node = page_to_nid(page);
4696 			if (flags & SO_TOTAL)
4697 				x = page->objects;
4698 			else if (flags & SO_OBJECTS)
4699 				x = page->inuse;
4700 			else
4701 				x = 1;
4702 
4703 			total += x;
4704 			nodes[node] += x;
4705 
4706 			page = READ_ONCE(c->partial);
4707 			if (page) {
4708 				node = page_to_nid(page);
4709 				if (flags & SO_TOTAL)
4710 					WARN_ON_ONCE(1);
4711 				else if (flags & SO_OBJECTS)
4712 					WARN_ON_ONCE(1);
4713 				else
4714 					x = page->pages;
4715 				total += x;
4716 				nodes[node] += x;
4717 			}
4718 		}
4719 	}
4720 
4721 	get_online_mems();
4722 #ifdef CONFIG_SLUB_DEBUG
4723 	if (flags & SO_ALL) {
4724 		struct kmem_cache_node *n;
4725 
4726 		for_each_kmem_cache_node(s, node, n) {
4727 
4728 			if (flags & SO_TOTAL)
4729 				x = atomic_long_read(&n->total_objects);
4730 			else if (flags & SO_OBJECTS)
4731 				x = atomic_long_read(&n->total_objects) -
4732 					count_partial(n, count_free);
4733 			else
4734 				x = atomic_long_read(&n->nr_slabs);
4735 			total += x;
4736 			nodes[node] += x;
4737 		}
4738 
4739 	} else
4740 #endif
4741 	if (flags & SO_PARTIAL) {
4742 		struct kmem_cache_node *n;
4743 
4744 		for_each_kmem_cache_node(s, node, n) {
4745 			if (flags & SO_TOTAL)
4746 				x = count_partial(n, count_total);
4747 			else if (flags & SO_OBJECTS)
4748 				x = count_partial(n, count_inuse);
4749 			else
4750 				x = n->nr_partial;
4751 			total += x;
4752 			nodes[node] += x;
4753 		}
4754 	}
4755 	x = sprintf(buf, "%lu", total);
4756 #ifdef CONFIG_NUMA
4757 	for (node = 0; node < nr_node_ids; node++)
4758 		if (nodes[node])
4759 			x += sprintf(buf + x, " N%d=%lu",
4760 					node, nodes[node]);
4761 #endif
4762 	put_online_mems();
4763 	kfree(nodes);
4764 	return x + sprintf(buf + x, "\n");
4765 }
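/*
 * The resulting buffer looks like "1365 N0=1024 N1=341\n" (sketch): a
 * global count followed by per-node breakdowns on NUMA builds.
 */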
4766 
4767 #ifdef CONFIG_SLUB_DEBUG
4768 static int any_slab_objects(struct kmem_cache *s)
4769 {
4770 	int node;
4771 	struct kmem_cache_node *n;
4772 
4773 	for_each_kmem_cache_node(s, node, n)
4774 		if (atomic_long_read(&n->total_objects))
4775 			return 1;
4776 
4777 	return 0;
4778 }
4779 #endif
4780 
4781 #define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
4782 #define to_slab(n) container_of(n, struct kmem_cache, kobj)
4783 
4784 struct slab_attribute {
4785 	struct attribute attr;
4786 	ssize_t (*show)(struct kmem_cache *s, char *buf);
4787 	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4788 };
4789 
4790 #define SLAB_ATTR_RO(_name) \
4791 	static struct slab_attribute _name##_attr = \
4792 	__ATTR(_name, 0400, _name##_show, NULL)
4793 
4794 #define SLAB_ATTR(_name) \
4795 	static struct slab_attribute _name##_attr =  \
4796 	__ATTR(_name, 0600, _name##_show, _name##_store)
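/*
 * For instance, SLAB_ATTR(order) below expands to (sketch):
 *
 *	static struct slab_attribute order_attr =
 *		__ATTR(order, 0600, order_show, order_store);
 *
 * i.e. a read-write "order" sysfs file, while SLAB_ATTR_RO() creates a
 * read-only file backed by the _show routine alone.
 */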
4797 
4798 static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4799 {
4800 	return sprintf(buf, "%d\n", s->size);
4801 }
4802 SLAB_ATTR_RO(slab_size);
4803 
4804 static ssize_t align_show(struct kmem_cache *s, char *buf)
4805 {
4806 	return sprintf(buf, "%d\n", s->align);
4807 }
4808 SLAB_ATTR_RO(align);
4809 
4810 static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4811 {
4812 	return sprintf(buf, "%d\n", s->object_size);
4813 }
4814 SLAB_ATTR_RO(object_size);
4815 
4816 static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4817 {
4818 	return sprintf(buf, "%d\n", oo_objects(s->oo));
4819 }
4820 SLAB_ATTR_RO(objs_per_slab);
4821 
4822 static ssize_t order_store(struct kmem_cache *s,
4823 				const char *buf, size_t length)
4824 {
4825 	unsigned long order;
4826 	int err;
4827 
4828 	err = kstrtoul(buf, 10, &order);
4829 	if (err)
4830 		return err;
4831 
4832 	if (order > slub_max_order || order < slub_min_order)
4833 		return -EINVAL;
4834 
4835 	calculate_sizes(s, order);
4836 	return length;
4837 }
4838 
4839 static ssize_t order_show(struct kmem_cache *s, char *buf)
4840 {
4841 	return sprintf(buf, "%d\n", oo_order(s->oo));
4842 }
4843 SLAB_ATTR(order);
4844 
4845 static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4846 {
4847 	return sprintf(buf, "%lu\n", s->min_partial);
4848 }
4849 
4850 static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4851 				 size_t length)
4852 {
4853 	unsigned long min;
4854 	int err;
4855 
4856 	err = kstrtoul(buf, 10, &min);
4857 	if (err)
4858 		return err;
4859 
4860 	set_min_partial(s, min);
4861 	return length;
4862 }
4863 SLAB_ATTR(min_partial);
4864 
4865 static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4866 {
4867 	return sprintf(buf, "%u\n", s->cpu_partial);
4868 }
4869 
4870 static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4871 				 size_t length)
4872 {
4873 	unsigned long objects;
4874 	int err;
4875 
4876 	err = kstrtoul(buf, 10, &objects);
4877 	if (err)
4878 		return err;
4879 	if (objects && !kmem_cache_has_cpu_partial(s))
4880 		return -EINVAL;
4881 
4882 	s->cpu_partial = objects;
4883 	flush_all(s);
4884 	return length;
4885 }
4886 SLAB_ATTR(cpu_partial);
4887 
4888 static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4889 {
4890 	if (!s->ctor)
4891 		return 0;
4892 	return sprintf(buf, "%pS\n", s->ctor);
4893 }
4894 SLAB_ATTR_RO(ctor);
4895 
4896 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4897 {
4898 	return sprintf(buf, "%d\n", s->refcount < 0 ? 0 : s->refcount - 1);
4899 }
4900 SLAB_ATTR_RO(aliases);
4901 
4902 static ssize_t partial_show(struct kmem_cache *s, char *buf)
4903 {
4904 	return show_slab_objects(s, buf, SO_PARTIAL);
4905 }
4906 SLAB_ATTR_RO(partial);
4907 
4908 static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4909 {
4910 	return show_slab_objects(s, buf, SO_CPU);
4911 }
4912 SLAB_ATTR_RO(cpu_slabs);
4913 
4914 static ssize_t objects_show(struct kmem_cache *s, char *buf)
4915 {
4916 	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
4917 }
4918 SLAB_ATTR_RO(objects);
4919 
4920 static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4921 {
4922 	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4923 }
4924 SLAB_ATTR_RO(objects_partial);
4925 
4926 static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4927 {
4928 	int objects = 0;
4929 	int pages = 0;
4930 	int cpu;
4931 	int len;
4932 
4933 	for_each_online_cpu(cpu) {
4934 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4935 
4936 		if (page) {
4937 			pages += page->pages;
4938 			objects += page->pobjects;
4939 		}
4940 	}
4941 
4942 	len = sprintf(buf, "%d(%d)", objects, pages);
4943 
4944 #ifdef CONFIG_SMP
4945 	for_each_online_cpu(cpu) {
4946 		struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4947 
4948 		if (page && len < PAGE_SIZE - 20)
4949 			len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4950 				page->pobjects, page->pages);
4951 	}
4952 #endif
4953 	return len + sprintf(buf + len, "\n");
4954 }
4955 SLAB_ATTR_RO(slabs_cpu_partial);
4956 
4957 static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4958 {
4959 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4960 }
4961 
4962 static ssize_t reclaim_account_store(struct kmem_cache *s,
4963 				const char *buf, size_t length)
4964 {
4965 	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4966 	if (buf[0] == '1')
4967 		s->flags |= SLAB_RECLAIM_ACCOUNT;
4968 	return length;
4969 }
4970 SLAB_ATTR(reclaim_account);
4971 
4972 static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4973 {
4974 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4975 }
4976 SLAB_ATTR_RO(hwcache_align);
4977 
4978 #ifdef CONFIG_ZONE_DMA
4979 static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4980 {
4981 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4982 }
4983 SLAB_ATTR_RO(cache_dma);
4984 #endif
4985 
4986 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4987 {
4988 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4989 }
4990 SLAB_ATTR_RO(destroy_by_rcu);
4991 
4992 static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4993 {
4994 	return sprintf(buf, "%d\n", s->reserved);
4995 }
4996 SLAB_ATTR_RO(reserved);
4997 
4998 #ifdef CONFIG_SLUB_DEBUG
4999 static ssize_t slabs_show(struct kmem_cache *s, char *buf)
5000 {
5001 	return show_slab_objects(s, buf, SO_ALL);
5002 }
5003 SLAB_ATTR_RO(slabs);
5004 
5005 static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
5006 {
5007 	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
5008 }
5009 SLAB_ATTR_RO(total_objects);
5010 
5011 static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
5012 {
5013 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CONSISTENCY_CHECKS));
5014 }
5015 
5016 static ssize_t sanity_checks_store(struct kmem_cache *s,
5017 				const char *buf, size_t length)
5018 {
5019 	s->flags &= ~SLAB_CONSISTENCY_CHECKS;
5020 	if (buf[0] == '1') {
5021 		s->flags &= ~__CMPXCHG_DOUBLE;
5022 		s->flags |= SLAB_CONSISTENCY_CHECKS;
5023 	}
5024 	return length;
5025 }
5026 SLAB_ATTR(sanity_checks);
5027 
5028 static ssize_t trace_show(struct kmem_cache *s, char *buf)
5029 {
5030 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
5031 }
5032 
5033 static ssize_t trace_store(struct kmem_cache *s, const char *buf,
5034 							size_t length)
5035 {
5036 	/*
5037 	 * Tracing a merged cache is going to give confusing results
5038 	 * as well as cause other issues like converting a mergeable
5039 	 * cache into an unmergeable one.
5040 	 */
5041 	if (s->refcount > 1)
5042 		return -EINVAL;
5043 
5044 	s->flags &= ~SLAB_TRACE;
5045 	if (buf[0] == '1') {
5046 		s->flags &= ~__CMPXCHG_DOUBLE;
5047 		s->flags |= SLAB_TRACE;
5048 	}
5049 	return length;
5050 }
5051 SLAB_ATTR(trace);
5052 
5053 static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
5054 {
5055 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
5056 }
5057 
5058 static ssize_t red_zone_store(struct kmem_cache *s,
5059 				const char *buf, size_t length)
5060 {
5061 	if (any_slab_objects(s))
5062 		return -EBUSY;
5063 
5064 	s->flags &= ~SLAB_RED_ZONE;
5065 	if (buf[0] == '1') {
5066 		s->flags |= SLAB_RED_ZONE;
5067 	}
5068 	calculate_sizes(s, -1);
5069 	return length;
5070 }
5071 SLAB_ATTR(red_zone);
5072 
5073 static ssize_t poison_show(struct kmem_cache *s, char *buf)
5074 {
5075 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
5076 }
5077 
5078 static ssize_t poison_store(struct kmem_cache *s,
5079 				const char *buf, size_t length)
5080 {
5081 	if (any_slab_objects(s))
5082 		return -EBUSY;
5083 
5084 	s->flags &= ~SLAB_POISON;
5085 	if (buf[0] == '1') {
5086 		s->flags |= SLAB_POISON;
5087 	}
5088 	calculate_sizes(s, -1);
5089 	return length;
5090 }
5091 SLAB_ATTR(poison);
5092 
5093 static ssize_t store_user_show(struct kmem_cache *s, char *buf)
5094 {
5095 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
5096 }
5097 
5098 static ssize_t store_user_store(struct kmem_cache *s,
5099 				const char *buf, size_t length)
5100 {
5101 	if (any_slab_objects(s))
5102 		return -EBUSY;
5103 
5104 	s->flags &= ~SLAB_STORE_USER;
5105 	if (buf[0] == '1') {
5106 		s->flags &= ~__CMPXCHG_DOUBLE;
5107 		s->flags |= SLAB_STORE_USER;
5108 	}
5109 	calculate_sizes(s, -1);
5110 	return length;
5111 }
5112 SLAB_ATTR(store_user);
5113 
5114 static ssize_t validate_show(struct kmem_cache *s, char *buf)
5115 {
5116 	return 0;
5117 }
5118 
5119 static ssize_t validate_store(struct kmem_cache *s,
5120 			const char *buf, size_t length)
5121 {
5122 	int ret = -EINVAL;
5123 
5124 	if (buf[0] == '1') {
5125 		ret = validate_slab_cache(s);
5126 		if (ret >= 0)
5127 			ret = length;
5128 	}
5129 	return ret;
5130 }
5131 SLAB_ATTR(validate);
5132 
5133 static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
5134 {
5135 	if (!(s->flags & SLAB_STORE_USER))
5136 		return -ENOSYS;
5137 	return list_locations(s, buf, TRACK_ALLOC);
5138 }
5139 SLAB_ATTR_RO(alloc_calls);
5140 
5141 static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
5142 {
5143 	if (!(s->flags & SLAB_STORE_USER))
5144 		return -ENOSYS;
5145 	return list_locations(s, buf, TRACK_FREE);
5146 }
5147 SLAB_ATTR_RO(free_calls);
5148 #endif /* CONFIG_SLUB_DEBUG */
5149 
5150 #ifdef CONFIG_FAILSLAB
5151 static ssize_t failslab_show(struct kmem_cache *s, char *buf)
5152 {
5153 	return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
5154 }
5155 
5156 static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
5157 							size_t length)
5158 {
5159 	if (s->refcount > 1)
5160 		return -EINVAL;
5161 
5162 	s->flags &= ~SLAB_FAILSLAB;
5163 	if (buf[0] == '1')
5164 		s->flags |= SLAB_FAILSLAB;
5165 	return length;
5166 }
5167 SLAB_ATTR(failslab);
5168 #endif
5169 
5170 static ssize_t shrink_show(struct kmem_cache *s, char *buf)
5171 {
5172 	return 0;
5173 }
5174 
5175 static ssize_t shrink_store(struct kmem_cache *s,
5176 			const char *buf, size_t length)
5177 {
5178 	if (buf[0] == '1')
5179 		kmem_cache_shrink(s);
5180 	else
5181 		return -EINVAL;
5182 	return length;
5183 }
5184 SLAB_ATTR(shrink);
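/*
 * Example: "echo 1 > /sys/kernel/slab/dentry/shrink" releases the
 * empty slabs of the dentry cache back to the page allocator; any
 * other value is rejected with -EINVAL.
 */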
5185 
5186 #ifdef CONFIG_NUMA
5187 static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
5188 {
5189 	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
5190 }
5191 
5192 static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
5193 				const char *buf, size_t length)
5194 {
5195 	unsigned long ratio;
5196 	int err;
5197 
5198 	err = kstrtoul(buf, 10, &ratio);
5199 	if (err)
5200 		return err;
5201 
5202 	if (ratio <= 100)
5203 		s->remote_node_defrag_ratio = ratio * 10;
5204 
5205 	return length;
5206 }
5207 SLAB_ATTR(remote_node_defrag_ratio);
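/*
 * The ratio is stored scaled by 10: writing 50 stores 500 and reads
 * back as 50, and the default of 1000 set in kmem_cache_open()
 * corresponds to 100%.
 */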
5208 #endif
5209 
5210 #ifdef CONFIG_SLUB_STATS
5211 static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
5212 {
5213 	unsigned long sum  = 0;
5214 	int cpu;
5215 	int len;
5216 	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
5217 
5218 	if (!data)
5219 		return -ENOMEM;
5220 
5221 	for_each_online_cpu(cpu) {
5222 		unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
5223 
5224 		data[cpu] = x;
5225 		sum += x;
5226 	}
5227 
5228 	len = sprintf(buf, "%lu", sum);
5229 
5230 #ifdef CONFIG_SMP
5231 	for_each_online_cpu(cpu) {
5232 		if (data[cpu] && len < PAGE_SIZE - 20)
5233 			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
5234 	}
5235 #endif
5236 	kfree(data);
5237 	return len + sprintf(buf + len, "\n");
5238 }
5239 
5240 static void clear_stat(struct kmem_cache *s, enum stat_item si)
5241 {
5242 	int cpu;
5243 
5244 	for_each_online_cpu(cpu)
5245 		per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
5246 }
5247 
5248 #define STAT_ATTR(si, text) 					\
5249 static ssize_t text##_show(struct kmem_cache *s, char *buf)	\
5250 {								\
5251 	return show_stat(s, buf, si);				\
5252 }								\
5253 static ssize_t text##_store(struct kmem_cache *s,		\
5254 				const char *buf, size_t length)	\
5255 {								\
5256 	if (buf[0] != '0')					\
5257 		return -EINVAL;					\
5258 	clear_stat(s, si);					\
5259 	return length;						\
5260 }								\
5261 SLAB_ATTR(text);						\
5262 
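/*
 * STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath), for example, generates
 * alloc_fastpath_show()/alloc_fastpath_store(): reading the sysfs file
 * dumps the summed per-cpu counter, and writing '0' clears it.
 */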
5263 STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
5264 STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
5265 STAT_ATTR(FREE_FASTPATH, free_fastpath);
5266 STAT_ATTR(FREE_SLOWPATH, free_slowpath);
5267 STAT_ATTR(FREE_FROZEN, free_frozen);
5268 STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
5269 STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
5270 STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
5271 STAT_ATTR(ALLOC_SLAB, alloc_slab);
5272 STAT_ATTR(ALLOC_REFILL, alloc_refill);
5273 STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
5274 STAT_ATTR(FREE_SLAB, free_slab);
5275 STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
5276 STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
5277 STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
5278 STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
5279 STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
5280 STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
5281 STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
5282 STAT_ATTR(ORDER_FALLBACK, order_fallback);
5283 STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
5284 STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
5285 STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
5286 STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
5287 STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
5288 STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
5289 #endif

static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&min_partial_attr.attr,
	&cpu_partial_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&shrink_attr.attr,
	&reserved_attr.attr,
	&slabs_cpu_partial_attr.attr,
#ifdef CONFIG_SLUB_DEBUG
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#endif
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&alloc_node_mismatch_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&deactivate_bypass_attr.attr,
	&order_fallback_attr.attr,
	&cmpxchg_double_fail_attr.attr,
	&cmpxchg_double_cpu_fail_attr.attr,
	&cpu_partial_alloc_attr.attr,
	&cpu_partial_free_attr.attr,
	&cpu_partial_node_attr.attr,
	&cpu_partial_drain_attr.attr,
#endif
#ifdef CONFIG_FAILSLAB
	&failslab_attr.attr,
#endif

	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);
#ifdef CONFIG_MEMCG
	if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
		struct kmem_cache *c;

		mutex_lock(&slab_mutex);
		if (s->max_attr_size < len)
			s->max_attr_size = len;

		/*
		 * This is a best-effort propagation, so this function's
		 * return value is determined by the parent cache only.
		 * Not all attributes have well-defined semantics for
		 * rollback: most of the actions have permanent effects.
		 *
		 * Returning the error value of a failing child would be
		 * ambiguous, in the sense that a user seeing the error
		 * code could not tell anything about the state of the
		 * cache that was actually written to.
		 *
		 * Only returning the error code of the parent cache has
		 * well-defined semantics: the cache written to directly
		 * either failed or succeeded, and if it succeeded we
		 * loop through the descendants with best-effort
		 * propagation.
		 */
		for_each_memcg_cache(c, s)
			attribute->store(c, buf, len);
		mutex_unlock(&slab_mutex);
	}
#endif
	return err;
}
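/*
 * Illustrative consequence of the propagation above (a sketch with a
 * hypothetical cache name): writing to a root cache attribute, e.g.
 *
 *	# echo 1 > /sys/kernel/slab/kmalloc-64/trace
 *
 * is replayed against every memcg child cache of kmalloc-64, but only
 * the root cache's return value is reported back to the writer; errors
 * from the children are dropped.
 */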

static void memcg_propagate_slab_attrs(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	int i;
	char *buffer = NULL;
	struct kmem_cache *root_cache;

	if (is_root_cache(s))
		return;

	root_cache = s->memcg_params.root_cache;

	/*
	 * This means no attribute of the root cache was ever written,
	 * so there is no point in copying default values around.
	 */
	if (!root_cache->max_attr_size)
		return;

	for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
		char mbuf[64];
		char *buf;
		struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
		ssize_t len;

		if (!attr || !attr->store || !attr->show)
			continue;

		/*
		 * It is really bad that we have to allocate here, so we
		 * do it only as a fallback. If we actually allocate,
		 * though, we can just use the allocated buffer until the
		 * end.
		 *
		 * Most slub attribute values are very small, but sysfs
		 * allows buffers up to a page, so a page-sized value can
		 * theoretically occur.
		 */
		if (buffer)
			buf = buffer;
		else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
			buf = mbuf;
		else {
			buffer = (char *) get_zeroed_page(GFP_KERNEL);
			if (WARN_ON(!buffer))
				continue;
			buf = buffer;
		}

		len = attr->show(root_cache, buf);
		if (len > 0)
			attr->store(s, buf, len);
	}

	if (buffer)
		free_page((unsigned long)buffer);
#endif
}

static void kmem_cache_release(struct kobject *k)
{
	slab_kmem_cache_release(to_slab(k));
}

static const struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release,
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static const struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

static inline struct kset *cache_kset(struct kmem_cache *s)
{
#ifdef CONFIG_MEMCG
	if (!is_root_cache(s))
		return s->memcg_params.root_cache->memcg_kset;
#endif
	return slab_kset;
}

#define ID_STR_LENGTH 64

/* Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
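/*
 * Examples (illustrative): a plain mergeable cache of object size 192
 * gets the id ":t-0000192" ('t' marks a cache without SLAB_NOTRACK,
 * and the size is printed with %07d), while an otherwise identical
 * DMA cache gets ":dt-0000192".
 */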
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First come the flags that affect slab cache operations. We
	 * will only get here for aliasable slabs so we do not need to
	 * support too many flags. The flags here must cover all flags
	 * that are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_CONSISTENCY_CHECKS)
		*p++ = 'F';
	if (!(s->flags & SLAB_NOTRACK))
		*p++ = 't';
	if (s->flags & SLAB_ACCOUNT)
		*p++ = 'A';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);

	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable = slab_unmergeable(s);

	if (unmergeable) {
		/*
		 * A slab cache that can never be merged can use its
		 * proper name directly. This is typically the case when
		 * debugging is enabled, and it lets us catch duplicate
		 * names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = cache_kset(s);
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
	if (err)
		goto out;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		goto out_del_kobj;

#ifdef CONFIG_MEMCG
	if (is_root_cache(s)) {
		s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
		if (!s->memcg_kset) {
			err = -ENOMEM;
			goto out_del_kobj;
		}
	}
#endif

	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
	}
out:
	if (!unmergeable)
		kfree(name);
	return err;
out_del_kobj:
	kobject_del(&s->kobj);
	goto out;
}
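/*
 * Resulting sysfs layout (a sketch with hypothetical names): a
 * mergeable cache appears as a symlink, named after the cache, that
 * points at the unique-id kobject created above, e.g.
 *
 *	/sys/kernel/slab/kmalloc-192 -> :t-0000192
 *
 * while an unmergeable cache (typically one with debugging enabled)
 * is added directly under its proper name.
 */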

void sysfs_slab_remove(struct kmem_cache *s)
{
	if (slab_state < FULL)
		/*
		 * Sysfs has not been set up yet, so there is no need to
		 * remove the cache from sysfs.
		 */
		return;

#ifdef CONFIG_MEMCG
	kset_unregister(s->memcg_kset);
#endif
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == FULL) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	mutex_lock(&slab_mutex);

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		mutex_unlock(&slab_mutex);
		pr_err("Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = FULL;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			pr_err("SLUB: Unable to add boot slab %s to sysfs\n",
			       s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			pr_err("SLUB: Unable to add boot slab alias %s to sysfs\n",
			       al->name);
		kfree(al);
	}

	mutex_unlock(&slab_mutex);
	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif /* CONFIG_SYSFS */

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
{
	unsigned long nr_slabs = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	int node;
	struct kmem_cache_node *n;

	for_each_kmem_cache_node(s, node, n) {
		nr_slabs += node_nr_slabs(n);
		nr_objs += node_nr_objs(n);
		nr_free += count_partial(n, count_free);
	}

	sinfo->active_objs = nr_objs - nr_free;
	sinfo->num_objs = nr_objs;
	sinfo->active_slabs = nr_slabs;
	sinfo->num_slabs = nr_slabs;
	sinfo->objects_per_slab = oo_objects(s->oo);
	sinfo->cache_order = oo_order(s->oo);
}
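/*
 * Illustrative /proc/slabinfo line fed by the fields above (the line
 * itself is formatted in slab_common.c; these values are made up):
 *
 *	kmalloc-192  2880 3150 192 21 1 : tunables 0 0 0 : slabdata 150 150 0
 *
 * Note that SLUB does not track "active" slabs separately from the
 * total, so active_slabs always equals num_slabs here.
 */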

void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
{
}

ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos)
{
	return -EIO;
}
#endif /* CONFIG_SLABINFO */
5756