1 /*
2  * linux/mm/slab.c
3  * Written by Mark Hemment, 1996/97.
4  * (markhe@nextd.demon.co.uk)
5  *
6  * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
7  *
8  * Major cleanup, different bufctl logic, per-cpu arrays
9  *	(c) 2000 Manfred Spraul
10  *
11  * Cleanup, make the head arrays unconditional, preparation for NUMA
12  * 	(c) 2002 Manfred Spraul
13  *
14  * An implementation of the Slab Allocator as described in outline in;
15  *	UNIX Internals: The New Frontiers by Uresh Vahalia
16  *	Pub: Prentice Hall	ISBN 0-13-101908-2
17  * or with a little more detail in;
18  *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
19  *	Jeff Bonwick (Sun Microsystems).
20  *	Presented at: USENIX Summer 1994 Technical Conference
21  *
22  * The memory is organized in caches, one cache for each object type.
23  * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
24  * Each cache consists of many slabs (they are small (usually one
25  * page long) and always contiguous), and each slab contains multiple
26  * initialized objects.
27  *
28  * This means that your constructor is used only for newly allocated
29  * slabs and you must pass objects with the same initializations to
30  * kmem_cache_free.
31  *
32  * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
33  * normal). If you need a special memory type, then you must create a new
34  * cache for that memory type.
35  *
36  * In order to reduce fragmentation, the slabs are sorted in 3 groups:
37  *   full slabs with 0 free objects
38  *   partial slabs
39  *   empty slabs with no allocated objects
40  *
41  * If partial slabs exist, then new allocations come from these slabs,
42  * otherwise they come from empty slabs, or new slabs are allocated.
43  *
44  * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
45  * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
46  *
47  * Each cache has a short per-cpu head array, most allocs
48  * and frees go into that array, and if that array overflows, then 1/2
49  * of the entries in the array are given back into the global cache.
50  * The head array is strictly LIFO and should improve the cache hit rates.
51  * On SMP, it additionally reduces the spinlock operations.
52  *
53  * The c_cpuarray may not be read with enabled local interrupts -
54  * it's changed with a smp_call_function().
55  *
56  * SMP synchronization:
57  *  constructors and destructors are called without any locking.
58  *  Several members in struct kmem_cache and struct slab never change, they
59  *	are accessed without any locking.
60  *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
61  *  	and local interrupts are disabled so slab code is preempt-safe.
62  *  The non-constant members are protected with a per-cache irq spinlock.
63  *
64  * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
65  * in 2000 - many ideas in the current implementation are derived from
66  * his patch.
67  *
68  * Further notes from the original documentation:
69  *
70  * 11 April '97.  Started multi-threading - markhe
71  *	The global cache-chain is protected by the mutex 'cache_chain_mutex'.
72  *	The sem is only needed when accessing/extending the cache-chain, which
73  *	can never happen inside an interrupt (kmem_cache_create(),
74  *	kmem_cache_shrink() and kmem_cache_reap()).
75  *
76  *	At present, each engine can be growing a cache.  This should be blocked.
77  *
78  * 15 March 2005. NUMA slab allocator.
79  *	Shai Fultheim <shai@scalex86.org>.
80  *	Shobhit Dayal <shobhit@calsoftinc.com>
81  *	Alok N Kataria <alokk@calsoftinc.com>
82  *	Christoph Lameter <christoph@lameter.com>
83  *
84  *	Modified the slab allocator to be node aware on NUMA systems.
85  *	Each node has its own list of partial, free and full slabs.
86  *	All object allocations for a node occur from node specific slab lists.
87  */
88 
89 #include	<linux/slab.h>
90 #include	<linux/mm.h>
91 #include	<linux/poison.h>
92 #include	<linux/swap.h>
93 #include	<linux/cache.h>
94 #include	<linux/interrupt.h>
95 #include	<linux/init.h>
96 #include	<linux/compiler.h>
97 #include	<linux/cpuset.h>
98 #include	<linux/proc_fs.h>
99 #include	<linux/seq_file.h>
100 #include	<linux/notifier.h>
101 #include	<linux/kallsyms.h>
102 #include	<linux/cpu.h>
103 #include	<linux/sysctl.h>
104 #include	<linux/module.h>
105 #include	<linux/rcupdate.h>
106 #include	<linux/string.h>
107 #include	<linux/uaccess.h>
108 #include	<linux/nodemask.h>
109 #include	<linux/mempolicy.h>
110 #include	<linux/mutex.h>
111 #include	<linux/fault-inject.h>
112 #include	<linux/rtmutex.h>
113 #include	<linux/reciprocal_div.h>
114 #include	<linux/debugobjects.h>
115 
116 #include	<asm/cacheflush.h>
117 #include	<asm/tlbflush.h>
118 #include	<asm/page.h>
119 
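/*
 * Illustrative usage sketch of the cache API described in the header comment
 * above (kept under "#if 0"; it is not part of the allocator itself).  It
 * shows the constructor contract: example_ctor() runs only when a fresh slab
 * is populated, so objects must be handed back to kmem_cache_free() in their
 * constructed state.  "struct example_obj", example_cache and the example_*
 * functions are hypothetical names used only for this sketch.
 */
#if 0
struct example_obj {
	spinlock_t lock;
	struct list_head link;
};

static struct kmem_cache *example_cache;

static void example_ctor(void *obj)
{
	struct example_obj *e = obj;

	spin_lock_init(&e->lock);
	INIT_LIST_HEAD(&e->link);
}

static int __init example_cache_init(void)
{
	example_cache = kmem_cache_create("example_obj",
					  sizeof(struct example_obj), 0,
					  SLAB_HWCACHE_ALIGN, example_ctor);
	return example_cache ? 0 : -ENOMEM;
}

static void example_use(void)
{
	struct example_obj *e = kmem_cache_alloc(example_cache, GFP_KERNEL);

	if (!e)
		return;
	/* ... use e, leaving it unlocked and list-empty again ... */
	kmem_cache_free(example_cache, e);	/* same state as after ctor */
}
#endif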
120 /*
121  * DEBUG	- 1 for kmem_cache_create() to honour SLAB_RED_ZONE & SLAB_POISON.
122  *		  0 for faster, smaller code (especially in the critical paths).
123  *
124  * STATS	- 1 to collect stats for /proc/slabinfo.
125  *		  0 for faster, smaller code (especially in the critical paths).
126  *
127  * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
128  */
129 
130 #ifdef CONFIG_DEBUG_SLAB
131 #define	DEBUG		1
132 #define	STATS		1
133 #define	FORCED_DEBUG	1
134 #else
135 #define	DEBUG		0
136 #define	STATS		0
137 #define	FORCED_DEBUG	0
138 #endif
139 
140 /* Shouldn't this be in a header file somewhere? */
141 #define	BYTES_PER_WORD		sizeof(void *)
142 #define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))
143 
144 #ifndef ARCH_KMALLOC_MINALIGN
145 /*
146  * Enforce a minimum alignment for the kmalloc caches.
147  * Usually, the kmalloc caches are cache_line_size() aligned, except when
148  * DEBUG and FORCED_DEBUG are enabled, then they are BYTES_PER_WORD aligned.
149  * Some archs want to perform DMA into kmalloc caches and need a guaranteed
150  * alignment larger than the alignment of a 64-bit integer.
151  * ARCH_KMALLOC_MINALIGN allows that.
152  * Note that increasing this value may disable some debug features.
153  */
154 #define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
155 #endif
156 
157 #ifndef ARCH_SLAB_MINALIGN
158 /*
159  * Enforce a minimum alignment for all caches.
160  * Intended for archs that get misalignment faults even for BYTES_PER_WORD
161  * aligned buffers. Includes ARCH_KMALLOC_MINALIGN.
162  * If possible: Do not enable this flag for CONFIG_DEBUG_SLAB, it disables
163  * some debug features.
164  */
165 #define ARCH_SLAB_MINALIGN 0
166 #endif
167 
168 #ifndef ARCH_KMALLOC_FLAGS
169 #define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
170 #endif
171 
172 /* Legal flag mask for kmem_cache_create(). */
173 #if DEBUG
174 # define CREATE_MASK	(SLAB_RED_ZONE | \
175 			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
176 			 SLAB_CACHE_DMA | \
177 			 SLAB_STORE_USER | \
178 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
179 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
180 			 SLAB_DEBUG_OBJECTS)
181 #else
182 # define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
183 			 SLAB_CACHE_DMA | \
184 			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
185 			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
186 			 SLAB_DEBUG_OBJECTS)
187 #endif
188 
189 /*
190  * kmem_bufctl_t:
191  *
192  * Bufctls are used for linking objs within a slab as a chain of
193  * linked offsets.
194  *
195  * This implementation relies on "struct page" for locating the cache &
196  * slab an object belongs to.
197  * This allows the bufctl structure to be small (one int), but limits
198  * the number of objects a slab (not a cache) can contain when off-slab
199  * bufctls are used. The limit is the size of the largest general cache
200  * that does not use off-slab slabs.
201  * For 32bit archs with 4 kB pages, this is 56.
202  * This is not serious, as it is only for large objects, when it is unwise
203  * to have too many per slab.
204  * Note: This limit can be raised by introducing a general cache whose size
205  * is less than 512 (PAGE_SIZE<<3), but greater than 256.
206  */
207 
208 typedef unsigned int kmem_bufctl_t;
209 #define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
210 #define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
211 #define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
212 #define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)
213 
214 /*
215  * struct slab
216  *
217  * Manages the objs in a slab. Placed either at the beginning of mem allocated
218  * for a slab, or allocated from a general cache.
219  * Slabs are chained into three lists: fully used, partial, fully free slabs.
220  */
221 struct slab {
222 	struct list_head list;
223 	unsigned long colouroff;
224 	void *s_mem;		/* including colour offset */
225 	unsigned int inuse;	/* num of objs active in slab */
226 	kmem_bufctl_t free;
227 	unsigned short nodeid;
228 };
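/*
 * Illustrative sketch of how the kmem_bufctl_t free list described above is
 * used (hypothetical example_* helpers, kept under "#if 0"): with on-slab
 * management the bufctl array sits directly after struct slab, and
 * slabp->free holds the index of the first free object, each bufctl entry
 * holding the index of the next one.
 */
#if 0
static inline kmem_bufctl_t *example_bufctl(struct slab *slabp)
{
	/* the kmem_bufctl_t array follows the management structure */
	return (kmem_bufctl_t *)(slabp + 1);
}

/* Pop the next free object index, roughly as the allocation path does. */
static inline kmem_bufctl_t example_pop_free(struct slab *slabp)
{
	kmem_bufctl_t idx = slabp->free;

	if (idx != BUFCTL_END) {
		slabp->free = example_bufctl(slabp)[idx];
		slabp->inuse++;
	}
	return idx;
}
#endif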
229 
230 /*
231  * struct slab_rcu
232  *
233  * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
234  * arrange for kmem_freepages to be called via RCU.  This is useful if
235  * we need to approach a kernel structure obliquely, from its address
236  * obtained without the usual locking.  We can lock the structure to
237  * stabilize it and check it's still at the given address, only if we
238  * can be sure that the memory has not been meanwhile reused for some
239  * other kind of object (which our subsystem's lock might corrupt).
240  *
241  * rcu_read_lock before reading the address, then rcu_read_unlock after
242  * taking the spinlock within the structure expected at that address.
243  *
244  * We assume struct slab_rcu can overlay struct slab when destroying.
245  */
246 struct slab_rcu {
247 	struct rcu_head head;
248 	struct kmem_cache *cachep;
249 	void *addr;
250 };
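/*
 * Illustrative sketch of the lockless-lookup pattern that SLAB_DESTROY_BY_RCU
 * enables (kept under "#if 0"; "struct thing", its ->key field and
 * example_lookup() are hypothetical).  Because the pages cannot be returned
 * to the page allocator while inside the RCU read section, the pointer may be
 * dereferenced and locked, and the identity re-checked afterwards.
 */
#if 0
struct thing {
	spinlock_t lock;
	unsigned long key;
};

static struct thing *example_lookup(struct thing **slot, unsigned long key)
{
	struct thing *t;

	rcu_read_lock();
	t = rcu_dereference(*slot);
	if (t) {
		spin_lock(&t->lock);
		if (t->key != key) {	/* memory reused for another object? */
			spin_unlock(&t->lock);
			t = NULL;
		}
	}
	rcu_read_unlock();
	return t;			/* still locked on success */
}
#endif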
251 
252 /*
253  * struct array_cache
254  *
255  * Purpose:
256  * - LIFO ordering, to hand out cache-warm objects from _alloc
257  * - reduce the number of linked list operations
258  * - reduce spinlock operations
259  *
260  * The limit is stored in the per-cpu structure to reduce the data cache
261  * footprint.
262  *
263  */
264 struct array_cache {
265 	unsigned int avail;
266 	unsigned int limit;
267 	unsigned int batchcount;
268 	unsigned int touched;
269 	spinlock_t lock;
270 	void *entry[];	/*
271 			 * Must have this definition in here for the proper
272 			 * alignment of array_cache. Also simplifies accessing
273 			 * the entries.
274 			 */
275 };
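/*
 * Illustrative sketch of the LIFO behaviour described above (hypothetical
 * example_* helpers, kept under "#if 0"): allocation pops the most recently
 * freed, cache-warm object; freeing pushes until the limit is hit, at which
 * point a batch has to be flushed back to the slab lists.
 */
#if 0
static inline void *example_ac_pop(struct array_cache *ac)
{
	return ac->avail ? ac->entry[--ac->avail] : NULL;
}

static inline int example_ac_push(struct array_cache *ac, void *objp)
{
	if (ac->avail < ac->limit) {
		ac->entry[ac->avail++] = objp;
		return 1;
	}
	return 0;	/* overflow: caller must drain a batch first */
}
#endif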
276 
277 /*
278  * bootstrap: The caches do not work without cpuarrays anymore, but the
279  * cpuarrays are allocated from the generic caches...
280  */
281 #define BOOT_CPUCACHE_ENTRIES	1
282 struct arraycache_init {
283 	struct array_cache cache;
284 	void *entries[BOOT_CPUCACHE_ENTRIES];
285 };
286 
287 /*
288  * The slab lists for all objects.
289  */
290 struct kmem_list3 {
291 	struct list_head slabs_partial;	/* partial list first, better asm code */
292 	struct list_head slabs_full;
293 	struct list_head slabs_free;
294 	unsigned long free_objects;
295 	unsigned int free_limit;
296 	unsigned int colour_next;	/* Per-node cache coloring */
297 	spinlock_t list_lock;
298 	struct array_cache *shared;	/* shared per node */
299 	struct array_cache **alien;	/* on other nodes */
300 	unsigned long next_reap;	/* updated without locking */
301 	int free_touched;		/* updated without locking */
302 };
303 
304 /*
305  * Need this for bootstrapping a per node allocator.
306  */
307 #define NUM_INIT_LISTS (3 * MAX_NUMNODES)
308 struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
309 #define	CACHE_CACHE 0
310 #define	SIZE_AC MAX_NUMNODES
311 #define	SIZE_L3 (2 * MAX_NUMNODES)
312 
313 static int drain_freelist(struct kmem_cache *cache,
314 			struct kmem_list3 *l3, int tofree);
315 static void free_block(struct kmem_cache *cachep, void **objpp, int len,
316 			int node);
317 static int enable_cpucache(struct kmem_cache *cachep);
318 static void cache_reap(struct work_struct *unused);
319 
320 /*
321  * This function must be completely optimized away if a constant is passed to
322  * it.  Mostly the same as what is in linux/slab.h except it returns an index.
323  */
324 static __always_inline int index_of(const size_t size)
325 {
326 	extern void __bad_size(void);
327 
328 	if (__builtin_constant_p(size)) {
329 		int i = 0;
330 
331 #define CACHE(x) \
332 	if (size <=x) \
333 		return i; \
334 	else \
335 		i++;
336 #include <linux/kmalloc_sizes.h>
337 #undef CACHE
338 		__bad_size();
339 	} else
340 		__bad_size();
341 	return 0;
342 }
343 
344 static int slab_early_init = 1;
345 
346 #define INDEX_AC index_of(sizeof(struct arraycache_init))
347 #define INDEX_L3 index_of(sizeof(struct kmem_list3))
348 
349 static void kmem_list3_init(struct kmem_list3 *parent)
350 {
351 	INIT_LIST_HEAD(&parent->slabs_full);
352 	INIT_LIST_HEAD(&parent->slabs_partial);
353 	INIT_LIST_HEAD(&parent->slabs_free);
354 	parent->shared = NULL;
355 	parent->alien = NULL;
356 	parent->colour_next = 0;
357 	spin_lock_init(&parent->list_lock);
358 	parent->free_objects = 0;
359 	parent->free_touched = 0;
360 }
361 
362 #define MAKE_LIST(cachep, listp, slab, nodeid)				\
363 	do {								\
364 		INIT_LIST_HEAD(listp);					\
365 		list_splice(&(cachep->nodelists[nodeid]->slab), listp);	\
366 	} while (0)
367 
368 #define	MAKE_ALL_LISTS(cachep, ptr, nodeid)				\
369 	do {								\
370 	MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid);	\
371 	MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
372 	MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid);	\
373 	} while (0)
374 
375 /*
376  * struct kmem_cache
377  *
378  * manages a cache.
379  */
380 
381 struct kmem_cache {
382 /* 1) per-cpu data, touched during every alloc/free */
383 	struct array_cache *array[NR_CPUS];
384 /* 2) Cache tunables. Protected by cache_chain_mutex */
385 	unsigned int batchcount;
386 	unsigned int limit;
387 	unsigned int shared;
388 
389 	unsigned int buffer_size;
390 	u32 reciprocal_buffer_size;
391 /* 3) touched by every alloc & free from the backend */
392 
393 	unsigned int flags;		/* constant flags */
394 	unsigned int num;		/* # of objs per slab */
395 
396 /* 4) cache_grow/shrink */
397 	/* order of pgs per slab (2^n) */
398 	unsigned int gfporder;
399 
400 	/* force GFP flags, e.g. GFP_DMA */
401 	gfp_t gfpflags;
402 
403 	size_t colour;			/* cache colouring range */
404 	unsigned int colour_off;	/* colour offset */
405 	struct kmem_cache *slabp_cache;
406 	unsigned int slab_size;
407 	unsigned int dflags;		/* dynamic flags */
408 
409 	/* constructor func */
410 	void (*ctor)(void *obj);
411 
412 /* 5) cache creation/removal */
413 	const char *name;
414 	struct list_head next;
415 
416 /* 6) statistics */
417 #if STATS
418 	unsigned long num_active;
419 	unsigned long num_allocations;
420 	unsigned long high_mark;
421 	unsigned long grown;
422 	unsigned long reaped;
423 	unsigned long errors;
424 	unsigned long max_freeable;
425 	unsigned long node_allocs;
426 	unsigned long node_frees;
427 	unsigned long node_overflow;
428 	atomic_t allochit;
429 	atomic_t allocmiss;
430 	atomic_t freehit;
431 	atomic_t freemiss;
432 #endif
433 #if DEBUG
434 	/*
435 	 * If debugging is enabled, then the allocator can add additional
436 	 * fields and/or padding to every object. buffer_size contains the total
437 	 * object size including these internal fields; the following two
438 	 * variables contain the offset to the user object and its size.
439 	 */
440 	int obj_offset;
441 	int obj_size;
442 #endif
443 	/*
444 	 * We put nodelists[] at the end of kmem_cache, because we want to size
445 	 * this array to nr_node_ids slots instead of MAX_NUMNODES
446 	 * (see kmem_cache_init())
447 	 * We still use [MAX_NUMNODES] and not [1] or [0] because cache_cache
448 	 * is statically defined, so we reserve the max number of nodes.
449 	 */
450 	struct kmem_list3 *nodelists[MAX_NUMNODES];
451 	/*
452 	 * Do not add fields after nodelists[]
453 	 */
454 };
455 
456 #define CFLGS_OFF_SLAB		(0x80000000UL)
457 #define	OFF_SLAB(x)	((x)->flags & CFLGS_OFF_SLAB)
458 
459 #define BATCHREFILL_LIMIT	16
460 /*
461  * Optimization question: fewer reaps mean a lower probability of unnecessary
462  * cpucache drain/refill cycles.
463  *
464  * OTOH the cpuarrays can contain lots of objects,
465  * which could lock up otherwise freeable slabs.
466  */
467 #define REAPTIMEOUT_CPUC	(2*HZ)
468 #define REAPTIMEOUT_LIST3	(4*HZ)
469 
470 #if STATS
471 #define	STATS_INC_ACTIVE(x)	((x)->num_active++)
472 #define	STATS_DEC_ACTIVE(x)	((x)->num_active--)
473 #define	STATS_INC_ALLOCED(x)	((x)->num_allocations++)
474 #define	STATS_INC_GROWN(x)	((x)->grown++)
475 #define	STATS_ADD_REAPED(x,y)	((x)->reaped += (y))
476 #define	STATS_SET_HIGH(x)						\
477 	do {								\
478 		if ((x)->num_active > (x)->high_mark)			\
479 			(x)->high_mark = (x)->num_active;		\
480 	} while (0)
481 #define	STATS_INC_ERR(x)	((x)->errors++)
482 #define	STATS_INC_NODEALLOCS(x)	((x)->node_allocs++)
483 #define	STATS_INC_NODEFREES(x)	((x)->node_frees++)
484 #define STATS_INC_ACOVERFLOW(x)   ((x)->node_overflow++)
485 #define	STATS_SET_FREEABLE(x, i)					\
486 	do {								\
487 		if ((x)->max_freeable < i)				\
488 			(x)->max_freeable = i;				\
489 	} while (0)
490 #define STATS_INC_ALLOCHIT(x)	atomic_inc(&(x)->allochit)
491 #define STATS_INC_ALLOCMISS(x)	atomic_inc(&(x)->allocmiss)
492 #define STATS_INC_FREEHIT(x)	atomic_inc(&(x)->freehit)
493 #define STATS_INC_FREEMISS(x)	atomic_inc(&(x)->freemiss)
494 #else
495 #define	STATS_INC_ACTIVE(x)	do { } while (0)
496 #define	STATS_DEC_ACTIVE(x)	do { } while (0)
497 #define	STATS_INC_ALLOCED(x)	do { } while (0)
498 #define	STATS_INC_GROWN(x)	do { } while (0)
499 #define	STATS_ADD_REAPED(x,y)	do { } while (0)
500 #define	STATS_SET_HIGH(x)	do { } while (0)
501 #define	STATS_INC_ERR(x)	do { } while (0)
502 #define	STATS_INC_NODEALLOCS(x)	do { } while (0)
503 #define	STATS_INC_NODEFREES(x)	do { } while (0)
504 #define STATS_INC_ACOVERFLOW(x)   do { } while (0)
505 #define	STATS_SET_FREEABLE(x, i) do { } while (0)
506 #define STATS_INC_ALLOCHIT(x)	do { } while (0)
507 #define STATS_INC_ALLOCMISS(x)	do { } while (0)
508 #define STATS_INC_FREEHIT(x)	do { } while (0)
509 #define STATS_INC_FREEMISS(x)	do { } while (0)
510 #endif
511 
512 #if DEBUG
513 
514 /*
515  * memory layout of objects:
516  * 0		: objp
517  * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
518  * 		the end of an object is aligned with the end of the real
519  * 		allocation. Catches writes behind the end of the allocation.
520  * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
521  * 		redzone word.
522  * cachep->obj_offset: The real object.
523  * cachep->buffer_size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
524  * cachep->buffer_size - 1* BYTES_PER_WORD: last caller address
525  *					[BYTES_PER_WORD long]
526  */
527 static int obj_offset(struct kmem_cache *cachep)
528 {
529 	return cachep->obj_offset;
530 }
531 
532 static int obj_size(struct kmem_cache *cachep)
533 {
534 	return cachep->obj_size;
535 }
536 
537 static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
538 {
539 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
540 	return (unsigned long long*) (objp + obj_offset(cachep) -
541 				      sizeof(unsigned long long));
542 }
543 
544 static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
545 {
546 	BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
547 	if (cachep->flags & SLAB_STORE_USER)
548 		return (unsigned long long *)(objp + cachep->buffer_size -
549 					      sizeof(unsigned long long) -
550 					      REDZONE_ALIGN);
551 	return (unsigned long long *) (objp + cachep->buffer_size -
552 				       sizeof(unsigned long long));
553 }
554 
555 static void **dbg_userword(struct kmem_cache *cachep, void *objp)
556 {
557 	BUG_ON(!(cachep->flags & SLAB_STORE_USER));
558 	return (void **)(objp + cachep->buffer_size - BYTES_PER_WORD);
559 }
560 
561 #else
562 
563 #define obj_offset(x)			0
564 #define obj_size(cachep)		(cachep->buffer_size)
565 #define dbg_redzone1(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
566 #define dbg_redzone2(cachep, objp)	({BUG(); (unsigned long long *)NULL;})
567 #define dbg_userword(cachep, objp)	({BUG(); (void **)NULL;})
568 
569 #endif
570 
571 /*
572  * Do not go above this order unless 0 objects fit into the slab.
573  */
574 #define	BREAK_GFP_ORDER_HI	1
575 #define	BREAK_GFP_ORDER_LO	0
576 static int slab_break_gfp_order = BREAK_GFP_ORDER_LO;
577 
578 /*
579  * Functions for storing/retrieving the cachep and/or slab from the page
580  * allocator.  These are used to find the slab an obj belongs to.  With kfree(),
581  * these are used to find the cache to which an obj belongs.
582  */
583 static inline void page_set_cache(struct page *page, struct kmem_cache *cache)
584 {
585 	page->lru.next = (struct list_head *)cache;
586 }
587 
588 static inline struct kmem_cache *page_get_cache(struct page *page)
589 {
590 	page = compound_head(page);
591 	BUG_ON(!PageSlab(page));
592 	return (struct kmem_cache *)page->lru.next;
593 }
594 
595 static inline void page_set_slab(struct page *page, struct slab *slab)
596 {
597 	page->lru.prev = (struct list_head *)slab;
598 }
599 
600 static inline struct slab *page_get_slab(struct page *page)
601 {
602 	BUG_ON(!PageSlab(page));
603 	return (struct slab *)page->lru.prev;
604 }
605 
606 static inline struct kmem_cache *virt_to_cache(const void *obj)
607 {
608 	struct page *page = virt_to_head_page(obj);
609 	return page_get_cache(page);
610 }
611 
612 static inline struct slab *virt_to_slab(const void *obj)
613 {
614 	struct page *page = virt_to_head_page(obj);
615 	return page_get_slab(page);
616 }
617 
618 static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
619 				 unsigned int idx)
620 {
621 	return slab->s_mem + cache->buffer_size * idx;
622 }
623 
624 /*
625  * We want to avoid an expensive divide : (offset / cache->buffer_size)
626  *   Using the fact that buffer_size is a constant for a particular cache,
627  *   we can replace (offset / cache->buffer_size) by
628  *   reciprocal_divide(offset, cache->reciprocal_buffer_size)
629  */
630 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
631 					const struct slab *slab, void *obj)
632 {
633 	u32 offset = (obj - slab->s_mem);
634 	return reciprocal_divide(offset, cache->reciprocal_buffer_size);
635 }
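/*
 * Illustrative use of the reciprocal trick above (hypothetical
 * example_obj_index(), kept under "#if 0"): the reciprocal is computed once
 * per cache, and every subsequent "division" is a multiply plus a shift.  For
 * the offsets seen here, which always lie within a single slab, the result
 * equals offset / buffer_size.
 */
#if 0
static inline unsigned int example_obj_index(u32 offset, u32 buffer_size)
{
	u32 recip = reciprocal_value(buffer_size);	/* cached per kmem_cache */

	return reciprocal_divide(offset, recip);
}
#endif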
636 
637 /*
638  * These are the default caches for kmalloc. Custom caches can have other sizes.
639  */
640 struct cache_sizes malloc_sizes[] = {
641 #define CACHE(x) { .cs_size = (x) },
642 #include <linux/kmalloc_sizes.h>
643 	CACHE(ULONG_MAX)
644 #undef CACHE
645 };
646 EXPORT_SYMBOL(malloc_sizes);
647 
648 /* Must match cache_sizes above. Out of line to keep cache footprint low. */
649 struct cache_names {
650 	char *name;
651 	char *name_dma;
652 };
653 
654 static struct cache_names __initdata cache_names[] = {
655 #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
656 #include <linux/kmalloc_sizes.h>
657 	{NULL,}
658 #undef CACHE
659 };
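/*
 * Illustrative expansion of the <linux/kmalloc_sizes.h> x-macro pattern used
 * twice above (assuming, hypothetically, that the header listed only
 * CACHE(32) and CACHE(64)): each includer defines CACHE() to stamp out one
 * entry per size.
 */
#if 0
struct cache_sizes malloc_sizes[] = {
	{ .cs_size = (32) },
	{ .cs_size = (64) },
	{ .cs_size = (ULONG_MAX) },
};
static struct cache_names __initdata cache_names[] = {
	{ .name = "size-32", .name_dma = "size-32(DMA)" },
	{ .name = "size-64", .name_dma = "size-64(DMA)" },
	{NULL,}
};
#endif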
660 
661 static struct arraycache_init initarray_cache __initdata =
662     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
663 static struct arraycache_init initarray_generic =
664     { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
665 
666 /* internal cache of cache description objs */
667 static struct kmem_cache cache_cache = {
668 	.batchcount = 1,
669 	.limit = BOOT_CPUCACHE_ENTRIES,
670 	.shared = 1,
671 	.buffer_size = sizeof(struct kmem_cache),
672 	.name = "kmem_cache",
673 };
674 
675 #define BAD_ALIEN_MAGIC 0x01020304ul
676 
677 #ifdef CONFIG_LOCKDEP
678 
679 /*
680  * Slab sometimes uses the kmalloc slabs to store the slab headers
681  * for other slabs "off slab".
682  * The locking for this is tricky in that it nests within the locks
683  * of all other slabs in a few places; to deal with this special
684  * locking we put on-slab caches into a separate lock-class.
685  *
686  * We set lock class for alien array caches which are up during init.
687  * The lock annotation will be lost if all cpus of a node go down and
688  * then come back up during hotplug.
689  */
690 static struct lock_class_key on_slab_l3_key;
691 static struct lock_class_key on_slab_alc_key;
692 
693 static inline void init_lock_keys(void)
694 
695 {
696 	int q;
697 	struct cache_sizes *s = malloc_sizes;
698 
699 	while (s->cs_size != ULONG_MAX) {
700 		for_each_node(q) {
701 			struct array_cache **alc;
702 			int r;
703 			struct kmem_list3 *l3 = s->cs_cachep->nodelists[q];
704 			if (!l3 || OFF_SLAB(s->cs_cachep))
705 				continue;
706 			lockdep_set_class(&l3->list_lock, &on_slab_l3_key);
707 			alc = l3->alien;
708 			/*
709 			 * FIXME: This check for BAD_ALIEN_MAGIC
710 			 * should go away when common slab code is taught to
711 			 * work even without alien caches.
712 			 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
713 			 * for alloc_alien_cache,
714 			 */
715 			if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
716 				continue;
717 			for_each_node(r) {
718 				if (alc[r])
719 					lockdep_set_class(&alc[r]->lock,
720 					     &on_slab_alc_key);
721 			}
722 		}
723 		s++;
724 	}
725 }
726 #else
727 static inline void init_lock_keys(void)
728 {
729 }
730 #endif
731 
732 /*
733  * Guard access to the cache-chain.
734  */
735 static DEFINE_MUTEX(cache_chain_mutex);
736 static struct list_head cache_chain;
737 
738 /*
739  * chicken and egg problem: delay the per-cpu array allocation
740  * until the general caches are up.
741  */
742 static enum {
743 	NONE,
744 	PARTIAL_AC,
745 	PARTIAL_L3,
746 	FULL
747 } g_cpucache_up;
748 
749 /*
750  * used by boot code to determine if it can use slab based allocator
751  */
752 int slab_is_available(void)
753 {
754 	return g_cpucache_up == FULL;
755 }
756 
757 static DEFINE_PER_CPU(struct delayed_work, reap_work);
758 
759 static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
760 {
761 	return cachep->array[smp_processor_id()];
762 }
763 
764 static inline struct kmem_cache *__find_general_cachep(size_t size,
765 							gfp_t gfpflags)
766 {
767 	struct cache_sizes *csizep = malloc_sizes;
768 
769 #if DEBUG
770 	/* This happens if someone tries to call
771 	 * kmem_cache_create(), or __kmalloc(), before
772 	 * the generic caches are initialized.
773 	 */
774 	BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
775 #endif
776 	if (!size)
777 		return ZERO_SIZE_PTR;
778 
779 	while (size > csizep->cs_size)
780 		csizep++;
781 
782 	/*
783 	 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
784 	 * has cs_{dma,}cachep==NULL. Thus no special case
785 	 * for large kmalloc calls required.
786 	 */
787 #ifdef CONFIG_ZONE_DMA
788 	if (unlikely(gfpflags & GFP_DMA))
789 		return csizep->cs_dmacachep;
790 #endif
791 	return csizep->cs_cachep;
792 }
793 
794 static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
795 {
796 	return __find_general_cachep(size, gfpflags);
797 }
798 
799 static size_t slab_mgmt_size(size_t nr_objs, size_t align)
800 {
801 	return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
802 }
803 
804 /*
805  * Calculate the number of objects and left-over bytes for a given buffer size.
806  */
807 static void cache_estimate(unsigned long gfporder, size_t buffer_size,
808 			   size_t align, int flags, size_t *left_over,
809 			   unsigned int *num)
810 {
811 	int nr_objs;
812 	size_t mgmt_size;
813 	size_t slab_size = PAGE_SIZE << gfporder;
814 
815 	/*
816 	 * The slab management structure can be either off the slab or
817 	 * on it. For the latter case, the memory allocated for a
818 	 * slab is used for:
819 	 *
820 	 * - The struct slab
821 	 * - One kmem_bufctl_t for each object
822 	 * - Padding to respect alignment of @align
823 	 * - @buffer_size bytes for each object
824 	 *
825 	 * If the slab management structure is off the slab, then the
826 	 * alignment will already be calculated into the size. Because
827 	 * the slabs are all pages aligned, the objects will be at the
828 	 * correct alignment when allocated.
829 	 */
830 	if (flags & CFLGS_OFF_SLAB) {
831 		mgmt_size = 0;
832 		nr_objs = slab_size / buffer_size;
833 
834 		if (nr_objs > SLAB_LIMIT)
835 			nr_objs = SLAB_LIMIT;
836 	} else {
837 		/*
838 		 * Ignore padding for the initial guess. The padding
839 		 * is at most @align-1 bytes, and @buffer_size is at
840 		 * least @align. In the worst case, this result will
841 		 * be one greater than the number of objects that fit
842 		 * into the memory allocation when taking the padding
843 		 * into account.
844 		 */
845 		nr_objs = (slab_size - sizeof(struct slab)) /
846 			  (buffer_size + sizeof(kmem_bufctl_t));
847 
848 		/*
849 		 * This calculated number will be either the right
850 		 * amount, or one greater than what we want.
851 		 */
852 		if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
853 		       > slab_size)
854 			nr_objs--;
855 
856 		if (nr_objs > SLAB_LIMIT)
857 			nr_objs = SLAB_LIMIT;
858 
859 		mgmt_size = slab_mgmt_size(nr_objs, align);
860 	}
861 	*num = nr_objs;
862 	*left_over = slab_size - nr_objs*buffer_size - mgmt_size;
863 }
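/*
 * Example call with hypothetical values (kept under "#if 0"): how many
 * 256-byte objects, aligned to 32 bytes, fit into an order-0 slab with
 * on-slab management?  The left-over bytes are later spent on cache
 * colouring (cachep->colour = left_over / colour_off).
 */
#if 0
static void example_estimate(void)
{
	size_t left_over;
	unsigned int num;

	cache_estimate(0, 256, 32, 0, &left_over, &num);
	/* num now holds the object count, left_over the unused remainder */
}
#endif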
864 
865 #define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
866 
867 static void __slab_error(const char *function, struct kmem_cache *cachep,
868 			char *msg)
869 {
870 	printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
871 	       function, cachep->name, msg);
872 	dump_stack();
873 }
874 
875 /*
876  * By default on NUMA we use alien caches to stage the freeing of
877  * objects allocated from other nodes. This causes massive memory
878  * inefficiencies when using a fake NUMA setup to split memory into a
879  * large number of small nodes, so it can be disabled on the command
880  * line
881   */
882 
883 static int use_alien_caches __read_mostly = 1;
884 static int numa_platform __read_mostly = 1;
885 static int __init noaliencache_setup(char *s)
886 {
887 	use_alien_caches = 0;
888 	return 1;
889 }
890 __setup("noaliencache", noaliencache_setup);
891 
892 #ifdef CONFIG_NUMA
893 /*
894  * Special reaping functions for NUMA systems called from cache_reap().
895  * These take care of doing round robin flushing of alien caches (containing
896  * objects freed on different nodes from which they were allocated) and the
897  * flushing of remote pcps by calling drain_node_pages.
898  */
899 static DEFINE_PER_CPU(unsigned long, reap_node);
900 
901 static void init_reap_node(int cpu)
902 {
903 	int node;
904 
905 	node = next_node(cpu_to_node(cpu), node_online_map);
906 	if (node == MAX_NUMNODES)
907 		node = first_node(node_online_map);
908 
909 	per_cpu(reap_node, cpu) = node;
910 }
911 
912 static void next_reap_node(void)
913 {
914 	int node = __get_cpu_var(reap_node);
915 
916 	node = next_node(node, node_online_map);
917 	if (unlikely(node >= MAX_NUMNODES))
918 		node = first_node(node_online_map);
919 	__get_cpu_var(reap_node) = node;
920 }
921 
922 #else
923 #define init_reap_node(cpu) do { } while (0)
924 #define next_reap_node(void) do { } while (0)
925 #endif
926 
927 /*
928  * Initiate the reap timer running on the target CPU.  We run at around 1 to 2Hz
929  * via the workqueue/eventd.
930  * Add the CPU number into the expiration time to minimize the possibility of
931  * the CPUs getting into lockstep and contending for the global cache chain
932  * lock.
933  */
934 static void __cpuinit start_cpu_timer(int cpu)
935 {
936 	struct delayed_work *reap_work = &per_cpu(reap_work, cpu);
937 
938 	/*
939 	 * When this gets called from do_initcalls via cpucache_init(),
940 	 * init_workqueues() has already run, so keventd will be setup
941 	 * at that time.
942 	 */
943 	if (keventd_up() && reap_work->work.func == NULL) {
944 		init_reap_node(cpu);
945 		INIT_DELAYED_WORK(reap_work, cache_reap);
946 		schedule_delayed_work_on(cpu, reap_work,
947 					__round_jiffies_relative(HZ, cpu));
948 	}
949 }
950 
951 static struct array_cache *alloc_arraycache(int node, int entries,
952 					    int batchcount)
953 {
954 	int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
955 	struct array_cache *nc = NULL;
956 
957 	nc = kmalloc_node(memsize, GFP_KERNEL, node);
958 	if (nc) {
959 		nc->avail = 0;
960 		nc->limit = entries;
961 		nc->batchcount = batchcount;
962 		nc->touched = 0;
963 		spin_lock_init(&nc->lock);
964 	}
965 	return nc;
966 }
967 
968 /*
969  * Transfer objects in one arraycache to another.
970  * Locking must be handled by the caller.
971  *
972  * Return the number of entries transferred.
973  */
974 static int transfer_objects(struct array_cache *to,
975 		struct array_cache *from, unsigned int max)
976 {
977 	/* Figure out how many entries to transfer */
978 	int nr = min(min(from->avail, max), to->limit - to->avail);
979 
980 	if (!nr)
981 		return 0;
982 
983 	memcpy(to->entry + to->avail, from->entry + from->avail -nr,
984 			sizeof(void *) *nr);
985 
986 	from->avail -= nr;
987 	to->avail += nr;
988 	to->touched = 1;
989 	return nr;
990 }
991 
992 #ifndef CONFIG_NUMA
993 
994 #define drain_alien_cache(cachep, alien) do { } while (0)
995 #define reap_alien(cachep, l3) do { } while (0)
996 
997 static inline struct array_cache **alloc_alien_cache(int node, int limit)
998 {
999 	return (struct array_cache **)BAD_ALIEN_MAGIC;
1000 }
1001 
1002 static inline void free_alien_cache(struct array_cache **ac_ptr)
1003 {
1004 }
1005 
1006 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1007 {
1008 	return 0;
1009 }
1010 
1011 static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1012 		gfp_t flags)
1013 {
1014 	return NULL;
1015 }
1016 
1017 static inline void *____cache_alloc_node(struct kmem_cache *cachep,
1018 		 gfp_t flags, int nodeid)
1019 {
1020 	return NULL;
1021 }
1022 
1023 #else	/* CONFIG_NUMA */
1024 
1025 static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
1026 static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
1027 
1028 static struct array_cache **alloc_alien_cache(int node, int limit)
1029 {
1030 	struct array_cache **ac_ptr;
1031 	int memsize = sizeof(void *) * nr_node_ids;
1032 	int i;
1033 
1034 	if (limit > 1)
1035 		limit = 12;
1036 	ac_ptr = kmalloc_node(memsize, GFP_KERNEL, node);
1037 	if (ac_ptr) {
1038 		for_each_node(i) {
1039 			if (i == node || !node_online(i)) {
1040 				ac_ptr[i] = NULL;
1041 				continue;
1042 			}
1043 			ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d);
1044 			if (!ac_ptr[i]) {
1045 				for (i--; i >= 0; i--)
1046 					kfree(ac_ptr[i]);
1047 				kfree(ac_ptr);
1048 				return NULL;
1049 			}
1050 		}
1051 	}
1052 	return ac_ptr;
1053 }
1054 
1055 static void free_alien_cache(struct array_cache **ac_ptr)
1056 {
1057 	int i;
1058 
1059 	if (!ac_ptr)
1060 		return;
1061 	for_each_node(i)
1062 	    kfree(ac_ptr[i]);
1063 	kfree(ac_ptr);
1064 }
1065 
1066 static void __drain_alien_cache(struct kmem_cache *cachep,
1067 				struct array_cache *ac, int node)
1068 {
1069 	struct kmem_list3 *rl3 = cachep->nodelists[node];
1070 
1071 	if (ac->avail) {
1072 		spin_lock(&rl3->list_lock);
1073 		/*
1074 		 * Stuff objects into the remote node's shared array first.
1075 		 * That way we could avoid the overhead of putting the objects
1076 		 * into the free lists and getting them back later.
1077 		 */
1078 		if (rl3->shared)
1079 			transfer_objects(rl3->shared, ac, ac->limit);
1080 
1081 		free_block(cachep, ac->entry, ac->avail, node);
1082 		ac->avail = 0;
1083 		spin_unlock(&rl3->list_lock);
1084 	}
1085 }
1086 
1087 /*
1088  * Called from cache_reap() to regularly drain alien caches round robin.
1089  */
1090 static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1091 {
1092 	int node = __get_cpu_var(reap_node);
1093 
1094 	if (l3->alien) {
1095 		struct array_cache *ac = l3->alien[node];
1096 
1097 		if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
1098 			__drain_alien_cache(cachep, ac, node);
1099 			spin_unlock_irq(&ac->lock);
1100 		}
1101 	}
1102 }
1103 
1104 static void drain_alien_cache(struct kmem_cache *cachep,
1105 				struct array_cache **alien)
1106 {
1107 	int i = 0;
1108 	struct array_cache *ac;
1109 	unsigned long flags;
1110 
1111 	for_each_online_node(i) {
1112 		ac = alien[i];
1113 		if (ac) {
1114 			spin_lock_irqsave(&ac->lock, flags);
1115 			__drain_alien_cache(cachep, ac, i);
1116 			spin_unlock_irqrestore(&ac->lock, flags);
1117 		}
1118 	}
1119 }
1120 
1121 static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1122 {
1123 	struct slab *slabp = virt_to_slab(objp);
1124 	int nodeid = slabp->nodeid;
1125 	struct kmem_list3 *l3;
1126 	struct array_cache *alien = NULL;
1127 	int node;
1128 
1129 	node = numa_node_id();
1130 
1131 	/*
1132 	 * Make sure we are not freeing an object from another node to the array
1133 	 * cache on this cpu.
1134 	 */
1135 	if (likely(slabp->nodeid == node))
1136 		return 0;
1137 
1138 	l3 = cachep->nodelists[node];
1139 	STATS_INC_NODEFREES(cachep);
1140 	if (l3->alien && l3->alien[nodeid]) {
1141 		alien = l3->alien[nodeid];
1142 		spin_lock(&alien->lock);
1143 		if (unlikely(alien->avail == alien->limit)) {
1144 			STATS_INC_ACOVERFLOW(cachep);
1145 			__drain_alien_cache(cachep, alien, nodeid);
1146 		}
1147 		alien->entry[alien->avail++] = objp;
1148 		spin_unlock(&alien->lock);
1149 	} else {
1150 		spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1151 		free_block(cachep, &objp, 1, nodeid);
1152 		spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1153 	}
1154 	return 1;
1155 }
1156 #endif
1157 
1158 static void __cpuinit cpuup_canceled(long cpu)
1159 {
1160 	struct kmem_cache *cachep;
1161 	struct kmem_list3 *l3 = NULL;
1162 	int node = cpu_to_node(cpu);
1163 	node_to_cpumask_ptr(mask, node);
1164 
1165 	list_for_each_entry(cachep, &cache_chain, next) {
1166 		struct array_cache *nc;
1167 		struct array_cache *shared;
1168 		struct array_cache **alien;
1169 
1170 		/* cpu is dead; no one can alloc from it. */
1171 		nc = cachep->array[cpu];
1172 		cachep->array[cpu] = NULL;
1173 		l3 = cachep->nodelists[node];
1174 
1175 		if (!l3)
1176 			goto free_array_cache;
1177 
1178 		spin_lock_irq(&l3->list_lock);
1179 
1180 		/* Free limit for this kmem_list3 */
1181 		l3->free_limit -= cachep->batchcount;
1182 		if (nc)
1183 			free_block(cachep, nc->entry, nc->avail, node);
1184 
1185 		if (!cpus_empty(*mask)) {
1186 			spin_unlock_irq(&l3->list_lock);
1187 			goto free_array_cache;
1188 		}
1189 
1190 		shared = l3->shared;
1191 		if (shared) {
1192 			free_block(cachep, shared->entry,
1193 				   shared->avail, node);
1194 			l3->shared = NULL;
1195 		}
1196 
1197 		alien = l3->alien;
1198 		l3->alien = NULL;
1199 
1200 		spin_unlock_irq(&l3->list_lock);
1201 
1202 		kfree(shared);
1203 		if (alien) {
1204 			drain_alien_cache(cachep, alien);
1205 			free_alien_cache(alien);
1206 		}
1207 free_array_cache:
1208 		kfree(nc);
1209 	}
1210 	/*
1211 	 * In the previous loop, all the objects were freed to
1212 	 * the respective cache's slabs; now we can go ahead and
1213 	 * shrink each nodelist to its limit.
1214 	 */
1215 	list_for_each_entry(cachep, &cache_chain, next) {
1216 		l3 = cachep->nodelists[node];
1217 		if (!l3)
1218 			continue;
1219 		drain_freelist(cachep, l3, l3->free_objects);
1220 	}
1221 }
1222 
1223 static int __cpuinit cpuup_prepare(long cpu)
1224 {
1225 	struct kmem_cache *cachep;
1226 	struct kmem_list3 *l3 = NULL;
1227 	int node = cpu_to_node(cpu);
1228 	const int memsize = sizeof(struct kmem_list3);
1229 
1230 	/*
1231 	 * We need to do this right in the beginning since
1232 	 * alloc_arraycache() calls are going to use this list.
1233 	 * kmalloc_node allows us to add the slab to the right
1234 	 * kmem_list3 and not this cpu's kmem_list3
1235 	 */
1236 
1237 	list_for_each_entry(cachep, &cache_chain, next) {
1238 		/*
1239 		 * Set up the size64 kmemlist for cpu before we can
1240 		 * begin anything. Make sure some other cpu on this
1241 		 * node has not already allocated this
1242 		 */
1243 		if (!cachep->nodelists[node]) {
1244 			l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1245 			if (!l3)
1246 				goto bad;
1247 			kmem_list3_init(l3);
1248 			l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1249 			    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1250 
1251 			/*
1252 			 * The l3s don't come and go as CPUs come and
1253 			 * go.  cache_chain_mutex is sufficient
1254 			 * protection here.
1255 			 */
1256 			cachep->nodelists[node] = l3;
1257 		}
1258 
1259 		spin_lock_irq(&cachep->nodelists[node]->list_lock);
1260 		cachep->nodelists[node]->free_limit =
1261 			(1 + nr_cpus_node(node)) *
1262 			cachep->batchcount + cachep->num;
1263 		spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1264 	}
1265 
1266 	/*
1267 	 * Now we can go ahead with allocating the shared arrays and
1268 	 * array caches
1269 	 */
1270 	list_for_each_entry(cachep, &cache_chain, next) {
1271 		struct array_cache *nc;
1272 		struct array_cache *shared = NULL;
1273 		struct array_cache **alien = NULL;
1274 
1275 		nc = alloc_arraycache(node, cachep->limit,
1276 					cachep->batchcount);
1277 		if (!nc)
1278 			goto bad;
1279 		if (cachep->shared) {
1280 			shared = alloc_arraycache(node,
1281 				cachep->shared * cachep->batchcount,
1282 				0xbaadf00d);
1283 			if (!shared) {
1284 				kfree(nc);
1285 				goto bad;
1286 			}
1287 		}
1288 		if (use_alien_caches) {
1289 			alien = alloc_alien_cache(node, cachep->limit);
1290 			if (!alien) {
1291 				kfree(shared);
1292 				kfree(nc);
1293 				goto bad;
1294 			}
1295 		}
1296 		cachep->array[cpu] = nc;
1297 		l3 = cachep->nodelists[node];
1298 		BUG_ON(!l3);
1299 
1300 		spin_lock_irq(&l3->list_lock);
1301 		if (!l3->shared) {
1302 			/*
1303 			 * We are serialised from CPU_DEAD or
1304 			 * CPU_UP_CANCELLED by the cpucontrol lock
1305 			 */
1306 			l3->shared = shared;
1307 			shared = NULL;
1308 		}
1309 #ifdef CONFIG_NUMA
1310 		if (!l3->alien) {
1311 			l3->alien = alien;
1312 			alien = NULL;
1313 		}
1314 #endif
1315 		spin_unlock_irq(&l3->list_lock);
1316 		kfree(shared);
1317 		free_alien_cache(alien);
1318 	}
1319 	return 0;
1320 bad:
1321 	cpuup_canceled(cpu);
1322 	return -ENOMEM;
1323 }
1324 
1325 static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1326 				    unsigned long action, void *hcpu)
1327 {
1328 	long cpu = (long)hcpu;
1329 	int err = 0;
1330 
1331 	switch (action) {
1332 	case CPU_UP_PREPARE:
1333 	case CPU_UP_PREPARE_FROZEN:
1334 		mutex_lock(&cache_chain_mutex);
1335 		err = cpuup_prepare(cpu);
1336 		mutex_unlock(&cache_chain_mutex);
1337 		break;
1338 	case CPU_ONLINE:
1339 	case CPU_ONLINE_FROZEN:
1340 		start_cpu_timer(cpu);
1341 		break;
1342 #ifdef CONFIG_HOTPLUG_CPU
1343   	case CPU_DOWN_PREPARE:
1344   	case CPU_DOWN_PREPARE_FROZEN:
1345 		/*
1346 		 * Shutdown cache reaper. Note that the cache_chain_mutex is
1347 		 * held so that if cache_reap() is invoked it cannot do
1348 		 * anything expensive but will only modify reap_work
1349 		 * and reschedule the timer.
1350 		*/
1351 		cancel_rearming_delayed_work(&per_cpu(reap_work, cpu));
1352 		/* Now the cache_reaper is guaranteed to be not running. */
1353 		per_cpu(reap_work, cpu).work.func = NULL;
1354   		break;
1355   	case CPU_DOWN_FAILED:
1356   	case CPU_DOWN_FAILED_FROZEN:
1357 		start_cpu_timer(cpu);
1358   		break;
1359 	case CPU_DEAD:
1360 	case CPU_DEAD_FROZEN:
1361 		/*
1362 		 * Even if all the cpus of a node are down, we don't free the
1363 		 * kmem_list3 of any cache. This is to avoid a race between
1364 		 * cpu_down and a kmalloc allocation from another cpu for
1365 		 * memory from the node of the cpu going down.  The list3
1366 		 * structure is usually allocated from kmem_cache_create() and
1367 		 * gets destroyed at kmem_cache_destroy().
1368 		 */
1369 		/* fall through */
1370 #endif
1371 	case CPU_UP_CANCELED:
1372 	case CPU_UP_CANCELED_FROZEN:
1373 		mutex_lock(&cache_chain_mutex);
1374 		cpuup_canceled(cpu);
1375 		mutex_unlock(&cache_chain_mutex);
1376 		break;
1377 	}
1378 	return err ? NOTIFY_BAD : NOTIFY_OK;
1379 }
1380 
1381 static struct notifier_block __cpuinitdata cpucache_notifier = {
1382 	&cpuup_callback, NULL, 0
1383 };
1384 
1385 /*
1386  * swap the static kmem_list3 with kmalloced memory
1387  */
1388 static void init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1389 			int nodeid)
1390 {
1391 	struct kmem_list3 *ptr;
1392 
1393 	ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, nodeid);
1394 	BUG_ON(!ptr);
1395 
1396 	local_irq_disable();
1397 	memcpy(ptr, list, sizeof(struct kmem_list3));
1398 	/*
1399 	 * Do not assume that spinlocks can be initialized via memcpy:
1400 	 */
1401 	spin_lock_init(&ptr->list_lock);
1402 
1403 	MAKE_ALL_LISTS(cachep, ptr, nodeid);
1404 	cachep->nodelists[nodeid] = ptr;
1405 	local_irq_enable();
1406 }
1407 
1408 /*
1409  * For setting up all the kmem_list3s for cache whose buffer_size is same as
1410  * size of kmem_list3.
1411  */
1412 static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1413 {
1414 	int node;
1415 
1416 	for_each_online_node(node) {
1417 		cachep->nodelists[node] = &initkmem_list3[index + node];
1418 		cachep->nodelists[node]->next_reap = jiffies +
1419 		    REAPTIMEOUT_LIST3 +
1420 		    ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1421 	}
1422 }
1423 
1424 /*
1425  * Initialisation.  Called after the page allocator has been initialised and
1426  * before smp_init().
1427  */
1428 void __init kmem_cache_init(void)
1429 {
1430 	size_t left_over;
1431 	struct cache_sizes *sizes;
1432 	struct cache_names *names;
1433 	int i;
1434 	int order;
1435 	int node;
1436 
1437 	if (num_possible_nodes() == 1) {
1438 		use_alien_caches = 0;
1439 		numa_platform = 0;
1440 	}
1441 
1442 	for (i = 0; i < NUM_INIT_LISTS; i++) {
1443 		kmem_list3_init(&initkmem_list3[i]);
1444 		if (i < MAX_NUMNODES)
1445 			cache_cache.nodelists[i] = NULL;
1446 	}
1447 	set_up_list3s(&cache_cache, CACHE_CACHE);
1448 
1449 	/*
1450 	 * Fragmentation resistance on low memory - only use bigger
1451 	 * page orders on machines with more than 32MB of memory.
1452 	 */
1453 	if (num_physpages > (32 << 20) >> PAGE_SHIFT)
1454 		slab_break_gfp_order = BREAK_GFP_ORDER_HI;
1455 
1456 	/* Bootstrap is tricky, because several objects are allocated
1457 	 * from caches that do not exist yet:
1458 	 * 1) initialize the cache_cache cache: it contains the struct
1459 	 *    kmem_cache structures of all caches, except cache_cache itself:
1460 	 *    cache_cache is statically allocated.
1461 	 *    Initially an __init data area is used for the head array and the
1462 	 *    kmem_list3 structures, it's replaced with a kmalloc allocated
1463 	 *    array at the end of the bootstrap.
1464 	 * 2) Create the first kmalloc cache.
1465 	 *    The struct kmem_cache for the new cache is allocated normally.
1466 	 *    An __init data area is used for the head array.
1467 	 * 3) Create the remaining kmalloc caches, with minimally sized
1468 	 *    head arrays.
1469 	 * 4) Replace the __init data head arrays for cache_cache and the first
1470 	 *    kmalloc cache with kmalloc allocated arrays.
1471 	 * 5) Replace the __init data for kmem_list3 for cache_cache and
1472 	 *    the other caches with kmalloc allocated memory.
1473 	 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1474 	 */
1475 
1476 	node = numa_node_id();
1477 
1478 	/* 1) create the cache_cache */
1479 	INIT_LIST_HEAD(&cache_chain);
1480 	list_add(&cache_cache.next, &cache_chain);
1481 	cache_cache.colour_off = cache_line_size();
1482 	cache_cache.array[smp_processor_id()] = &initarray_cache.cache;
1483 	cache_cache.nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1484 
1485 	/*
1486 	 * struct kmem_cache size depends on nr_node_ids, which
1487 	 * can be less than MAX_NUMNODES.
1488 	 */
1489 	cache_cache.buffer_size = offsetof(struct kmem_cache, nodelists) +
1490 				 nr_node_ids * sizeof(struct kmem_list3 *);
1491 #if DEBUG
1492 	cache_cache.obj_size = cache_cache.buffer_size;
1493 #endif
1494 	cache_cache.buffer_size = ALIGN(cache_cache.buffer_size,
1495 					cache_line_size());
1496 	cache_cache.reciprocal_buffer_size =
1497 		reciprocal_value(cache_cache.buffer_size);
1498 
1499 	for (order = 0; order < MAX_ORDER; order++) {
1500 		cache_estimate(order, cache_cache.buffer_size,
1501 			cache_line_size(), 0, &left_over, &cache_cache.num);
1502 		if (cache_cache.num)
1503 			break;
1504 	}
1505 	BUG_ON(!cache_cache.num);
1506 	cache_cache.gfporder = order;
1507 	cache_cache.colour = left_over / cache_cache.colour_off;
1508 	cache_cache.slab_size = ALIGN(cache_cache.num * sizeof(kmem_bufctl_t) +
1509 				      sizeof(struct slab), cache_line_size());
1510 
1511 	/* 2+3) create the kmalloc caches */
1512 	sizes = malloc_sizes;
1513 	names = cache_names;
1514 
1515 	/*
1516 	 * Initialize the caches that provide memory for the array cache and the
1517 	 * kmem_list3 structures first.  Without this, further allocations will
1518 	 * bug.
1519 	 */
1520 
1521 	sizes[INDEX_AC].cs_cachep = kmem_cache_create(names[INDEX_AC].name,
1522 					sizes[INDEX_AC].cs_size,
1523 					ARCH_KMALLOC_MINALIGN,
1524 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1525 					NULL);
1526 
1527 	if (INDEX_AC != INDEX_L3) {
1528 		sizes[INDEX_L3].cs_cachep =
1529 			kmem_cache_create(names[INDEX_L3].name,
1530 				sizes[INDEX_L3].cs_size,
1531 				ARCH_KMALLOC_MINALIGN,
1532 				ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1533 				NULL);
1534 	}
1535 
1536 	slab_early_init = 0;
1537 
1538 	while (sizes->cs_size != ULONG_MAX) {
1539 		/*
1540 		 * For performance, all the general caches are L1 aligned.
1541 		 * This should be particularly beneficial on SMP boxes, as it
1542 		 * eliminates "false sharing".
1543 		 * Note: for systems short on memory, removing the alignment will
1544 		 * allow tighter packing of the smaller caches.
1545 		 */
1546 		if (!sizes->cs_cachep) {
1547 			sizes->cs_cachep = kmem_cache_create(names->name,
1548 					sizes->cs_size,
1549 					ARCH_KMALLOC_MINALIGN,
1550 					ARCH_KMALLOC_FLAGS|SLAB_PANIC,
1551 					NULL);
1552 		}
1553 #ifdef CONFIG_ZONE_DMA
1554 		sizes->cs_dmacachep = kmem_cache_create(
1555 					names->name_dma,
1556 					sizes->cs_size,
1557 					ARCH_KMALLOC_MINALIGN,
1558 					ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1559 						SLAB_PANIC,
1560 					NULL);
1561 #endif
1562 		sizes++;
1563 		names++;
1564 	}
1565 	/* 4) Replace the bootstrap head arrays */
1566 	{
1567 		struct array_cache *ptr;
1568 
1569 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1570 
1571 		local_irq_disable();
1572 		BUG_ON(cpu_cache_get(&cache_cache) != &initarray_cache.cache);
1573 		memcpy(ptr, cpu_cache_get(&cache_cache),
1574 		       sizeof(struct arraycache_init));
1575 		/*
1576 		 * Do not assume that spinlocks can be initialized via memcpy:
1577 		 */
1578 		spin_lock_init(&ptr->lock);
1579 
1580 		cache_cache.array[smp_processor_id()] = ptr;
1581 		local_irq_enable();
1582 
1583 		ptr = kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
1584 
1585 		local_irq_disable();
1586 		BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
1587 		       != &initarray_generic.cache);
1588 		memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
1589 		       sizeof(struct arraycache_init));
1590 		/*
1591 		 * Do not assume that spinlocks can be initialized via memcpy:
1592 		 */
1593 		spin_lock_init(&ptr->lock);
1594 
1595 		malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
1596 		    ptr;
1597 		local_irq_enable();
1598 	}
1599 	/* 5) Replace the bootstrap kmem_list3's */
1600 	{
1601 		int nid;
1602 
1603 		for_each_online_node(nid) {
1604 			init_list(&cache_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
1605 
1606 			init_list(malloc_sizes[INDEX_AC].cs_cachep,
1607 				  &initkmem_list3[SIZE_AC + nid], nid);
1608 
1609 			if (INDEX_AC != INDEX_L3) {
1610 				init_list(malloc_sizes[INDEX_L3].cs_cachep,
1611 					  &initkmem_list3[SIZE_L3 + nid], nid);
1612 			}
1613 		}
1614 	}
1615 
1616 	/* 6) resize the head arrays to their final sizes */
1617 	{
1618 		struct kmem_cache *cachep;
1619 		mutex_lock(&cache_chain_mutex);
1620 		list_for_each_entry(cachep, &cache_chain, next)
1621 			if (enable_cpucache(cachep))
1622 				BUG();
1623 		mutex_unlock(&cache_chain_mutex);
1624 	}
1625 
1626 	/* Annotate slab for lockdep -- annotate the malloc caches */
1627 	init_lock_keys();
1628 
1629 
1630 	/* Done! */
1631 	g_cpucache_up = FULL;
1632 
1633 	/*
1634 	 * Register a cpu startup notifier callback that initializes
1635 	 * cpu_cache_get for all new cpus
1636 	 */
1637 	register_cpu_notifier(&cpucache_notifier);
1638 
1639 	/*
1640 	 * The reap timers are started later, with a module init call: That part
1641 	 * of the kernel is not yet operational.
1642 	 */
1643 }
1644 
1645 static int __init cpucache_init(void)
1646 {
1647 	int cpu;
1648 
1649 	/*
1650 	 * Register the timers that return unneeded pages to the page allocator
1651 	 */
1652 	for_each_online_cpu(cpu)
1653 		start_cpu_timer(cpu);
1654 	return 0;
1655 }
1656 __initcall(cpucache_init);
1657 
1658 /*
1659  * Interface to system's page allocator. No need to hold the cache-lock.
1660  *
1661  * If we requested dmaable memory, we will get it. Even if we
1662  * did not request dmaable memory, we might get it, but that
1663  * would be relatively rare and ignorable.
1664  */
1665 static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1666 {
1667 	struct page *page;
1668 	int nr_pages;
1669 	int i;
1670 
1671 #ifndef CONFIG_MMU
1672 	/*
1673 	 * Nommu uses slabs for process anonymous memory allocations, and thus
1674 	 * requires __GFP_COMP to properly refcount higher order allocations
1675 	 */
1676 	flags |= __GFP_COMP;
1677 #endif
1678 
1679 	flags |= cachep->gfpflags;
1680 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1681 		flags |= __GFP_RECLAIMABLE;
1682 
1683 	page = alloc_pages_node(nodeid, flags, cachep->gfporder);
1684 	if (!page)
1685 		return NULL;
1686 
1687 	nr_pages = (1 << cachep->gfporder);
1688 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1689 		add_zone_page_state(page_zone(page),
1690 			NR_SLAB_RECLAIMABLE, nr_pages);
1691 	else
1692 		add_zone_page_state(page_zone(page),
1693 			NR_SLAB_UNRECLAIMABLE, nr_pages);
1694 	for (i = 0; i < nr_pages; i++)
1695 		__SetPageSlab(page + i);
1696 	return page_address(page);
1697 }
1698 
1699 /*
1700  * Interface to system's page release.
1701  */
1702 static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1703 {
1704 	unsigned long i = (1 << cachep->gfporder);
1705 	struct page *page = virt_to_page(addr);
1706 	const unsigned long nr_freed = i;
1707 
1708 	if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1709 		sub_zone_page_state(page_zone(page),
1710 				NR_SLAB_RECLAIMABLE, nr_freed);
1711 	else
1712 		sub_zone_page_state(page_zone(page),
1713 				NR_SLAB_UNRECLAIMABLE, nr_freed);
1714 	while (i--) {
1715 		BUG_ON(!PageSlab(page));
1716 		__ClearPageSlab(page);
1717 		page++;
1718 	}
1719 	if (current->reclaim_state)
1720 		current->reclaim_state->reclaimed_slab += nr_freed;
1721 	free_pages((unsigned long)addr, cachep->gfporder);
1722 }
1723 
1724 static void kmem_rcu_free(struct rcu_head *head)
1725 {
1726 	struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
1727 	struct kmem_cache *cachep = slab_rcu->cachep;
1728 
1729 	kmem_freepages(cachep, slab_rcu->addr);
1730 	if (OFF_SLAB(cachep))
1731 		kmem_cache_free(cachep->slabp_cache, slab_rcu);
1732 }
1733 
1734 #if DEBUG
1735 
1736 #ifdef CONFIG_DEBUG_PAGEALLOC
1737 static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
1738 			    unsigned long caller)
1739 {
1740 	int size = obj_size(cachep);
1741 
1742 	addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1743 
1744 	if (size < 5 * sizeof(unsigned long))
1745 		return;
1746 
1747 	*addr++ = 0x12345678;
1748 	*addr++ = caller;
1749 	*addr++ = smp_processor_id();
1750 	size -= 3 * sizeof(unsigned long);
1751 	{
1752 		unsigned long *sptr = &caller;
1753 		unsigned long svalue;
1754 
1755 		while (!kstack_end(sptr)) {
1756 			svalue = *sptr++;
1757 			if (kernel_text_address(svalue)) {
1758 				*addr++ = svalue;
1759 				size -= sizeof(unsigned long);
1760 				if (size <= sizeof(unsigned long))
1761 					break;
1762 			}
1763 		}
1764 
1765 	}
1766 	*addr++ = 0x87654321;
1767 }
1768 #endif
1769 
1770 static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1771 {
1772 	int size = obj_size(cachep);
1773 	addr = &((char *)addr)[obj_offset(cachep)];
1774 
1775 	memset(addr, val, size);
1776 	*(unsigned char *)(addr + size - 1) = POISON_END;
1777 }
1778 
1779 static void dump_line(char *data, int offset, int limit)
1780 {
1781 	int i;
1782 	unsigned char error = 0;
1783 	int bad_count = 0;
1784 
1785 	printk(KERN_ERR "%03x:", offset);
1786 	for (i = 0; i < limit; i++) {
1787 		if (data[offset + i] != POISON_FREE) {
1788 			error = data[offset + i];
1789 			bad_count++;
1790 		}
1791 		printk(" %02x", (unsigned char)data[offset + i]);
1792 	}
1793 	printk("\n");
1794 
1795 	if (bad_count == 1) {
1796 		error ^= POISON_FREE;
1797 		if (!(error & (error - 1))) {
1798 			printk(KERN_ERR "Single bit error detected. Probably "
1799 					"bad RAM.\n");
1800 #ifdef CONFIG_X86
1801 			printk(KERN_ERR "Run memtest86+ or a similar memory "
1802 					"test tool.\n");
1803 #else
1804 			printk(KERN_ERR "Run a memory test tool.\n");
1805 #endif
1806 		}
1807 	}
1808 }
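/*
 * Worked example for the single-bit check above (illustrative values):
 * POISON_FREE is 0x6b, so a byte read back as 0x6a gives
 * error ^ POISON_FREE == 0x01, and 0x01 & (0x01 - 1) == 0.  The XOR is
 * a power of two, i.e. exactly one bit differs from the poison pattern,
 * which points at a flipped bit in RAM rather than a software overwrite.
 */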
1809 #endif
1810 
1811 #if DEBUG
1812 
1813 static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1814 {
1815 	int i, size;
1816 	char *realobj;
1817 
1818 	if (cachep->flags & SLAB_RED_ZONE) {
1819 		printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
1820 			*dbg_redzone1(cachep, objp),
1821 			*dbg_redzone2(cachep, objp));
1822 	}
1823 
1824 	if (cachep->flags & SLAB_STORE_USER) {
1825 		printk(KERN_ERR "Last user: [<%p>]",
1826 			*dbg_userword(cachep, objp));
1827 		print_symbol("(%s)",
1828 				(unsigned long)*dbg_userword(cachep, objp));
1829 		printk("\n");
1830 	}
1831 	realobj = (char *)objp + obj_offset(cachep);
1832 	size = obj_size(cachep);
1833 	for (i = 0; i < size && lines; i += 16, lines--) {
1834 		int limit;
1835 		limit = 16;
1836 		if (i + limit > size)
1837 			limit = size - i;
1838 		dump_line(realobj, i, limit);
1839 	}
1840 }
1841 
1842 static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1843 {
1844 	char *realobj;
1845 	int size, i;
1846 	int lines = 0;
1847 
1848 	realobj = (char *)objp + obj_offset(cachep);
1849 	size = obj_size(cachep);
1850 
1851 	for (i = 0; i < size; i++) {
1852 		char exp = POISON_FREE;
1853 		if (i == size - 1)
1854 			exp = POISON_END;
1855 		if (realobj[i] != exp) {
1856 			int limit;
1857 			/* Mismatch ! */
1858 			/* Print header */
1859 			if (lines == 0) {
1860 				printk(KERN_ERR
1861 					"Slab corruption: %s start=%p, len=%d\n",
1862 					cachep->name, realobj, size);
1863 				print_objinfo(cachep, objp, 0);
1864 			}
1865 			/* Hexdump the affected line */
1866 			i = (i / 16) * 16;
1867 			limit = 16;
1868 			if (i + limit > size)
1869 				limit = size - i;
1870 			dump_line(realobj, i, limit);
1871 			i += 16;
1872 			lines++;
1873 			/* Limit to 5 lines */
1874 			if (lines > 5)
1875 				break;
1876 		}
1877 	}
1878 	if (lines != 0) {
1879 		/* Print some data about the neighboring objects, if they
1880 		 * exist:
1881 		 */
1882 		struct slab *slabp = virt_to_slab(objp);
1883 		unsigned int objnr;
1884 
1885 		objnr = obj_to_index(cachep, slabp, objp);
1886 		if (objnr) {
1887 			objp = index_to_obj(cachep, slabp, objnr - 1);
1888 			realobj = (char *)objp + obj_offset(cachep);
1889 			printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
1890 			       realobj, size);
1891 			print_objinfo(cachep, objp, 2);
1892 		}
1893 		if (objnr + 1 < cachep->num) {
1894 			objp = index_to_obj(cachep, slabp, objnr + 1);
1895 			realobj = (char *)objp + obj_offset(cachep);
1896 			printk(KERN_ERR "Next obj: start=%p, len=%d\n",
1897 			       realobj, size);
1898 			print_objinfo(cachep, objp, 2);
1899 		}
1900 	}
1901 }
1902 #endif
1903 
1904 #if DEBUG
1905 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1906 {
1907 	int i;
1908 	for (i = 0; i < cachep->num; i++) {
1909 		void *objp = index_to_obj(cachep, slabp, i);
1910 
1911 		if (cachep->flags & SLAB_POISON) {
1912 #ifdef CONFIG_DEBUG_PAGEALLOC
1913 			if (cachep->buffer_size % PAGE_SIZE == 0 &&
1914 					OFF_SLAB(cachep))
1915 				kernel_map_pages(virt_to_page(objp),
1916 					cachep->buffer_size / PAGE_SIZE, 1);
1917 			else
1918 				check_poison_obj(cachep, objp);
1919 #else
1920 			check_poison_obj(cachep, objp);
1921 #endif
1922 		}
1923 		if (cachep->flags & SLAB_RED_ZONE) {
1924 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
1925 				slab_error(cachep, "start of a freed object "
1926 					   "was overwritten");
1927 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
1928 				slab_error(cachep, "end of a freed object "
1929 					   "was overwritten");
1930 		}
1931 	}
1932 }
1933 #else
1934 static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1935 {
1936 }
1937 #endif
1938 
1939 /**
1940  * slab_destroy - destroy and release all objects in a slab
1941  * @cachep: cache pointer being destroyed
1942  * @slabp: slab pointer being destroyed
1943  *
1944  * Destroy all the objs in a slab, and release the mem back to the system.
1945  * Before calling the slab must have been unlinked from the cache.  The
1946  * cache-lock is not held/needed.
1947  */
1948 static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
1949 {
1950 	void *addr = slabp->s_mem - slabp->colouroff;
1951 
1952 	slab_destroy_debugcheck(cachep, slabp);
1953 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
1954 		struct slab_rcu *slab_rcu;
1955 
1956 		slab_rcu = (struct slab_rcu *)slabp;
1957 		slab_rcu->cachep = cachep;
1958 		slab_rcu->addr = addr;
1959 		call_rcu(&slab_rcu->head, kmem_rcu_free);
1960 	} else {
1961 		kmem_freepages(cachep, addr);
1962 		if (OFF_SLAB(cachep))
1963 			kmem_cache_free(cachep->slabp_cache, slabp);
1964 	}
1965 }
1966 
1967 static void __kmem_cache_destroy(struct kmem_cache *cachep)
1968 {
1969 	int i;
1970 	struct kmem_list3 *l3;
1971 
1972 	for_each_online_cpu(i)
1973 	    kfree(cachep->array[i]);
1974 
1975 	/* NUMA: free the list3 structures */
1976 	for_each_online_node(i) {
1977 		l3 = cachep->nodelists[i];
1978 		if (l3) {
1979 			kfree(l3->shared);
1980 			free_alien_cache(l3->alien);
1981 			kfree(l3);
1982 		}
1983 	}
1984 	kmem_cache_free(&cache_cache, cachep);
1985 }
1986 
1987 
1988 /**
1989  * calculate_slab_order - calculate size (page order) of slabs
1990  * @cachep: pointer to the cache that is being created
1991  * @size: size of objects to be created in this cache.
1992  * @align: required alignment for the objects.
1993  * @flags: slab allocation flags
1994  *
1995  * Also calculates the number of objects per slab.
1996  *
1997  * This could be made much more intelligent.  For now, try to avoid using
1998  * high order pages for slabs.  When the gfp() functions are more friendly
1999  * towards high-order requests, this should be changed.
2000  */
2001 static size_t calculate_slab_order(struct kmem_cache *cachep,
2002 			size_t size, size_t align, unsigned long flags)
2003 {
2004 	unsigned long offslab_limit;
2005 	size_t left_over = 0;
2006 	int gfporder;
2007 
2008 	for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
2009 		unsigned int num;
2010 		size_t remainder;
2011 
2012 		cache_estimate(gfporder, size, align, flags, &remainder, &num);
2013 		if (!num)
2014 			continue;
2015 
2016 		if (flags & CFLGS_OFF_SLAB) {
2017 			/*
2018 			 * Max number of objs-per-slab for caches which
2019 			 * use off-slab slabs. Needed to avoid a possible
2020 			 * looping condition in cache_grow().
2021 			 */
2022 			offslab_limit = size - sizeof(struct slab);
2023 			offslab_limit /= sizeof(kmem_bufctl_t);
2024 
2025  			if (num > offslab_limit)
2026 				break;
2027 		}
2028 
2029 		/* Found something acceptable - save it away */
2030 		cachep->num = num;
2031 		cachep->gfporder = gfporder;
2032 		left_over = remainder;
2033 
2034 		/*
2035 		 * A VFS-reclaimable slab tends to have most allocations
2036 		 * as GFP_NOFS and we really don't want to have to be allocating
2037 		 * higher-order pages when we are unable to shrink dcache.
2038 		 */
2039 		if (flags & SLAB_RECLAIM_ACCOUNT)
2040 			break;
2041 
2042 		/*
2043 		 * Large number of objects is good, but very large slabs are
2044 		 * currently bad for the gfp()s.
2045 		 */
2046 		if (gfporder >= slab_break_gfp_order)
2047 			break;
2048 
2049 		/*
2050 		 * Acceptable internal fragmentation?
2051 		 */
2052 		if (left_over * 8 <= (PAGE_SIZE << gfporder))
2053 			break;
2054 	}
2055 	return left_over;
2056 }
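/*
 * Worked example of the fragmentation check above (illustrative numbers,
 * slab management overhead ignored): with 4096-byte pages and 1200-byte
 * objects, gfporder 0 fits num = 3 objects with left_over = 496 bytes;
 * 496 * 8 = 3968 <= 4096, so the waste stays within 1/8 of the slab and
 * order 0 is kept.  Had left_over been 600 bytes, 600 * 8 = 4800 > 4096
 * and the loop would try the next order.
 */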
2057 
2058 static int __init_refok setup_cpu_cache(struct kmem_cache *cachep)
2059 {
2060 	if (g_cpucache_up == FULL)
2061 		return enable_cpucache(cachep);
2062 
2063 	if (g_cpucache_up == NONE) {
2064 		/*
2065 		 * Note: the first kmem_cache_create must create the cache
2066 		 * that's used by kmalloc(24), otherwise the creation of
2067 		 * further caches will BUG().
2068 		 */
2069 		cachep->array[smp_processor_id()] = &initarray_generic.cache;
2070 
2071 		/*
2072 		 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2073 		 * the first cache, then we need to set up all its list3s,
2074 		 * otherwise the creation of further caches will BUG().
2075 		 */
2076 		set_up_list3s(cachep, SIZE_AC);
2077 		if (INDEX_AC == INDEX_L3)
2078 			g_cpucache_up = PARTIAL_L3;
2079 		else
2080 			g_cpucache_up = PARTIAL_AC;
2081 	} else {
2082 		cachep->array[smp_processor_id()] =
2083 			kmalloc(sizeof(struct arraycache_init), GFP_KERNEL);
2084 
2085 		if (g_cpucache_up == PARTIAL_AC) {
2086 			set_up_list3s(cachep, SIZE_L3);
2087 			g_cpucache_up = PARTIAL_L3;
2088 		} else {
2089 			int node;
2090 			for_each_online_node(node) {
2091 				cachep->nodelists[node] =
2092 				    kmalloc_node(sizeof(struct kmem_list3),
2093 						GFP_KERNEL, node);
2094 				BUG_ON(!cachep->nodelists[node]);
2095 				kmem_list3_init(cachep->nodelists[node]);
2096 			}
2097 		}
2098 	}
2099 	cachep->nodelists[numa_node_id()]->next_reap =
2100 			jiffies + REAPTIMEOUT_LIST3 +
2101 			((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2102 
2103 	cpu_cache_get(cachep)->avail = 0;
2104 	cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2105 	cpu_cache_get(cachep)->batchcount = 1;
2106 	cpu_cache_get(cachep)->touched = 0;
2107 	cachep->batchcount = 1;
2108 	cachep->limit = BOOT_CPUCACHE_ENTRIES;
2109 	return 0;
2110 }
2111 
2112 /**
2113  * kmem_cache_create - Create a cache.
2114  * @name: A string which is used in /proc/slabinfo to identify this cache.
2115  * @size: The size of objects to be created in this cache.
2116  * @align: The required alignment for the objects.
2117  * @flags: SLAB flags
2118  * @ctor: A constructor for the objects.
2119  *
2120  * Returns a ptr to the cache on success, NULL on failure.
2121  * Cannot be called within an interrupt, but can be interrupted.
2122  * The @ctor is run when new pages are allocated by the cache.
2123  *
2124  * @name must be valid until the cache is destroyed. This implies that
2125  * the module calling this has to destroy the cache before getting unloaded.
2126  * Note that kmem_cache_name() is not guaranteed to return the same pointer,
2127  * therefore applications must manage it themselves.
2128  *
2129  * The flags are
2130  *
2131  * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2132  * to catch references to uninitialised memory.
2133  *
2134  * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2135  * for buffer overruns.
2136  *
2137  * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2138  * cacheline.  This can be beneficial if you're counting cycles as closely
2139  * as davem.
2140  */
2141 struct kmem_cache *
2142 kmem_cache_create (const char *name, size_t size, size_t align,
2143 	unsigned long flags, void (*ctor)(void *))
2144 {
2145 	size_t left_over, slab_size, ralign;
2146 	struct kmem_cache *cachep = NULL, *pc;
2147 
2148 	/*
2149 	 * Sanity checks... these are all serious usage bugs.
2150 	 */
2151 	if (!name || in_interrupt() || (size < BYTES_PER_WORD) ||
2152 	    size > KMALLOC_MAX_SIZE) {
2153 		printk(KERN_ERR "%s: Early error in slab %s\n", __func__,
2154 				name);
2155 		BUG();
2156 	}
2157 
2158 	/*
2159 	 * We use cache_chain_mutex to ensure a consistent view of
2160 	 * cpu_online_mask as well.  Please see cpuup_callback
2161 	 */
2162 	get_online_cpus();
2163 	mutex_lock(&cache_chain_mutex);
2164 
2165 	list_for_each_entry(pc, &cache_chain, next) {
2166 		char tmp;
2167 		int res;
2168 
2169 		/*
2170 		 * This happens when the module gets unloaded and doesn't
2171 		 * destroy its slab cache, and no one else reuses the vmalloc
2172 		 * area of the module.  Print a warning.
2173 		 */
2174 		res = probe_kernel_address(pc->name, tmp);
2175 		if (res) {
2176 			printk(KERN_ERR
2177 			       "SLAB: cache with size %d has lost its name\n",
2178 			       pc->buffer_size);
2179 			continue;
2180 		}
2181 
2182 		if (!strcmp(pc->name, name)) {
2183 			printk(KERN_ERR
2184 			       "kmem_cache_create: duplicate cache %s\n", name);
2185 			dump_stack();
2186 			goto oops;
2187 		}
2188 	}
2189 
2190 #if DEBUG
2191 	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
2192 #if FORCED_DEBUG
2193 	/*
2194 	 * Enable redzoning and last user accounting, except for caches with
2195 	 * large objects, if the increased size would increase the object size
2196 	 * above the next power of two: caches with object sizes just above a
2197 	 * power of two have a significant amount of internal fragmentation.
2198 	 */
2199 	if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2200 						2 * sizeof(unsigned long long)))
2201 		flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
2202 	if (!(flags & SLAB_DESTROY_BY_RCU))
2203 		flags |= SLAB_POISON;
2204 #endif
2205 	if (flags & SLAB_DESTROY_BY_RCU)
2206 		BUG_ON(flags & SLAB_POISON);
2207 #endif
2208 	/*
2209 	 * Always check flags; a caller might be expecting debug support which
2210 	 * isn't available.
2211 	 */
2212 	BUG_ON(flags & ~CREATE_MASK);
2213 
2214 	/*
2215 	 * Check that size is in terms of words.  This is needed to avoid
2216 	 * unaligned accesses for some archs when redzoning is used, and makes
2217 	 * sure any on-slab bufctl's are also correctly aligned.
2218 	 */
2219 	if (size & (BYTES_PER_WORD - 1)) {
2220 		size += (BYTES_PER_WORD - 1);
2221 		size &= ~(BYTES_PER_WORD - 1);
2222 	}
2223 
2224 	/* calculate the final buffer alignment: */
2225 
2226 	/* 1) arch recommendation: can be overridden for debug */
2227 	if (flags & SLAB_HWCACHE_ALIGN) {
2228 		/*
2229 		 * Default alignment: as specified by the arch code.  Except if
2230 		 * an object is really small, then squeeze multiple objects into
2231 		 * one cacheline.
2232 		 */
2233 		ralign = cache_line_size();
2234 		while (size <= ralign / 2)
2235 			ralign /= 2;
2236 	} else {
2237 		ralign = BYTES_PER_WORD;
2238 	}
2239 
2240 	/*
2241 	 * Redzoning and user store require word alignment or possibly larger.
2242 	 * Note this will be overridden by architecture or caller mandated
2243 	 * alignment if either is greater than BYTES_PER_WORD.
2244 	 */
2245 	if (flags & SLAB_STORE_USER)
2246 		ralign = BYTES_PER_WORD;
2247 
2248 	if (flags & SLAB_RED_ZONE) {
2249 		ralign = REDZONE_ALIGN;
2250 		/* If redzoning, ensure that the second redzone is suitably
2251 		 * aligned, by adjusting the object size accordingly. */
2252 		size += REDZONE_ALIGN - 1;
2253 		size &= ~(REDZONE_ALIGN - 1);
2254 	}
2255 
2256 	/* 2) arch mandated alignment */
2257 	if (ralign < ARCH_SLAB_MINALIGN) {
2258 		ralign = ARCH_SLAB_MINALIGN;
2259 	}
2260 	/* 3) caller mandated alignment */
2261 	if (ralign < align) {
2262 		ralign = align;
2263 	}
2264 	/* disable debug if necessary */
2265 	if (ralign > __alignof__(unsigned long long))
2266 		flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2267 	/*
2268 	 * 4) Store it.
2269 	 */
2270 	align = ralign;
2271 
2272 	/* Get cache's description obj. */
2273 	cachep = kmem_cache_zalloc(&cache_cache, GFP_KERNEL);
2274 	if (!cachep)
2275 		goto oops;
2276 
2277 #if DEBUG
2278 	cachep->obj_size = size;
2279 
2280 	/*
2281 	 * Both debugging options require word-alignment which is calculated
2282 	 * into align above.
2283 	 */
2284 	if (flags & SLAB_RED_ZONE) {
2285 		/* add space for red zone words */
2286 		cachep->obj_offset += sizeof(unsigned long long);
2287 		size += 2 * sizeof(unsigned long long);
2288 	}
2289 	if (flags & SLAB_STORE_USER) {
2290 		/* user store requires one word storage behind the end of
2291 		 * the real object. But if the second red zone needs to be
2292 		 * aligned to 64 bits, we must allow that much space.
2293 		 */
2294 		if (flags & SLAB_RED_ZONE)
2295 			size += REDZONE_ALIGN;
2296 		else
2297 			size += BYTES_PER_WORD;
2298 	}
2299 #if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
2300 	if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
2301 	    && cachep->obj_size > cache_line_size() && size < PAGE_SIZE) {
2302 		cachep->obj_offset += PAGE_SIZE - size;
2303 		size = PAGE_SIZE;
2304 	}
2305 #endif
2306 #endif
2307 
2308 	/*
2309 	 * Determine if the slab management is 'on' or 'off' slab.
2310 	 * (bootstrapping cannot cope with offslab caches so don't do
2311 	 * it too early on.)
2312 	 */
2313 	if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init)
2314 		/*
2315 		 * Size is large, assume best to place the slab management obj
2316 		 * off-slab (should allow better packing of objs).
2317 		 */
2318 		flags |= CFLGS_OFF_SLAB;
2319 
2320 	size = ALIGN(size, align);
2321 
2322 	left_over = calculate_slab_order(cachep, size, align, flags);
2323 
2324 	if (!cachep->num) {
2325 		printk(KERN_ERR
2326 		       "kmem_cache_create: couldn't create cache %s.\n", name);
2327 		kmem_cache_free(&cache_cache, cachep);
2328 		cachep = NULL;
2329 		goto oops;
2330 	}
2331 	slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2332 			  + sizeof(struct slab), align);
2333 
2334 	/*
2335 	 * If the slab has been placed off-slab, and we have enough space then
2336 	 * move it on-slab. This is at the expense of any extra colouring.
2337 	 */
2338 	if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2339 		flags &= ~CFLGS_OFF_SLAB;
2340 		left_over -= slab_size;
2341 	}
2342 
2343 	if (flags & CFLGS_OFF_SLAB) {
2344 		/* really off slab. No need for manual alignment */
2345 		slab_size =
2346 		    cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
2347 	}
2348 
2349 	cachep->colour_off = cache_line_size();
2350 	/* Offset must be a multiple of the alignment. */
2351 	if (cachep->colour_off < align)
2352 		cachep->colour_off = align;
2353 	cachep->colour = left_over / cachep->colour_off;
2354 	cachep->slab_size = slab_size;
2355 	cachep->flags = flags;
2356 	cachep->gfpflags = 0;
2357 	if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
2358 		cachep->gfpflags |= GFP_DMA;
2359 	cachep->buffer_size = size;
2360 	cachep->reciprocal_buffer_size = reciprocal_value(size);
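	/*
	 * The reciprocal value lets obj_to_index() replace the division
	 * (obj - slabp->s_mem) / buffer_size with a multiply and shift via
	 * reciprocal_divide().  Illustrative example: for buffer_size = 192,
	 * an object starting 576 bytes into the slab maps to index
	 * 576 / 192 = 3.
	 */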
2361 
2362 	if (flags & CFLGS_OFF_SLAB) {
2363 		cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
2364 		/*
2365 		 * This is a possibility for one of the malloc_sizes caches.
2366 		 * But since we go off slab only for object sizes of at least
2367 		 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2368 		 * this should not happen at all.
2369 		 * But leave a BUG_ON for some lucky dude.
2370 		 */
2371 		BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
2372 	}
2373 	cachep->ctor = ctor;
2374 	cachep->name = name;
2375 
2376 	if (setup_cpu_cache(cachep)) {
2377 		__kmem_cache_destroy(cachep);
2378 		cachep = NULL;
2379 		goto oops;
2380 	}
2381 
2382 	/* cache setup completed, link it into the list */
2383 	list_add(&cachep->next, &cache_chain);
2384 oops:
2385 	if (!cachep && (flags & SLAB_PANIC))
2386 		panic("kmem_cache_create(): failed to create slab `%s'\n",
2387 		      name);
2388 	mutex_unlock(&cache_chain_mutex);
2389 	put_online_cpus();
2390 	return cachep;
2391 }
2392 EXPORT_SYMBOL(kmem_cache_create);
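/*
 * Minimal usage sketch for the API above.  struct foo and foo_cache are
 * hypothetical caller-side names used only for illustration:
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				      0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cache)
 *		return -ENOMEM;
 *
 *	obj = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, obj);
 *
 *	Once every object has been freed the cache can be removed:
 *	kmem_cache_destroy(foo_cache);
 */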
2393 
2394 #if DEBUG
2395 static void check_irq_off(void)
2396 {
2397 	BUG_ON(!irqs_disabled());
2398 }
2399 
2400 static void check_irq_on(void)
2401 {
2402 	BUG_ON(irqs_disabled());
2403 }
2404 
2405 static void check_spinlock_acquired(struct kmem_cache *cachep)
2406 {
2407 #ifdef CONFIG_SMP
2408 	check_irq_off();
2409 	assert_spin_locked(&cachep->nodelists[numa_node_id()]->list_lock);
2410 #endif
2411 }
2412 
2413 static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
2414 {
2415 #ifdef CONFIG_SMP
2416 	check_irq_off();
2417 	assert_spin_locked(&cachep->nodelists[node]->list_lock);
2418 #endif
2419 }
2420 
2421 #else
2422 #define check_irq_off()	do { } while(0)
2423 #define check_irq_on()	do { } while(0)
2424 #define check_spinlock_acquired(x) do { } while(0)
2425 #define check_spinlock_acquired_node(x, y) do { } while(0)
2426 #endif
2427 
2428 static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2429 			struct array_cache *ac,
2430 			int force, int node);
2431 
2432 static void do_drain(void *arg)
2433 {
2434 	struct kmem_cache *cachep = arg;
2435 	struct array_cache *ac;
2436 	int node = numa_node_id();
2437 
2438 	check_irq_off();
2439 	ac = cpu_cache_get(cachep);
2440 	spin_lock(&cachep->nodelists[node]->list_lock);
2441 	free_block(cachep, ac->entry, ac->avail, node);
2442 	spin_unlock(&cachep->nodelists[node]->list_lock);
2443 	ac->avail = 0;
2444 }
2445 
2446 static void drain_cpu_caches(struct kmem_cache *cachep)
2447 {
2448 	struct kmem_list3 *l3;
2449 	int node;
2450 
2451 	on_each_cpu(do_drain, cachep, 1);
2452 	check_irq_on();
2453 	for_each_online_node(node) {
2454 		l3 = cachep->nodelists[node];
2455 		if (l3 && l3->alien)
2456 			drain_alien_cache(cachep, l3->alien);
2457 	}
2458 
2459 	for_each_online_node(node) {
2460 		l3 = cachep->nodelists[node];
2461 		if (l3)
2462 			drain_array(cachep, l3, l3->shared, 1, node);
2463 	}
2464 }
2465 
2466 /*
2467  * Remove slabs from the list of free slabs.
2468  * Specify the number of slabs to drain in tofree.
2469  *
2470  * Returns the actual number of slabs released.
2471  */
2472 static int drain_freelist(struct kmem_cache *cache,
2473 			struct kmem_list3 *l3, int tofree)
2474 {
2475 	struct list_head *p;
2476 	int nr_freed;
2477 	struct slab *slabp;
2478 
2479 	nr_freed = 0;
2480 	while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
2481 
2482 		spin_lock_irq(&l3->list_lock);
2483 		p = l3->slabs_free.prev;
2484 		if (p == &l3->slabs_free) {
2485 			spin_unlock_irq(&l3->list_lock);
2486 			goto out;
2487 		}
2488 
2489 		slabp = list_entry(p, struct slab, list);
2490 #if DEBUG
2491 		BUG_ON(slabp->inuse);
2492 #endif
2493 		list_del(&slabp->list);
2494 		/*
2495 		 * Safe to drop the lock. The slab is no longer linked
2496 		 * to the cache.
2497 		 */
2498 		l3->free_objects -= cache->num;
2499 		spin_unlock_irq(&l3->list_lock);
2500 		slab_destroy(cache, slabp);
2501 		nr_freed++;
2502 	}
2503 out:
2504 	return nr_freed;
2505 }
2506 
2507 /* Called with cache_chain_mutex held to protect against cpu hotplug */
2508 static int __cache_shrink(struct kmem_cache *cachep)
2509 {
2510 	int ret = 0, i = 0;
2511 	struct kmem_list3 *l3;
2512 
2513 	drain_cpu_caches(cachep);
2514 
2515 	check_irq_on();
2516 	for_each_online_node(i) {
2517 		l3 = cachep->nodelists[i];
2518 		if (!l3)
2519 			continue;
2520 
2521 		drain_freelist(cachep, l3, l3->free_objects);
2522 
2523 		ret += !list_empty(&l3->slabs_full) ||
2524 			!list_empty(&l3->slabs_partial);
2525 	}
2526 	return (ret ? 1 : 0);
2527 }
2528 
2529 /**
2530  * kmem_cache_shrink - Shrink a cache.
2531  * @cachep: The cache to shrink.
2532  *
2533  * Releases as many slabs as possible for a cache.
2534  * To help debugging, a zero exit status indicates all slabs were released.
2535  */
2536 int kmem_cache_shrink(struct kmem_cache *cachep)
2537 {
2538 	int ret;
2539 	BUG_ON(!cachep || in_interrupt());
2540 
2541 	get_online_cpus();
2542 	mutex_lock(&cache_chain_mutex);
2543 	ret = __cache_shrink(cachep);
2544 	mutex_unlock(&cache_chain_mutex);
2545 	put_online_cpus();
2546 	return ret;
2547 }
2548 EXPORT_SYMBOL(kmem_cache_shrink);
2549 
2550 /**
2551  * kmem_cache_destroy - delete a cache
2552  * @cachep: the cache to destroy
2553  *
2554  * Remove a &struct kmem_cache object from the slab cache.
2555  *
2556  * It is expected this function will be called by a module when it is
2557  * unloaded.  This will remove the cache completely, and avoid a duplicate
2558  * cache being allocated each time a module is loaded and unloaded, if the
2559  * module doesn't have persistent in-kernel storage across loads and unloads.
2560  *
2561  * The cache must be empty before calling this function.
2562  *
2563  * The caller must guarantee that no one will allocate memory from the cache
2564  * during the kmem_cache_destroy().
2565  */
2566 void kmem_cache_destroy(struct kmem_cache *cachep)
2567 {
2568 	BUG_ON(!cachep || in_interrupt());
2569 
2570 	/* Find the cache in the chain of caches. */
2571 	get_online_cpus();
2572 	mutex_lock(&cache_chain_mutex);
2573 	/*
2574 	 * the chain is never empty, cache_cache is never destroyed
2575 	 */
2576 	list_del(&cachep->next);
2577 	if (__cache_shrink(cachep)) {
2578 		slab_error(cachep, "Can't free all objects");
2579 		list_add(&cachep->next, &cache_chain);
2580 		mutex_unlock(&cache_chain_mutex);
2581 		put_online_cpus();
2582 		return;
2583 	}
2584 
2585 	if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU))
2586 		synchronize_rcu();
2587 
2588 	__kmem_cache_destroy(cachep);
2589 	mutex_unlock(&cache_chain_mutex);
2590 	put_online_cpus();
2591 }
2592 EXPORT_SYMBOL(kmem_cache_destroy);
2593 
2594 /*
2595  * Get the memory for a slab management obj.
2596  * For a slab cache when the slab descriptor is off-slab, slab descriptors
2597  * always come from malloc_sizes caches.  The slab descriptor cannot
2598  * come from the same cache which is getting created because,
2599  * when we are searching for an appropriate cache for these
2600  * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2601  * If we are creating a malloc_sizes cache here it would not be visible to
2602  * kmem_find_general_cachep till the initialization is complete.
2603  * Hence we cannot have slabp_cache same as the original cache.
2604  */
2605 static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
2606 				   int colour_off, gfp_t local_flags,
2607 				   int nodeid)
2608 {
2609 	struct slab *slabp;
2610 
2611 	if (OFF_SLAB(cachep)) {
2612 		/* Slab management obj is off-slab. */
2613 		slabp = kmem_cache_alloc_node(cachep->slabp_cache,
2614 					      local_flags, nodeid);
2615 		if (!slabp)
2616 			return NULL;
2617 	} else {
2618 		slabp = objp + colour_off;
2619 		colour_off += cachep->slab_size;
2620 	}
2621 	slabp->inuse = 0;
2622 	slabp->colouroff = colour_off;
2623 	slabp->s_mem = objp + colour_off;
2624 	slabp->nodeid = nodeid;
2625 	slabp->free = 0;
2626 	return slabp;
2627 }
2628 
2629 static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2630 {
2631 	return (kmem_bufctl_t *) (slabp + 1);
2632 }
2633 
2634 static void cache_init_objs(struct kmem_cache *cachep,
2635 			    struct slab *slabp)
2636 {
2637 	int i;
2638 
2639 	for (i = 0; i < cachep->num; i++) {
2640 		void *objp = index_to_obj(cachep, slabp, i);
2641 #if DEBUG
2642 		/* need to poison the objs? */
2643 		if (cachep->flags & SLAB_POISON)
2644 			poison_obj(cachep, objp, POISON_FREE);
2645 		if (cachep->flags & SLAB_STORE_USER)
2646 			*dbg_userword(cachep, objp) = NULL;
2647 
2648 		if (cachep->flags & SLAB_RED_ZONE) {
2649 			*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2650 			*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2651 		}
2652 		/*
2653 		 * Constructors are not allowed to allocate memory from the same
2654 		 * cache which they are a constructor for.  Otherwise, deadlock.
2655 		 * They must also be threaded.
2656 		 */
2657 		if (cachep->ctor && !(cachep->flags & SLAB_POISON))
2658 			cachep->ctor(objp + obj_offset(cachep));
2659 
2660 		if (cachep->flags & SLAB_RED_ZONE) {
2661 			if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2662 				slab_error(cachep, "constructor overwrote the"
2663 					   " end of an object");
2664 			if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2665 				slab_error(cachep, "constructor overwrote the"
2666 					   " start of an object");
2667 		}
2668 		if ((cachep->buffer_size % PAGE_SIZE) == 0 &&
2669 			    OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
2670 			kernel_map_pages(virt_to_page(objp),
2671 					 cachep->buffer_size / PAGE_SIZE, 0);
2672 #else
2673 		if (cachep->ctor)
2674 			cachep->ctor(objp);
2675 #endif
2676 		slab_bufctl(slabp)[i] = i + 1;
2677 	}
2678 	slab_bufctl(slabp)[i - 1] = BUFCTL_END;
2679 }
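/*
 * Illustrative state after cache_init_objs() for a slab with num = 4:
 * alloc_slabmgmt() left slabp->free = 0, and the bufctl array is now
 * { 1, 2, 3, BUFCTL_END }, i.e. a singly linked free list of object
 * indices 0 -> 1 -> 2 -> 3 -> end.  slab_get_obj() pops from the head
 * and slab_put_obj() pushes a freed index back onto it.
 */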
2680 
2681 static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
2682 {
2683 	if (CONFIG_ZONE_DMA_FLAG) {
2684 		if (flags & GFP_DMA)
2685 			BUG_ON(!(cachep->gfpflags & GFP_DMA));
2686 		else
2687 			BUG_ON(cachep->gfpflags & GFP_DMA);
2688 	}
2689 }
2690 
2691 static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2692 				int nodeid)
2693 {
2694 	void *objp = index_to_obj(cachep, slabp, slabp->free);
2695 	kmem_bufctl_t next;
2696 
2697 	slabp->inuse++;
2698 	next = slab_bufctl(slabp)[slabp->free];
2699 #if DEBUG
2700 	slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2701 	WARN_ON(slabp->nodeid != nodeid);
2702 #endif
2703 	slabp->free = next;
2704 
2705 	return objp;
2706 }
2707 
2708 static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2709 				void *objp, int nodeid)
2710 {
2711 	unsigned int objnr = obj_to_index(cachep, slabp, objp);
2712 
2713 #if DEBUG
2714 	/* Verify that the slab belongs to the intended node */
2715 	WARN_ON(slabp->nodeid != nodeid);
2716 
2717 	if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
2718 		printk(KERN_ERR "slab: double free detected in cache "
2719 				"'%s', objp %p\n", cachep->name, objp);
2720 		BUG();
2721 	}
2722 #endif
2723 	slab_bufctl(slabp)[objnr] = slabp->free;
2724 	slabp->free = objnr;
2725 	slabp->inuse--;
2726 }
2727 
2728 /*
2729  * Map pages beginning at addr to the given cache and slab. This is required
2730  * for the slab allocator to be able to lookup the cache and slab of a
2731  * virtual address for kfree, ksize, kmem_ptr_validate, and slab debugging.
2732  */
2733 static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2734 			   void *addr)
2735 {
2736 	int nr_pages;
2737 	struct page *page;
2738 
2739 	page = virt_to_page(addr);
2740 
2741 	nr_pages = 1;
2742 	if (likely(!PageCompound(page)))
2743 		nr_pages <<= cache->gfporder;
2744 
2745 	do {
2746 		page_set_cache(page, cache);
2747 		page_set_slab(page, slab);
2748 		page++;
2749 	} while (--nr_pages);
2750 }
2751 
2752 /*
2753  * Grow (by 1) the number of slabs within a cache.  This is called by
2754  * kmem_cache_alloc() when there are no active objs left in a cache.
2755  */
2756 static int cache_grow(struct kmem_cache *cachep,
2757 		gfp_t flags, int nodeid, void *objp)
2758 {
2759 	struct slab *slabp;
2760 	size_t offset;
2761 	gfp_t local_flags;
2762 	struct kmem_list3 *l3;
2763 
2764 	/*
2765 	 * Be lazy and only check for valid flags here,  keeping it out of the
2766 	 * critical path in kmem_cache_alloc().
2767 	 */
2768 	BUG_ON(flags & GFP_SLAB_BUG_MASK);
2769 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
2770 
2771 	/* Take the l3 list lock to change the colour_next on this node */
2772 	check_irq_off();
2773 	l3 = cachep->nodelists[nodeid];
2774 	spin_lock(&l3->list_lock);
2775 
2776 	/* Get colour for the slab, and calculate the next value. */
2777 	offset = l3->colour_next;
2778 	l3->colour_next++;
2779 	if (l3->colour_next >= cachep->colour)
2780 		l3->colour_next = 0;
2781 	spin_unlock(&l3->list_lock);
2782 
2783 	offset *= cachep->colour_off;
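	/*
	 * Illustrative example: with colour_off == 64 (one cache line) and
	 * cachep->colour == 3, successive slabs on this node place their
	 * first object at byte offsets 0, 64, 128, 0, 64, ... so that the
	 * same object index in different slabs does not always land on the
	 * same cache lines.
	 */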
2784 
2785 	if (local_flags & __GFP_WAIT)
2786 		local_irq_enable();
2787 
2788 	/*
2789 	 * The test for missing atomic flag is performed here, rather than
2790 	 * the more obvious place, simply to reduce the critical path length
2791 	 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2792 	 * will eventually be caught here (where it matters).
2793 	 */
2794 	kmem_flagcheck(cachep, flags);
2795 
2796 	/*
2797 	 * Get mem for the objs.  Attempt to allocate a physical page from
2798 	 * 'nodeid'.
2799 	 */
2800 	if (!objp)
2801 		objp = kmem_getpages(cachep, local_flags, nodeid);
2802 	if (!objp)
2803 		goto failed;
2804 
2805 	/* Get slab management. */
2806 	slabp = alloc_slabmgmt(cachep, objp, offset,
2807 			local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
2808 	if (!slabp)
2809 		goto opps1;
2810 
2811 	slab_map_pages(cachep, slabp, objp);
2812 
2813 	cache_init_objs(cachep, slabp);
2814 
2815 	if (local_flags & __GFP_WAIT)
2816 		local_irq_disable();
2817 	check_irq_off();
2818 	spin_lock(&l3->list_lock);
2819 
2820 	/* Make slab active. */
2821 	list_add_tail(&slabp->list, &(l3->slabs_free));
2822 	STATS_INC_GROWN(cachep);
2823 	l3->free_objects += cachep->num;
2824 	spin_unlock(&l3->list_lock);
2825 	return 1;
2826 opps1:
2827 	kmem_freepages(cachep, objp);
2828 failed:
2829 	if (local_flags & __GFP_WAIT)
2830 		local_irq_disable();
2831 	return 0;
2832 }
2833 
2834 #if DEBUG
2835 
2836 /*
2837  * Perform extra freeing checks:
2838  * - detect bad pointers.
2839  * - POISON/RED_ZONE checking
2840  */
2841 static void kfree_debugcheck(const void *objp)
2842 {
2843 	if (!virt_addr_valid(objp)) {
2844 		printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
2845 		       (unsigned long)objp);
2846 		BUG();
2847 	}
2848 }
2849 
2850 static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
2851 {
2852 	unsigned long long redzone1, redzone2;
2853 
2854 	redzone1 = *dbg_redzone1(cache, obj);
2855 	redzone2 = *dbg_redzone2(cache, obj);
2856 
2857 	/*
2858 	 * Redzone is ok.
2859 	 */
2860 	if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
2861 		return;
2862 
2863 	if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
2864 		slab_error(cache, "double free detected");
2865 	else
2866 		slab_error(cache, "memory outside object was overwritten");
2867 
2868 	printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
2869 			obj, redzone1, redzone2);
2870 }
2871 
2872 static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
2873 				   void *caller)
2874 {
2875 	struct page *page;
2876 	unsigned int objnr;
2877 	struct slab *slabp;
2878 
2879 	BUG_ON(virt_to_cache(objp) != cachep);
2880 
2881 	objp -= obj_offset(cachep);
2882 	kfree_debugcheck(objp);
2883 	page = virt_to_head_page(objp);
2884 
2885 	slabp = page_get_slab(page);
2886 
2887 	if (cachep->flags & SLAB_RED_ZONE) {
2888 		verify_redzone_free(cachep, objp);
2889 		*dbg_redzone1(cachep, objp) = RED_INACTIVE;
2890 		*dbg_redzone2(cachep, objp) = RED_INACTIVE;
2891 	}
2892 	if (cachep->flags & SLAB_STORE_USER)
2893 		*dbg_userword(cachep, objp) = caller;
2894 
2895 	objnr = obj_to_index(cachep, slabp, objp);
2896 
2897 	BUG_ON(objnr >= cachep->num);
2898 	BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
2899 
2900 #ifdef CONFIG_DEBUG_SLAB_LEAK
2901 	slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
2902 #endif
2903 	if (cachep->flags & SLAB_POISON) {
2904 #ifdef CONFIG_DEBUG_PAGEALLOC
2905 		if ((cachep->buffer_size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
2906 			store_stackinfo(cachep, objp, (unsigned long)caller);
2907 			kernel_map_pages(virt_to_page(objp),
2908 					 cachep->buffer_size / PAGE_SIZE, 0);
2909 		} else {
2910 			poison_obj(cachep, objp, POISON_FREE);
2911 		}
2912 #else
2913 		poison_obj(cachep, objp, POISON_FREE);
2914 #endif
2915 	}
2916 	return objp;
2917 }
2918 
2919 static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
2920 {
2921 	kmem_bufctl_t i;
2922 	int entries = 0;
2923 
2924 	/* Check slab's freelist to see if this obj is there. */
2925 	for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
2926 		entries++;
2927 		if (entries > cachep->num || i >= cachep->num)
2928 			goto bad;
2929 	}
2930 	if (entries != cachep->num - slabp->inuse) {
2931 bad:
2932 		printk(KERN_ERR "slab: Internal list corruption detected in "
2933 				"cache '%s'(%d), slabp %p(%d). Hexdump:\n",
2934 			cachep->name, cachep->num, slabp, slabp->inuse);
2935 		for (i = 0;
2936 		     i < sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t);
2937 		     i++) {
2938 			if (i % 16 == 0)
2939 				printk("\n%03x:", i);
2940 			printk(" %02x", ((unsigned char *)slabp)[i]);
2941 		}
2942 		printk("\n");
2943 		BUG();
2944 	}
2945 }
2946 #else
2947 #define kfree_debugcheck(x) do { } while(0)
2948 #define cache_free_debugcheck(x,objp,z) (objp)
2949 #define check_slabp(x,y) do { } while(0)
2950 #endif
2951 
2952 static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags)
2953 {
2954 	int batchcount;
2955 	struct kmem_list3 *l3;
2956 	struct array_cache *ac;
2957 	int node;
2958 
2959 retry:
2960 	check_irq_off();
2961 	node = numa_node_id();
2962 	ac = cpu_cache_get(cachep);
2963 	batchcount = ac->batchcount;
2964 	if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
2965 		/*
2966 		 * If there was little recent activity on this cache, then
2967 		 * perform only a partial refill.  Otherwise we could generate
2968 		 * refill bouncing.
2969 		 */
2970 		batchcount = BATCHREFILL_LIMIT;
2971 	}
2972 	l3 = cachep->nodelists[node];
2973 
2974 	BUG_ON(ac->avail > 0 || !l3);
2975 	spin_lock(&l3->list_lock);
2976 
2977 	/* See if we can refill from the shared array */
2978 	if (l3->shared && transfer_objects(ac, l3->shared, batchcount))
2979 		goto alloc_done;
2980 
2981 	while (batchcount > 0) {
2982 		struct list_head *entry;
2983 		struct slab *slabp;
2984 		/* Get the slab the allocation is to come from. */
2985 		entry = l3->slabs_partial.next;
2986 		if (entry == &l3->slabs_partial) {
2987 			l3->free_touched = 1;
2988 			entry = l3->slabs_free.next;
2989 			if (entry == &l3->slabs_free)
2990 				goto must_grow;
2991 		}
2992 
2993 		slabp = list_entry(entry, struct slab, list);
2994 		check_slabp(cachep, slabp);
2995 		check_spinlock_acquired(cachep);
2996 
2997 		/*
2998 		 * The slab was either on partial or free list so
2999 		 * there must be at least one object available for
3000 		 * allocation.
3001 		 */
3002 		BUG_ON(slabp->inuse >= cachep->num);
3003 
3004 		while (slabp->inuse < cachep->num && batchcount--) {
3005 			STATS_INC_ALLOCED(cachep);
3006 			STATS_INC_ACTIVE(cachep);
3007 			STATS_SET_HIGH(cachep);
3008 
3009 			ac->entry[ac->avail++] = slab_get_obj(cachep, slabp,
3010 							    node);
3011 		}
3012 		check_slabp(cachep, slabp);
3013 
3014 		/* move slabp to correct slabp list: */
3015 		list_del(&slabp->list);
3016 		if (slabp->free == BUFCTL_END)
3017 			list_add(&slabp->list, &l3->slabs_full);
3018 		else
3019 			list_add(&slabp->list, &l3->slabs_partial);
3020 	}
3021 
3022 must_grow:
3023 	l3->free_objects -= ac->avail;
3024 alloc_done:
3025 	spin_unlock(&l3->list_lock);
3026 
3027 	if (unlikely(!ac->avail)) {
3028 		int x;
3029 		x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
3030 
3031 		/* cache_grow can reenable interrupts, then ac could change. */
3032 		ac = cpu_cache_get(cachep);
3033 		if (!x && ac->avail == 0)	/* no objects in sight? abort */
3034 			return NULL;
3035 
3036 		if (!ac->avail)		/* objects refilled by interrupt? */
3037 			goto retry;
3038 	}
3039 	ac->touched = 1;
3040 	return ac->entry[--ac->avail];
3041 }
3042 
3043 static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3044 						gfp_t flags)
3045 {
3046 	might_sleep_if(flags & __GFP_WAIT);
3047 #if DEBUG
3048 	kmem_flagcheck(cachep, flags);
3049 #endif
3050 }
3051 
3052 #if DEBUG
3053 static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3054 				gfp_t flags, void *objp, void *caller)
3055 {
3056 	if (!objp)
3057 		return objp;
3058 	if (cachep->flags & SLAB_POISON) {
3059 #ifdef CONFIG_DEBUG_PAGEALLOC
3060 		if ((cachep->buffer_size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
3061 			kernel_map_pages(virt_to_page(objp),
3062 					 cachep->buffer_size / PAGE_SIZE, 1);
3063 		else
3064 			check_poison_obj(cachep, objp);
3065 #else
3066 		check_poison_obj(cachep, objp);
3067 #endif
3068 		poison_obj(cachep, objp, POISON_INUSE);
3069 	}
3070 	if (cachep->flags & SLAB_STORE_USER)
3071 		*dbg_userword(cachep, objp) = caller;
3072 
3073 	if (cachep->flags & SLAB_RED_ZONE) {
3074 		if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3075 				*dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3076 			slab_error(cachep, "double free, or memory outside"
3077 						" object was overwritten");
3078 			printk(KERN_ERR
3079 				"%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
3080 				objp, *dbg_redzone1(cachep, objp),
3081 				*dbg_redzone2(cachep, objp));
3082 		}
3083 		*dbg_redzone1(cachep, objp) = RED_ACTIVE;
3084 		*dbg_redzone2(cachep, objp) = RED_ACTIVE;
3085 	}
3086 #ifdef CONFIG_DEBUG_SLAB_LEAK
3087 	{
3088 		struct slab *slabp;
3089 		unsigned objnr;
3090 
3091 		slabp = page_get_slab(virt_to_head_page(objp));
3092 		objnr = (unsigned)(objp - slabp->s_mem) / cachep->buffer_size;
3093 		slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3094 	}
3095 #endif
3096 	objp += obj_offset(cachep);
3097 	if (cachep->ctor && cachep->flags & SLAB_POISON)
3098 		cachep->ctor(objp);
3099 #if ARCH_SLAB_MINALIGN
3100 	if ((u32)objp & (ARCH_SLAB_MINALIGN-1)) {
3101 		printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
3102 		       objp, ARCH_SLAB_MINALIGN);
3103 	}
3104 #endif
3105 	return objp;
3106 }
3107 #else
3108 #define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3109 #endif
3110 
3111 static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
3112 {
3113 	if (cachep == &cache_cache)
3114 		return false;
3115 
3116 	return should_failslab(obj_size(cachep), flags);
3117 }
3118 
3119 static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3120 {
3121 	void *objp;
3122 	struct array_cache *ac;
3123 
3124 	check_irq_off();
3125 
3126 	ac = cpu_cache_get(cachep);
3127 	if (likely(ac->avail)) {
3128 		STATS_INC_ALLOCHIT(cachep);
3129 		ac->touched = 1;
3130 		objp = ac->entry[--ac->avail];
3131 	} else {
3132 		STATS_INC_ALLOCMISS(cachep);
3133 		objp = cache_alloc_refill(cachep, flags);
3134 	}
3135 	return objp;
3136 }
3137 
3138 #ifdef CONFIG_NUMA
3139 /*
3140  * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
3141  *
3142  * If we are in_interrupt, then process context, including cpusets and
3143  * mempolicy, may not apply and should not be used for allocation policy.
3144  */
3145 static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3146 {
3147 	int nid_alloc, nid_here;
3148 
3149 	if (in_interrupt() || (flags & __GFP_THISNODE))
3150 		return NULL;
3151 	nid_alloc = nid_here = numa_node_id();
3152 	if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
3153 		nid_alloc = cpuset_mem_spread_node();
3154 	else if (current->mempolicy)
3155 		nid_alloc = slab_node(current->mempolicy);
3156 	if (nid_alloc != nid_here)
3157 		return ____cache_alloc_node(cachep, flags, nid_alloc);
3158 	return NULL;
3159 }
3160 
3161 /*
3162  * Fallback function if there was no memory available and no objects on a
3163  * certain node and fall back is permitted. First we scan all the
3164  * available nodelists for available objects. If that fails then we
3165  * perform an allocation without specifying a node. This allows the page
3166  * allocator to do its reclaim / fallback magic. We then insert the
3167  * slab into the proper nodelist and then allocate from it.
3168  */
3169 static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
3170 {
3171 	struct zonelist *zonelist;
3172 	gfp_t local_flags;
3173 	struct zoneref *z;
3174 	struct zone *zone;
3175 	enum zone_type high_zoneidx = gfp_zone(flags);
3176 	void *obj = NULL;
3177 	int nid;
3178 
3179 	if (flags & __GFP_THISNODE)
3180 		return NULL;
3181 
3182 	zonelist = node_zonelist(slab_node(current->mempolicy), flags);
3183 	local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
3184 
3185 retry:
3186 	/*
3187 	 * Look through allowed nodes for objects available
3188 	 * from existing per node queues.
3189 	 */
3190 	for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3191 		nid = zone_to_nid(zone);
3192 
3193 		if (cpuset_zone_allowed_hardwall(zone, flags) &&
3194 			cache->nodelists[nid] &&
3195 			cache->nodelists[nid]->free_objects) {
3196 				obj = ____cache_alloc_node(cache,
3197 					flags | GFP_THISNODE, nid);
3198 				if (obj)
3199 					break;
3200 		}
3201 	}
3202 
3203 	if (!obj) {
3204 		/*
3205 		 * This allocation will be performed within the constraints
3206 		 * of the current cpuset / memory policy requirements.
3207 		 * We may trigger various forms of reclaim on the allowed
3208 		 * set and go into memory reserves if necessary.
3209 		 */
3210 		if (local_flags & __GFP_WAIT)
3211 			local_irq_enable();
3212 		kmem_flagcheck(cache, flags);
3213 		obj = kmem_getpages(cache, local_flags, -1);
3214 		if (local_flags & __GFP_WAIT)
3215 			local_irq_disable();
3216 		if (obj) {
3217 			/*
3218 			 * Insert into the appropriate per node queues
3219 			 */
3220 			nid = page_to_nid(virt_to_page(obj));
3221 			if (cache_grow(cache, flags, nid, obj)) {
3222 				obj = ____cache_alloc_node(cache,
3223 					flags | GFP_THISNODE, nid);
3224 				if (!obj)
3225 					/*
3226 					 * Another processor may allocate the
3227 					 * objects in the slab since we are
3228 					 * not holding any locks.
3229 					 */
3230 					goto retry;
3231 			} else {
3232 				/* cache_grow already freed obj */
3233 				obj = NULL;
3234 			}
3235 		}
3236 	}
3237 	return obj;
3238 }
3239 
3240 /*
3241  * An interface to enable slab creation on nodeid
3242  */
3243 static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
3244 				int nodeid)
3245 {
3246 	struct list_head *entry;
3247 	struct slab *slabp;
3248 	struct kmem_list3 *l3;
3249 	void *obj;
3250 	int x;
3251 
3252 	l3 = cachep->nodelists[nodeid];
3253 	BUG_ON(!l3);
3254 
3255 retry:
3256 	check_irq_off();
3257 	spin_lock(&l3->list_lock);
3258 	entry = l3->slabs_partial.next;
3259 	if (entry == &l3->slabs_partial) {
3260 		l3->free_touched = 1;
3261 		entry = l3->slabs_free.next;
3262 		if (entry == &l3->slabs_free)
3263 			goto must_grow;
3264 	}
3265 
3266 	slabp = list_entry(entry, struct slab, list);
3267 	check_spinlock_acquired_node(cachep, nodeid);
3268 	check_slabp(cachep, slabp);
3269 
3270 	STATS_INC_NODEALLOCS(cachep);
3271 	STATS_INC_ACTIVE(cachep);
3272 	STATS_SET_HIGH(cachep);
3273 
3274 	BUG_ON(slabp->inuse == cachep->num);
3275 
3276 	obj = slab_get_obj(cachep, slabp, nodeid);
3277 	check_slabp(cachep, slabp);
3278 	l3->free_objects--;
3279 	/* move slabp to correct slabp list: */
3280 	list_del(&slabp->list);
3281 
3282 	if (slabp->free == BUFCTL_END)
3283 		list_add(&slabp->list, &l3->slabs_full);
3284 	else
3285 		list_add(&slabp->list, &l3->slabs_partial);
3286 
3287 	spin_unlock(&l3->list_lock);
3288 	goto done;
3289 
3290 must_grow:
3291 	spin_unlock(&l3->list_lock);
3292 	x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
3293 	if (x)
3294 		goto retry;
3295 
3296 	return fallback_alloc(cachep, flags);
3297 
3298 done:
3299 	return obj;
3300 }
3301 
3302 /**
3303  * kmem_cache_alloc_node - Allocate an object on the specified node
3304  * @cachep: The cache to allocate from.
3305  * @flags: See kmalloc().
3306  * @nodeid: node number of the target node.
3307  * @caller: return address of caller, used for debug information
3308  *
3309  * Identical to kmem_cache_alloc but it will allocate memory on the given
3310  * node, which can improve the performance for cpu bound structures.
3311  *
3312  * Fallback to other node is possible if __GFP_THISNODE is not set.
3313  */
3314 static __always_inline void *
3315 __cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3316 		   void *caller)
3317 {
3318 	unsigned long save_flags;
3319 	void *ptr;
3320 
3321 	if (slab_should_failslab(cachep, flags))
3322 		return NULL;
3323 
3324 	cache_alloc_debugcheck_before(cachep, flags);
3325 	local_irq_save(save_flags);
3326 
3327 	if (unlikely(nodeid == -1))
3328 		nodeid = numa_node_id();
3329 
3330 	if (unlikely(!cachep->nodelists[nodeid])) {
3331 		/* Node not bootstrapped yet */
3332 		ptr = fallback_alloc(cachep, flags);
3333 		goto out;
3334 	}
3335 
3336 	if (nodeid == numa_node_id()) {
3337 		/*
3338 		 * Use the locally cached objects if possible.
3339 		 * However ____cache_alloc does not allow fallback
3340 		 * to other nodes. It may fail while we still have
3341 		 * objects on other nodes available.
3342 		 */
3343 		ptr = ____cache_alloc(cachep, flags);
3344 		if (ptr)
3345 			goto out;
3346 	}
3347 	/* ____cache_alloc_node can fall back to other nodes */
3348 	ptr = ____cache_alloc_node(cachep, flags, nodeid);
3349   out:
3350 	local_irq_restore(save_flags);
3351 	ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
3352 
3353 	if (unlikely((flags & __GFP_ZERO) && ptr))
3354 		memset(ptr, 0, obj_size(cachep));
3355 
3356 	return ptr;
3357 }
3358 
3359 static __always_inline void *
3360 __do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3361 {
3362 	void *objp;
3363 
3364 	if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3365 		objp = alternate_node_alloc(cache, flags);
3366 		if (objp)
3367 			goto out;
3368 	}
3369 	objp = ____cache_alloc(cache, flags);
3370 
3371 	/*
3372 	 * We may just have run out of memory on the local node.
3373 	 * ____cache_alloc_node() knows how to locate memory on other nodes
3374 	 */
3375  	if (!objp)
3376  		objp = ____cache_alloc_node(cache, flags, numa_node_id());
3377 
3378   out:
3379 	return objp;
3380 }
3381 #else
3382 
3383 static __always_inline void *
3384 __do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3385 {
3386 	return ____cache_alloc(cachep, flags);
3387 }
3388 
3389 #endif /* CONFIG_NUMA */
3390 
3391 static __always_inline void *
3392 __cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3393 {
3394 	unsigned long save_flags;
3395 	void *objp;
3396 
3397 	if (slab_should_failslab(cachep, flags))
3398 		return NULL;
3399 
3400 	cache_alloc_debugcheck_before(cachep, flags);
3401 	local_irq_save(save_flags);
3402 	objp = __do_cache_alloc(cachep, flags);
3403 	local_irq_restore(save_flags);
3404 	objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
3405 	prefetchw(objp);
3406 
3407 	if (unlikely((flags & __GFP_ZERO) && objp))
3408 		memset(objp, 0, obj_size(cachep));
3409 
3410 	return objp;
3411 }
3412 
3413 /*
3414  * Caller needs to acquire correct kmem_list's list_lock
3415  */
3416 static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
3417 		       int node)
3418 {
3419 	int i;
3420 	struct kmem_list3 *l3;
3421 
3422 	for (i = 0; i < nr_objects; i++) {
3423 		void *objp = objpp[i];
3424 		struct slab *slabp;
3425 
3426 		slabp = virt_to_slab(objp);
3427 		l3 = cachep->nodelists[node];
3428 		list_del(&slabp->list);
3429 		check_spinlock_acquired_node(cachep, node);
3430 		check_slabp(cachep, slabp);
3431 		slab_put_obj(cachep, slabp, objp, node);
3432 		STATS_DEC_ACTIVE(cachep);
3433 		l3->free_objects++;
3434 		check_slabp(cachep, slabp);
3435 
3436 		/* fixup slab chains */
3437 		if (slabp->inuse == 0) {
3438 			if (l3->free_objects > l3->free_limit) {
3439 				l3->free_objects -= cachep->num;
3440 				/* No need to drop any previously held
3441 				 * lock here; even if we have an off-slab slab
3442 				 * descriptor, it is guaranteed to come from
3443 				 * a different cache.  Refer to the comments
3444 				 * before alloc_slabmgmt.
3445 				 */
3446 				slab_destroy(cachep, slabp);
3447 			} else {
3448 				list_add(&slabp->list, &l3->slabs_free);
3449 			}
3450 		} else {
3451 			/* Unconditionally move a slab to the end of the
3452 			 * partial list on free - giving the other objects
3453 			 * in the slab the maximum time to be freed, too.
3454 			 */
3455 			list_add_tail(&slabp->list, &l3->slabs_partial);
3456 		}
3457 	}
3458 }
3459 
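/*
 * Flush the per-cpu array 'ac' when it has overflowed: move up to
 * ac->batchcount objects into the node's shared array if there is room,
 * otherwise hand them back to the slab lists via free_block(), then
 * compact the remaining entries to the front of the per-cpu array.
 */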
3460 static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
3461 {
3462 	int batchcount;
3463 	struct kmem_list3 *l3;
3464 	int node = numa_node_id();
3465 
3466 	batchcount = ac->batchcount;
3467 #if DEBUG
3468 	BUG_ON(!batchcount || batchcount > ac->avail);
3469 #endif
3470 	check_irq_off();
3471 	l3 = cachep->nodelists[node];
3472 	spin_lock(&l3->list_lock);
3473 	if (l3->shared) {
3474 		struct array_cache *shared_array = l3->shared;
3475 		int max = shared_array->limit - shared_array->avail;
3476 		if (max) {
3477 			if (batchcount > max)
3478 				batchcount = max;
3479 			memcpy(&(shared_array->entry[shared_array->avail]),
3480 			       ac->entry, sizeof(void *) * batchcount);
3481 			shared_array->avail += batchcount;
3482 			goto free_done;
3483 		}
3484 	}
3485 
3486 	free_block(cachep, ac->entry, batchcount, node);
3487 free_done:
3488 #if STATS
3489 	{
3490 		int i = 0;
3491 		struct list_head *p;
3492 
3493 		p = l3->slabs_free.next;
3494 		while (p != &(l3->slabs_free)) {
3495 			struct slab *slabp;
3496 
3497 			slabp = list_entry(p, struct slab, list);
3498 			BUG_ON(slabp->inuse);
3499 
3500 			i++;
3501 			p = p->next;
3502 		}
3503 		STATS_SET_FREEABLE(cachep, i);
3504 	}
3505 #endif
3506 	spin_unlock(&l3->list_lock);
3507 	ac->avail -= batchcount;
3508 	memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
3509 }
3510 
3511 /*
3512  * Release an obj back to its cache. If the obj has a constructed state, it must
3513  * be in this state _before_ it is released.  Called with interrupts disabled.
3514  */
3515 static inline void __cache_free(struct kmem_cache *cachep, void *objp)
3516 {
3517 	struct array_cache *ac = cpu_cache_get(cachep);
3518 
3519 	check_irq_off();
3520 	objp = cache_free_debugcheck(cachep, objp, __builtin_return_address(0));
3521 
3522 	/*
3523 	 * Skip calling cache_free_alien() when the platform is not NUMA.
3524 	 * This avoids the cache misses incurred by accessing slabp (a
3525 	 * per-page memory reference) to get the nodeid.  Instead, use a
3526 	 * global variable to decide whether to skip the call; that variable
3527 	 * is most likely already present in the CPU cache.
3528 	 */
3529 	if (numa_platform && cache_free_alien(cachep, objp))
3530 		return;
3531 
3532 	if (likely(ac->avail < ac->limit)) {
3533 		STATS_INC_FREEHIT(cachep);
3534 		ac->entry[ac->avail++] = objp;
3535 		return;
3536 	} else {
3537 		STATS_INC_FREEMISS(cachep);
3538 		cache_flusharray(cachep, ac);
3539 		ac->entry[ac->avail++] = objp;
3540 	}
3541 }
3542 
3543 /**
3544  * kmem_cache_alloc - Allocate an object
3545  * @cachep: The cache to allocate from.
3546  * @flags: See kmalloc().
3547  *
3548  * Allocate an object from this cache.  The flags are only relevant
3549  * if the cache has no available objects.
3550  */
3551 void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3552 {
3553 	return __cache_alloc(cachep, flags, __builtin_return_address(0));
3554 }
3555 EXPORT_SYMBOL(kmem_cache_alloc);
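
/*
 * A minimal usage sketch (hypothetical 'struct foo' and cache name; the
 * constructor argument is left NULL because its prototype differs between
 * kernel versions):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo_cache", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	...
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	if (f) {
 *		... use f, then return it to the same cache ...
 *		kmem_cache_free(foo_cachep, f);
 *	}
 */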
3556 
3557 /**
3558  * kmem_ptr_validate - check if an untrusted pointer might be a slab entry.
3559  * @cachep: the cache we're checking against
3560  * @ptr: pointer to validate
3561  *
3562  * This verifies that the untrusted pointer looks sane;
3563  * it is _not_ a guarantee that the pointer is actually
3564  * part of the slab cache in question, but it at least
3565  * validates that the pointer can be dereferenced and
3566  * looks half-way sane.
3567  *
3568  * Currently only used for dentry validation.
3569  */
3570 int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr)
3571 {
3572 	unsigned long addr = (unsigned long)ptr;
3573 	unsigned long min_addr = PAGE_OFFSET;
3574 	unsigned long align_mask = BYTES_PER_WORD - 1;
3575 	unsigned long size = cachep->buffer_size;
3576 	struct page *page;
3577 
3578 	if (unlikely(addr < min_addr))
3579 		goto out;
3580 	if (unlikely(addr > (unsigned long)high_memory - size))
3581 		goto out;
3582 	if (unlikely(addr & align_mask))
3583 		goto out;
3584 	if (unlikely(!kern_addr_valid(addr)))
3585 		goto out;
3586 	if (unlikely(!kern_addr_valid(addr + size - 1)))
3587 		goto out;
3588 	page = virt_to_page(ptr);
3589 	if (unlikely(!PageSlab(page)))
3590 		goto out;
3591 	if (unlikely(page_get_cache(page) != cachep))
3592 		goto out;
3593 	return 1;
3594 out:
3595 	return 0;
3596 }
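
/*
 * Typical (hedged) use: a cheap sanity check before trusting a pointer
 * obtained without a reference, e.g. in dentry validation:
 *
 *	if (!kmem_ptr_validate(dentry_cache, dentry))
 *		return 0;
 *
 * A zero return means the pointer cannot be an object of this cache; a
 * non-zero return is not proof of validity, only of plausibility.
 */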
3597 
3598 #ifdef CONFIG_NUMA
3599 void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3600 {
3601 	return __cache_alloc_node(cachep, flags, nodeid,
3602 			__builtin_return_address(0));
3603 }
3604 EXPORT_SYMBOL(kmem_cache_alloc_node);
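
/*
 * A minimal sketch (hypothetical cache and per-node array): place one
 * object on each online node's local memory:
 *
 *	int nid;
 *
 *	for_each_online_node(nid)
 *		pernode[nid] = kmem_cache_alloc_node(foo_cachep, GFP_KERNEL, nid);
 */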
3605 
3606 static __always_inline void *
3607 __do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
3608 {
3609 	struct kmem_cache *cachep;
3610 
3611 	cachep = kmem_find_general_cachep(size, flags);
3612 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3613 		return cachep;
3614 	return kmem_cache_alloc_node(cachep, flags, node);
3615 }
3616 
3617 #ifdef CONFIG_DEBUG_SLAB
3618 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3619 {
3620 	return __do_kmalloc_node(size, flags, node,
3621 			__builtin_return_address(0));
3622 }
3623 EXPORT_SYMBOL(__kmalloc_node);
3624 
3625 void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
3626 		int node, unsigned long caller)
3627 {
3628 	return __do_kmalloc_node(size, flags, node, (void *)caller);
3629 }
3630 EXPORT_SYMBOL(__kmalloc_node_track_caller);
3631 #else
3632 void *__kmalloc_node(size_t size, gfp_t flags, int node)
3633 {
3634 	return __do_kmalloc_node(size, flags, node, NULL);
3635 }
3636 EXPORT_SYMBOL(__kmalloc_node);
3637 #endif /* CONFIG_DEBUG_SLAB */
3638 #endif /* CONFIG_NUMA */
3639 
3640 /**
3641  * __do_kmalloc - allocate memory
3642  * @size: how many bytes of memory are required.
3643  * @flags: the type of memory to allocate (see kmalloc).
3644  * @caller: function caller for debug tracking of the caller
3645  */
3646 static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3647 					  void *caller)
3648 {
3649 	struct kmem_cache *cachep;
3650 
3651 	/* To save a few bytes of .text space, replace the __ prefix
3652 	 * with kmem_.
3653 	 * kmalloc then uses the uninlined functions instead of the
3654 	 * inlined ones.
3655 	 */
3656 	cachep = __find_general_cachep(size, flags);
3657 	if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3658 		return cachep;
3659 	return __cache_alloc(cachep, flags, caller);
3660 }
3661 
3662 
3663 #ifdef CONFIG_DEBUG_SLAB
3664 void *__kmalloc(size_t size, gfp_t flags)
3665 {
3666 	return __do_kmalloc(size, flags, __builtin_return_address(0));
3667 }
3668 EXPORT_SYMBOL(__kmalloc);
3669 
3670 void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
3671 {
3672 	return __do_kmalloc(size, flags, (void *)caller);
3673 }
3674 EXPORT_SYMBOL(__kmalloc_track_caller);
3675 
3676 #else
3677 void *__kmalloc(size_t size, gfp_t flags)
3678 {
3679 	return __do_kmalloc(size, flags, NULL);
3680 }
3681 EXPORT_SYMBOL(__kmalloc);
3682 #endif
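
/*
 * A minimal kmalloc()/kfree() sketch ('len' is a hypothetical byte count):
 *
 *	char *buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 *
 * kmalloc(len, GFP_KERNEL | __GFP_ZERO) (or kzalloc()) returns zeroed
 * memory; GFP_ATOMIC must be used instead of GFP_KERNEL in contexts that
 * may not sleep.
 */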
3683 
3684 /**
3685  * kmem_cache_free - Deallocate an object
3686  * @cachep: The cache the allocation was from.
3687  * @objp: The previously allocated object.
3688  *
3689  * Free an object which was previously allocated from this
3690  * cache.
3691  */
3692 void kmem_cache_free(struct kmem_cache *cachep, void *objp)
3693 {
3694 	unsigned long flags;
3695 
3696 	local_irq_save(flags);
3697 	debug_check_no_locks_freed(objp, obj_size(cachep));
3698 	if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
3699 		debug_check_no_obj_freed(objp, obj_size(cachep));
3700 	__cache_free(cachep, objp);
3701 	local_irq_restore(flags);
3702 }
3703 EXPORT_SYMBOL(kmem_cache_free);
3704 
3705 /**
3706  * kfree - free previously allocated memory
3707  * @objp: pointer returned by kmalloc.
3708  *
3709  * If @objp is NULL, no operation is performed.
3710  *
3711  * Don't free memory not originally allocated by kmalloc()
3712  * or you will run into trouble.
3713  */
3714 void kfree(const void *objp)
3715 {
3716 	struct kmem_cache *c;
3717 	unsigned long flags;
3718 
3719 	if (unlikely(ZERO_OR_NULL_PTR(objp)))
3720 		return;
3721 	local_irq_save(flags);
3722 	kfree_debugcheck(objp);
3723 	c = virt_to_cache(objp);
3724 	debug_check_no_locks_freed(objp, obj_size(c));
3725 	debug_check_no_obj_freed(objp, obj_size(c));
3726 	__cache_free(c, (void *)objp);
3727 	local_irq_restore(flags);
3728 }
3729 EXPORT_SYMBOL(kfree);
3730 
3731 unsigned int kmem_cache_size(struct kmem_cache *cachep)
3732 {
3733 	return obj_size(cachep);
3734 }
3735 EXPORT_SYMBOL(kmem_cache_size);
3736 
3737 const char *kmem_cache_name(struct kmem_cache *cachep)
3738 {
3739 	return cachep->name;
3740 }
3741 EXPORT_SYMBOL_GPL(kmem_cache_name);
3742 
3743 /*
3744  * Initialize kmem_list3, or resize the per-node caches, for all online nodes.
3745  */
3746 static int alloc_kmemlist(struct kmem_cache *cachep)
3747 {
3748 	int node;
3749 	struct kmem_list3 *l3;
3750 	struct array_cache *new_shared;
3751 	struct array_cache **new_alien = NULL;
3752 
3753 	for_each_online_node(node) {
3754 
3755 		if (use_alien_caches) {
3756 			new_alien = alloc_alien_cache(node, cachep->limit);
3757 			if (!new_alien)
3758 				goto fail;
3759 		}
3760 
3761 		new_shared = NULL;
3762 		if (cachep->shared) {
3763 			new_shared = alloc_arraycache(node,
3764 				cachep->shared*cachep->batchcount,
3765 					0xbaadf00d);
3766 			if (!new_shared) {
3767 				free_alien_cache(new_alien);
3768 				goto fail;
3769 			}
3770 		}
3771 
3772 		l3 = cachep->nodelists[node];
3773 		if (l3) {
3774 			struct array_cache *shared = l3->shared;
3775 
3776 			spin_lock_irq(&l3->list_lock);
3777 
3778 			if (shared)
3779 				free_block(cachep, shared->entry,
3780 						shared->avail, node);
3781 
3782 			l3->shared = new_shared;
3783 			if (!l3->alien) {
3784 				l3->alien = new_alien;
3785 				new_alien = NULL;
3786 			}
3787 			l3->free_limit = (1 + nr_cpus_node(node)) *
3788 					cachep->batchcount + cachep->num;
3789 			spin_unlock_irq(&l3->list_lock);
3790 			kfree(shared);
3791 			free_alien_cache(new_alien);
3792 			continue;
3793 		}
3794 		l3 = kmalloc_node(sizeof(struct kmem_list3), GFP_KERNEL, node);
3795 		if (!l3) {
3796 			free_alien_cache(new_alien);
3797 			kfree(new_shared);
3798 			goto fail;
3799 		}
3800 
3801 		kmem_list3_init(l3);
3802 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
3803 				((unsigned long)cachep) % REAPTIMEOUT_LIST3;
3804 		l3->shared = new_shared;
3805 		l3->alien = new_alien;
3806 		l3->free_limit = (1 + nr_cpus_node(node)) *
3807 					cachep->batchcount + cachep->num;
3808 		cachep->nodelists[node] = l3;
3809 	}
3810 	return 0;
3811 
3812 fail:
3813 	if (!cachep->next.next) {
3814 		/* Cache is not active yet. Roll back what we did */
3815 		node--;
3816 		while (node >= 0) {
3817 			if (cachep->nodelists[node]) {
3818 				l3 = cachep->nodelists[node];
3819 
3820 				kfree(l3->shared);
3821 				free_alien_cache(l3->alien);
3822 				kfree(l3);
3823 				cachep->nodelists[node] = NULL;
3824 			}
3825 			node--;
3826 		}
3827 	}
3828 	return -ENOMEM;
3829 }
3830 
3831 struct ccupdate_struct {
3832 	struct kmem_cache *cachep;
3833 	struct array_cache *new[NR_CPUS];
3834 };
3835 
3836 static void do_ccupdate_local(void *info)
3837 {
3838 	struct ccupdate_struct *new = info;
3839 	struct array_cache *old;
3840 
3841 	check_irq_off();
3842 	old = cpu_cache_get(new->cachep);
3843 
3844 	new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
3845 	new->new[smp_processor_id()] = old;
3846 }
3847 
3848 /* Always called with the cache_chain_mutex held */
3849 static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
3850 				int batchcount, int shared)
3851 {
3852 	struct ccupdate_struct *new;
3853 	int i;
3854 
3855 	new = kzalloc(sizeof(*new), GFP_KERNEL);
3856 	if (!new)
3857 		return -ENOMEM;
3858 
3859 	for_each_online_cpu(i) {
3860 		new->new[i] = alloc_arraycache(cpu_to_node(i), limit,
3861 						batchcount);
3862 		if (!new->new[i]) {
3863 			for (i--; i >= 0; i--)
3864 				kfree(new->new[i]);
3865 			kfree(new);
3866 			return -ENOMEM;
3867 		}
3868 	}
3869 	new->cachep = cachep;
3870 
3871 	on_each_cpu(do_ccupdate_local, (void *)new, 1);
3872 
3873 	check_irq_on();
3874 	cachep->batchcount = batchcount;
3875 	cachep->limit = limit;
3876 	cachep->shared = shared;
3877 
3878 	for_each_online_cpu(i) {
3879 		struct array_cache *ccold = new->new[i];
3880 		if (!ccold)
3881 			continue;
3882 		spin_lock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3883 		free_block(cachep, ccold->entry, ccold->avail, cpu_to_node(i));
3884 		spin_unlock_irq(&cachep->nodelists[cpu_to_node(i)]->list_lock);
3885 		kfree(ccold);
3886 	}
3887 	kfree(new);
3888 	return alloc_kmemlist(cachep);
3889 }
3890 
3891 /* Called with cache_chain_mutex held always */
3892 static int enable_cpucache(struct kmem_cache *cachep)
3893 {
3894 	int err;
3895 	int limit, shared;
3896 
3897 	/*
3898 	 * The head array serves three purposes:
3899 	 * - create a LIFO ordering, i.e. return objects that are cache-warm
3900 	 * - reduce the number of spinlock operations.
3901 	 * - reduce the number of linked list operations on the slab and
3902 	 *   bufctl chains: array operations are cheaper.
3903 	 * The numbers below are guesses; we should auto-tune them as
3904 	 * described by Bonwick.
3905 	 */
3906 	if (cachep->buffer_size > 131072)
3907 		limit = 1;
3908 	else if (cachep->buffer_size > PAGE_SIZE)
3909 		limit = 8;
3910 	else if (cachep->buffer_size > 1024)
3911 		limit = 24;
3912 	else if (cachep->buffer_size > 256)
3913 		limit = 54;
3914 	else
3915 		limit = 120;
3916 
3917 	/*
3918 	 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
3919 	 * allocation behaviour: most allocs on one cpu, most frees on
3920 	 * another cpu.  For these cases, efficient object passing between
3921 	 * cpus is necessary.  This is provided by a shared array, which
3922 	 * replaces Bonwick's magazine layer.
3923 	 * On uniprocessor, it is functionally equivalent (but less
3924 	 * efficient) to a larger limit, so it is disabled by default.
3925 	 */
3926 	shared = 0;
3927 	if (cachep->buffer_size <= PAGE_SIZE && num_possible_cpus() > 1)
3928 		shared = 8;
3929 
3930 #if DEBUG
3931 	/*
3932 	 * With debugging enabled, a large batchcount leads to excessively
3933 	 * long periods with local interrupts disabled.  Limit the batchcount.
3934 	 */
3935 	if (limit > 32)
3936 		limit = 32;
3937 #endif
3938 	err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared);
3939 	if (err)
3940 		printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
3941 		       cachep->name, -err);
3942 	return err;
3943 }
3944 
3945 /*
3946  * Drain an array if it contains any elements, taking the l3 lock only if
3947  * necessary.  Note that the l3 list_lock also protects the array_cache
3948  * if drain_array() is used on the shared array.
3949  */
3950 void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
3951 			 struct array_cache *ac, int force, int node)
3952 {
3953 	int tofree;
3954 
3955 	if (!ac || !ac->avail)
3956 		return;
3957 	if (ac->touched && !force) {
3958 		ac->touched = 0;
3959 	} else {
3960 		spin_lock_irq(&l3->list_lock);
3961 		if (ac->avail) {
3962 			tofree = force ? ac->avail : (ac->limit + 4) / 5;
3963 			if (tofree > ac->avail)
3964 				tofree = (ac->avail + 1) / 2;
3965 			free_block(cachep, ac->entry, tofree, node);
3966 			ac->avail -= tofree;
3967 			memmove(ac->entry, &(ac->entry[tofree]),
3968 				sizeof(void *) * ac->avail);
3969 		}
3970 		spin_unlock_irq(&l3->list_lock);
3971 	}
3972 }
3973 
3974 /**
3975  * cache_reap - Reclaim memory from caches.
3976  * @w: work descriptor
3977  *
3978  * Called from workqueue/eventd every few seconds.
3979  * Purpose:
3980  * - clear the per-cpu caches for this CPU.
3981  * - return freeable pages to the main free memory pool.
3982  *
3983  * If we cannot acquire the cache chain mutex then just give up - we'll try
3984  * again on the next iteration.
3985  */
3986 static void cache_reap(struct work_struct *w)
3987 {
3988 	struct kmem_cache *searchp;
3989 	struct kmem_list3 *l3;
3990 	int node = numa_node_id();
3991 	struct delayed_work *work =
3992 		container_of(w, struct delayed_work, work);
3993 
3994 	if (!mutex_trylock(&cache_chain_mutex))
3995 		/* Give up. Setup the next iteration. */
3996 		goto out;
3997 
3998 	list_for_each_entry(searchp, &cache_chain, next) {
3999 		check_irq_on();
4000 
4001 		/*
4002 		 * We only take the l3 lock if absolutely necessary and we
4003 		 * have established with reasonable certainty that
4004 		 * we can do some work once the lock is obtained.
4005 		 */
4006 		l3 = searchp->nodelists[node];
4007 
4008 		reap_alien(searchp, l3);
4009 
4010 		drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
4011 
4012 		/*
4013 		 * These are racy checks but it does not matter
4014 		 * if we skip one check or scan twice.
4015 		 */
4016 		if (time_after(l3->next_reap, jiffies))
4017 			goto next;
4018 
4019 		l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
4020 
4021 		drain_array(searchp, l3, l3->shared, 0, node);
4022 
4023 		if (l3->free_touched)
4024 			l3->free_touched = 0;
4025 		else {
4026 			int freed;
4027 
4028 			freed = drain_freelist(searchp, l3, (l3->free_limit +
4029 				5 * searchp->num - 1) / (5 * searchp->num));
4030 			STATS_ADD_REAPED(searchp, freed);
4031 		}
4032 next:
4033 		cond_resched();
4034 	}
4035 	check_irq_on();
4036 	mutex_unlock(&cache_chain_mutex);
4037 	next_reap_node();
4038 out:
4039 	/* Set up the next iteration */
4040 	schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
4041 }
4042 
4043 #ifdef CONFIG_SLABINFO
4044 
4045 static void print_slabinfo_header(struct seq_file *m)
4046 {
4047 	/*
4048 	 * Output format version, so at least we can change it
4049 	 * without _too_ many complaints.
4050 	 */
4051 #if STATS
4052 	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
4053 #else
4054 	seq_puts(m, "slabinfo - version: 2.1\n");
4055 #endif
4056 	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
4057 		 "<objperslab> <pagesperslab>");
4058 	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4059 	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
4060 #if STATS
4061 	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
4062 		 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
4063 	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
4064 #endif
4065 	seq_putc(m, '\n');
4066 }
4067 
4068 static void *s_start(struct seq_file *m, loff_t *pos)
4069 {
4070 	loff_t n = *pos;
4071 
4072 	mutex_lock(&cache_chain_mutex);
4073 	if (!n)
4074 		print_slabinfo_header(m);
4075 
4076 	return seq_list_start(&cache_chain, *pos);
4077 }
4078 
4079 static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4080 {
4081 	return seq_list_next(p, &cache_chain, pos);
4082 }
4083 
4084 static void s_stop(struct seq_file *m, void *p)
4085 {
4086 	mutex_unlock(&cache_chain_mutex);
4087 }
4088 
4089 static int s_show(struct seq_file *m, void *p)
4090 {
4091 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4092 	struct slab *slabp;
4093 	unsigned long active_objs;
4094 	unsigned long num_objs;
4095 	unsigned long active_slabs = 0;
4096 	unsigned long num_slabs, free_objects = 0, shared_avail = 0;
4097 	const char *name;
4098 	char *error = NULL;
4099 	int node;
4100 	struct kmem_list3 *l3;
4101 
4102 	active_objs = 0;
4103 	num_slabs = 0;
4104 	for_each_online_node(node) {
4105 		l3 = cachep->nodelists[node];
4106 		if (!l3)
4107 			continue;
4108 
4109 		check_irq_on();
4110 		spin_lock_irq(&l3->list_lock);
4111 
4112 		list_for_each_entry(slabp, &l3->slabs_full, list) {
4113 			if (slabp->inuse != cachep->num && !error)
4114 				error = "slabs_full accounting error";
4115 			active_objs += cachep->num;
4116 			active_slabs++;
4117 		}
4118 		list_for_each_entry(slabp, &l3->slabs_partial, list) {
4119 			if (slabp->inuse == cachep->num && !error)
4120 				error = "slabs_partial inuse accounting error";
4121 			if (!slabp->inuse && !error)
4122 				error = "slabs_partial/inuse accounting error";
4123 			active_objs += slabp->inuse;
4124 			active_slabs++;
4125 		}
4126 		list_for_each_entry(slabp, &l3->slabs_free, list) {
4127 			if (slabp->inuse && !error)
4128 				error = "slabs_free/inuse accounting error";
4129 			num_slabs++;
4130 		}
4131 		free_objects += l3->free_objects;
4132 		if (l3->shared)
4133 			shared_avail += l3->shared->avail;
4134 
4135 		spin_unlock_irq(&l3->list_lock);
4136 	}
4137 	num_slabs += active_slabs;
4138 	num_objs = num_slabs * cachep->num;
4139 	if (num_objs - active_objs != free_objects && !error)
4140 		error = "free_objects accounting error";
4141 
4142 	name = cachep->name;
4143 	if (error)
4144 		printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4145 
4146 	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
4147 		   name, active_objs, num_objs, cachep->buffer_size,
4148 		   cachep->num, (1 << cachep->gfporder));
4149 	seq_printf(m, " : tunables %4u %4u %4u",
4150 		   cachep->limit, cachep->batchcount, cachep->shared);
4151 	seq_printf(m, " : slabdata %6lu %6lu %6lu",
4152 		   active_slabs, num_slabs, shared_avail);
4153 #if STATS
4154 	{			/* list3 stats */
4155 		unsigned long high = cachep->high_mark;
4156 		unsigned long allocs = cachep->num_allocations;
4157 		unsigned long grown = cachep->grown;
4158 		unsigned long reaped = cachep->reaped;
4159 		unsigned long errors = cachep->errors;
4160 		unsigned long max_freeable = cachep->max_freeable;
4161 		unsigned long node_allocs = cachep->node_allocs;
4162 		unsigned long node_frees = cachep->node_frees;
4163 		unsigned long overflows = cachep->node_overflow;
4164 
4165 		seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4166 				"%4lu %4lu %4lu %4lu %4lu", allocs, high, grown,
4167 				reaped, errors, max_freeable, node_allocs,
4168 				node_frees, overflows);
4169 	}
4170 	/* cpu stats */
4171 	{
4172 		unsigned long allochit = atomic_read(&cachep->allochit);
4173 		unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4174 		unsigned long freehit = atomic_read(&cachep->freehit);
4175 		unsigned long freemiss = atomic_read(&cachep->freemiss);
4176 
4177 		seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
4178 			   allochit, allocmiss, freehit, freemiss);
4179 	}
4180 #endif
4181 	seq_putc(m, '\n');
4182 	return 0;
4183 }
4184 
4185 /*
4186  * slabinfo_op - iterator that generates /proc/slabinfo
4187  *
4188  * Output layout:
4189  * cache-name
4190  * num-active-objs
4191  * total-objs
4192  * object size
4193  * num-active-slabs
4194  * total-slabs
4195  * num-pages-per-slab
4196  * + further values on SMP and with statistics enabled
4197  */
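
/*
 * A purely illustrative line in this format (all numbers are made up):
 *
 * dentry            10030  10051    208   19    1 : tunables  120   60    8 : slabdata    529    529      0
 */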
4198 
4199 static const struct seq_operations slabinfo_op = {
4200 	.start = s_start,
4201 	.next = s_next,
4202 	.stop = s_stop,
4203 	.show = s_show,
4204 };
4205 
4206 #define MAX_SLABINFO_WRITE 128
4207 /**
4208  * slabinfo_write - Tuning for the slab allocator
4209  * @file: unused
4210  * @buffer: user buffer
4211  * @count: data length
4212  * @ppos: unused
4213  */
4214 ssize_t slabinfo_write(struct file *file, const char __user * buffer,
4215 		       size_t count, loff_t *ppos)
4216 {
4217 	char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
4218 	int limit, batchcount, shared, res;
4219 	struct kmem_cache *cachep;
4220 
4221 	if (count > MAX_SLABINFO_WRITE)
4222 		return -EINVAL;
4223 	if (copy_from_user(&kbuf, buffer, count))
4224 		return -EFAULT;
4225 	kbuf[MAX_SLABINFO_WRITE] = '\0';
4226 
4227 	tmp = strchr(kbuf, ' ');
4228 	if (!tmp)
4229 		return -EINVAL;
4230 	*tmp = '\0';
4231 	tmp++;
4232 	if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4233 		return -EINVAL;
4234 
4235 	/* Find the cache in the chain of caches. */
4236 	mutex_lock(&cache_chain_mutex);
4237 	res = -EINVAL;
4238 	list_for_each_entry(cachep, &cache_chain, next) {
4239 		if (!strcmp(cachep->name, kbuf)) {
4240 			if (limit < 1 || batchcount < 1 ||
4241 					batchcount > limit || shared < 0) {
4242 				res = 0;
4243 			} else {
4244 				res = do_tune_cpucache(cachep, limit,
4245 						       batchcount, shared);
4246 			}
4247 			break;
4248 		}
4249 	}
4250 	mutex_unlock(&cache_chain_mutex);
4251 	if (res >= 0)
4252 		res = count;
4253 	return res;
4254 }
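
/*
 * Example (assuming a cache named "dentry" is registered):
 *
 *	echo "dentry 128 64 8" > /proc/slabinfo
 *
 * sets limit=128, batchcount=64 and the shared factor to 8 for that cache.
 * Out-of-range values (limit < 1, batchcount < 1, batchcount > limit, or
 * shared < 0) make the write succeed without changing anything.
 */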
4255 
4256 static int slabinfo_open(struct inode *inode, struct file *file)
4257 {
4258 	return seq_open(file, &slabinfo_op);
4259 }
4260 
4261 static const struct file_operations proc_slabinfo_operations = {
4262 	.open		= slabinfo_open,
4263 	.read		= seq_read,
4264 	.write		= slabinfo_write,
4265 	.llseek		= seq_lseek,
4266 	.release	= seq_release,
4267 };
4268 
4269 #ifdef CONFIG_DEBUG_SLAB_LEAK
4270 
4271 static void *leaks_start(struct seq_file *m, loff_t *pos)
4272 {
4273 	mutex_lock(&cache_chain_mutex);
4274 	return seq_list_start(&cache_chain, *pos);
4275 }
4276 
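/*
 * The leak buffer 'n' is laid out as: n[0] = the maximum number of
 * (caller address, count) pairs that fit, n[1] = the number of pairs
 * currently stored, followed by the pairs sorted by address.
 * add_caller() binary-searches the pairs, bumps the count on a match,
 * inserts a new pair otherwise, and returns 0 when the buffer is full.
 */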
4277 static inline int add_caller(unsigned long *n, unsigned long v)
4278 {
4279 	unsigned long *p;
4280 	int l;
4281 	if (!v)
4282 		return 1;
4283 	l = n[1];
4284 	p = n + 2;
4285 	while (l) {
4286 		int i = l/2;
4287 		unsigned long *q = p + 2 * i;
4288 		if (*q == v) {
4289 			q[1]++;
4290 			return 1;
4291 		}
4292 		if (*q > v) {
4293 			l = i;
4294 		} else {
4295 			p = q + 2;
4296 			l -= i + 1;
4297 		}
4298 	}
4299 	if (++n[1] == n[0])
4300 		return 0;
4301 	memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4302 	p[0] = v;
4303 	p[1] = 1;
4304 	return 1;
4305 }
4306 
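/*
 * Record, for every active object in slab 's', the allocation caller
 * stored in the object's debug user word, by feeding it to add_caller()
 * above; stop early if the leak buffer is full.
 */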
4307 static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4308 {
4309 	void *p;
4310 	int i;
4311 	if (n[0] == n[1])
4312 		return;
4313 	for (i = 0, p = s->s_mem; i < c->num; i++, p += c->buffer_size) {
4314 		if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4315 			continue;
4316 		if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4317 			return;
4318 	}
4319 }
4320 
4321 static void show_symbol(struct seq_file *m, unsigned long address)
4322 {
4323 #ifdef CONFIG_KALLSYMS
4324 	unsigned long offset, size;
4325 	char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
4326 
4327 	if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
4328 		seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
4329 		if (modname[0])
4330 			seq_printf(m, " [%s]", modname);
4331 		return;
4332 	}
4333 #endif
4334 	seq_printf(m, "%p", (void *)address);
4335 }
4336 
4337 static int leaks_show(struct seq_file *m, void *p)
4338 {
4339 	struct kmem_cache *cachep = list_entry(p, struct kmem_cache, next);
4340 	struct slab *slabp;
4341 	struct kmem_list3 *l3;
4342 	const char *name;
4343 	unsigned long *n = m->private;
4344 	int node;
4345 	int i;
4346 
4347 	if (!(cachep->flags & SLAB_STORE_USER))
4348 		return 0;
4349 	if (!(cachep->flags & SLAB_RED_ZONE))
4350 		return 0;
4351 
4352 	/* OK, we can do it */
4353 
4354 	n[1] = 0;
4355 
4356 	for_each_online_node(node) {
4357 		l3 = cachep->nodelists[node];
4358 		if (!l3)
4359 			continue;
4360 
4361 		check_irq_on();
4362 		spin_lock_irq(&l3->list_lock);
4363 
4364 		list_for_each_entry(slabp, &l3->slabs_full, list)
4365 			handle_slab(n, cachep, slabp);
4366 		list_for_each_entry(slabp, &l3->slabs_partial, list)
4367 			handle_slab(n, cachep, slabp);
4368 		spin_unlock_irq(&l3->list_lock);
4369 	}
4370 	name = cachep->name;
4371 	if (n[0] == n[1]) {
4372 		/* Increase the buffer size */
4373 		mutex_unlock(&cache_chain_mutex);
4374 		m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4375 		if (!m->private) {
4376 			/* Too bad, we are really out of memory */
4377 			m->private = n;
4378 			mutex_lock(&cache_chain_mutex);
4379 			return -ENOMEM;
4380 		}
4381 		*(unsigned long *)m->private = n[0] * 2;
4382 		kfree(n);
4383 		mutex_lock(&cache_chain_mutex);
4384 		/* Now make sure this entry will be retried */
4385 		m->count = m->size;
4386 		return 0;
4387 	}
4388 	for (i = 0; i < n[1]; i++) {
4389 		seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4390 		show_symbol(m, n[2*i+2]);
4391 		seq_putc(m, '\n');
4392 	}
4393 
4394 	return 0;
4395 }
4396 
4397 static const struct seq_operations slabstats_op = {
4398 	.start = leaks_start,
4399 	.next = s_next,
4400 	.stop = s_stop,
4401 	.show = leaks_show,
4402 };
4403 
4404 static int slabstats_open(struct inode *inode, struct file *file)
4405 {
4406 	unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4407 	int ret = -ENOMEM;
4408 	if (n) {
4409 		ret = seq_open(file, &slabstats_op);
4410 		if (!ret) {
4411 			struct seq_file *m = file->private_data;
4412 			*n = PAGE_SIZE / (2 * sizeof(unsigned long));
4413 			m->private = n;
4414 			n = NULL;
4415 		}
4416 		kfree(n);
4417 	}
4418 	return ret;
4419 }
4420 
4421 static const struct file_operations proc_slabstats_operations = {
4422 	.open		= slabstats_open,
4423 	.read		= seq_read,
4424 	.llseek		= seq_lseek,
4425 	.release	= seq_release_private,
4426 };
4427 #endif
4428 
4429 static int __init slab_proc_init(void)
4430 {
4431 	proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations);
4432 #ifdef CONFIG_DEBUG_SLAB_LEAK
4433 	proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
4434 #endif
4435 	return 0;
4436 }
4437 module_init(slab_proc_init);
4438 #endif
4439 
4440 /**
4441  * ksize - get the actual amount of memory allocated for a given object
4442  * @objp: Pointer to the object
4443  *
4444  * kmalloc may internally round up allocations and return more memory
4445  * than requested. ksize() can be used to determine the actual amount of
4446  * memory allocated. The caller may use this additional memory, even though
4447  * a smaller amount of memory was initially specified with the kmalloc call.
4448  * The caller must guarantee that objp points to a valid object previously
4449  * allocated with either kmalloc() or kmem_cache_alloc(). The object
4450  * must not be freed during the duration of the call.
4451  */
4452 size_t ksize(const void *objp)
4453 {
4454 	BUG_ON(!objp);
4455 	if (unlikely(objp == ZERO_SIZE_PTR))
4456 		return 0;
4457 
4458 	return obj_size(virt_to_cache(objp));
4459 }
4460 EXPORT_SYMBOL(ksize);
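
/*
 * Illustrative sketch: a kmalloc() request is rounded up to the size of
 * the general cache it is served from, and ksize() reports that
 * rounded-up size:
 *
 *	char *p = kmalloc(30, GFP_KERNEL);
 *	size_t usable = p ? ksize(p) : 0;	(typically 32 here, not 30)
 *	kfree(p);
 */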
4461