/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | SLAB_NOTRACK)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slab_nomerge;

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = 1;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, size_t size)
{
	struct kmem_cache *s = NULL;

	if (!name || in_interrupt() || size < sizeof(void *) ||
		size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	list_for_each_entry(s, &slab_caches, list) {
		char tmp;
		int res;

		/*
		 * This happens when the module gets unloaded and doesn't
		 * destroy its slab cache and no-one else reuses the vmalloc
		 * area of the module.  Print a warning.
		 */
		res = probe_kernel_address(s->name, tmp);
		if (res) {
			pr_err("Slab cache with size %d has lost its name\n",
			       s->object_size);
			continue;
		}
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, size_t size)
{
	return 0;
}
#endif

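/*
 * Bulk alloc/free helpers built from the per-object entry points below.
 * __kmem_cache_alloc_bulk() returns nr on success; if any allocation
 * fails it frees the objects allocated so far and returns 0.
 */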
void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
	size_t i;

	for (i = 0; i < nr; i++)
		kmem_cache_free(s, p[i]);
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
								void **p)
{
	size_t i;

	for (i = 0; i < nr; i++) {
		void *x = p[i] = kmem_cache_alloc(s, flags);
		if (!x) {
			__kmem_cache_free_bulk(s, i, p);
			return 0;
		}
	}
	return i;
}

#ifdef CONFIG_MEMCG_KMEM
void slab_init_memcg_params(struct kmem_cache *s)
{
	s->memcg_params.is_root_cache = true;
	INIT_LIST_HEAD(&s->memcg_params.list);
	RCU_INIT_POINTER(s->memcg_params.memcg_caches, NULL);
}

static int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct memcg_cache_array *arr;

	if (memcg) {
		s->memcg_params.is_root_cache = false;
		s->memcg_params.memcg = memcg;
		s->memcg_params.root_cache = root_cache;
		return 0;
	}

	slab_init_memcg_params(s);

	if (!memcg_nr_cache_ids)
		return 0;

	arr = kzalloc(sizeof(struct memcg_cache_array) +
		      memcg_nr_cache_ids * sizeof(void *),
		      GFP_KERNEL);
	if (!arr)
		return -ENOMEM;

	RCU_INIT_POINTER(s->memcg_params.memcg_caches, arr);
	return 0;
}

static void destroy_memcg_params(struct kmem_cache *s)
{
	if (is_root_cache(s))
		kfree(rcu_access_pointer(s->memcg_params.memcg_caches));
}

static int update_memcg_params(struct kmem_cache *s, int new_array_size)
{
	struct memcg_cache_array *old, *new;

	if (!is_root_cache(s))
		return 0;

	new = kzalloc(sizeof(struct memcg_cache_array) +
		      new_array_size * sizeof(void *), GFP_KERNEL);
	if (!new)
		return -ENOMEM;

	old = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	if (old)
		memcpy(new->entries, old->entries,
		       memcg_nr_cache_ids * sizeof(void *));

	rcu_assign_pointer(s->memcg_params.memcg_caches, new);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}

int memcg_update_all_caches(int num_memcgs)
{
	struct kmem_cache *s;
	int ret = 0;

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		ret = update_memcg_params(s, num_memcgs);
		/*
		 * Instead of freeing the memory, we'll just leave the caches
		 * up to this point in an updated state.
		 */
		if (ret)
			break;
	}
	mutex_unlock(&slab_mutex);
	return ret;
}
#else
static inline int init_memcg_params(struct kmem_cache *s,
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	return 0;
}

static inline void destroy_memcg_params(struct kmem_cache *s)
{
}
#endif /* CONFIG_MEMCG_KMEM */

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (!is_root_cache(s))
		return 1;

	if (s->ctor)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(size_t size, size_t align,
		unsigned long flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name, NULL);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned long ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	if (align < ARCH_SLAB_MINALIGN)
		align = ARCH_SLAB_MINALIGN;

	return ALIGN(align, sizeof(void *));
}
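
/*
 * Worked example (assuming a 64-byte cache line): a 20-byte object created
 * with SLAB_HWCACHE_ALIGN halves ralign from 64 to 32 (20 <= 32) and stops
 * there (20 > 16), so the object ends up 32-byte aligned unless the caller
 * asked for more.
 */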

static struct kmem_cache *create_cache(const char *name,
		size_t object_size, size_t size, size_t align,
		unsigned long flags, void (*ctor)(void *),
		struct mem_cgroup *memcg, struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->object_size = object_size;
	s->size = size;
	s->align = align;
	s->ctor = ctor;

	err = init_memcg_params(s, memcg, root_cache);
	if (err)
		goto out_free_cache;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	destroy_memcg_params(s);
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/*
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Returns a ptr to the cache on success, NULL on failure.
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline.  This can be beneficial if you're counting cycles as closely
 * as davem.
 */
struct kmem_cache *
kmem_cache_create(const char *name, size_t size, size_t align,
		  unsigned long flags, void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

	get_online_cpus();
	get_online_mems();
	memcg_get_cache_ids();

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size, size,
			 calculate_alignment(flags, align, size),
			 flags, ctor, NULL, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	memcg_put_cache_ids();
	put_online_mems();
	put_online_cpus();

	if (err) {
		if (flags & SLAB_PANIC)
			panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
				name, err);
		else {
			printk(KERN_WARNING "kmem_cache_create(%s) failed with error %d",
				name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create);

static int shutdown_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	if (s->flags & SLAB_DESTROY_BY_RCU)
		*need_rcu_barrier = true;

	list_move(&s->list, release);
	return 0;
}

static void release_caches(struct list_head *release, bool need_rcu_barrier)
{
	struct kmem_cache *s, *s2;

	if (need_rcu_barrier)
		rcu_barrier();

	list_for_each_entry_safe(s, s2, release, list) {
#ifdef SLAB_SUPPORTS_SYSFS
		sysfs_slab_remove(s);
#else
		slab_kmem_cache_release(s);
#endif
	}
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * memcg_create_kmem_cache - Create a cache for a memory cgroup.
 * @memcg: The memory cgroup the new cache is for.
 * @root_cache: The parent of the new cache.
 *
 * This function attempts to create a kmem cache that will serve allocation
 * requests going from @memcg to @root_cache. The new cache inherits properties
 * from its parent.
 */
void memcg_create_kmem_cache(struct mem_cgroup *memcg,
			     struct kmem_cache *root_cache)
{
	static char memcg_name_buf[NAME_MAX + 1]; /* protected by slab_mutex */
	struct cgroup_subsys_state *css = &memcg->css;
	struct memcg_cache_array *arr;
	struct kmem_cache *s = NULL;
	char *cache_name;
	int idx;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	/*
	 * The memory cgroup could have been deactivated while the cache
	 * creation work was pending.
	 */
	if (!memcg_kmem_is_active(memcg))
		goto out_unlock;

	idx = memcg_cache_id(memcg);
	arr = rcu_dereference_protected(root_cache->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));

	/*
	 * Since per-memcg caches are created asynchronously on first
	 * allocation (see memcg_kmem_get_cache()), several threads can try to
	 * create the same cache, but only one of them may succeed.
	 */
	if (arr->entries[idx])
		goto out_unlock;

	cgroup_name(css->cgroup, memcg_name_buf, sizeof(memcg_name_buf));
	cache_name = kasprintf(GFP_KERNEL, "%s(%llu:%s)", root_cache->name,
			       css->serial_nr, memcg_name_buf);
	if (!cache_name)
		goto out_unlock;

	s = create_cache(cache_name, root_cache->object_size,
			 root_cache->size, root_cache->align,
			 root_cache->flags, root_cache->ctor,
			 memcg, root_cache);
	/*
	 * If we could not create a memcg cache, do not complain, because
	 * that's not critical at all as we can always proceed with the root
	 * cache.
	 */
	if (IS_ERR(s)) {
		kfree(cache_name);
		goto out_unlock;
	}

	list_add(&s->memcg_params.list, &root_cache->memcg_params.list);

	/*
	 * Since readers won't lock (see cache_from_memcg_idx()), we need a
	 * barrier here to ensure nobody will see the kmem_cache partially
	 * initialized.
	 */
	smp_wmb();
	arr->entries[idx] = s;

out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

void memcg_deactivate_kmem_caches(struct mem_cgroup *memcg)
{
	int idx;
	struct memcg_cache_array *arr;
	struct kmem_cache *s, *c;

	idx = memcg_cache_id(memcg);

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry(s, &slab_caches, list) {
		if (!is_root_cache(s))
			continue;

		arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
						lockdep_is_held(&slab_mutex));
		c = arr->entries[idx];
		if (!c)
			continue;

		__kmem_cache_shrink(c, true);
		arr->entries[idx] = NULL;
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();
}

static int __shutdown_memcg_cache(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	BUG_ON(is_root_cache(s));

	if (shutdown_cache(s, release, need_rcu_barrier))
		return -EBUSY;

	list_del(&s->memcg_params.list);
	return 0;
}

void memcg_destroy_kmem_caches(struct mem_cgroup *memcg)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	struct kmem_cache *s, *s2;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);
	list_for_each_entry_safe(s, s2, &slab_caches, list) {
		if (is_root_cache(s) || s->memcg_params.memcg != memcg)
			continue;
		/*
		 * The cgroup is about to be freed and therefore has no charges
		 * left. Hence, all its caches must be empty by now.
		 */
		BUG_ON(__shutdown_memcg_cache(s, &release, &need_rcu_barrier));
	}
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}

static int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	struct memcg_cache_array *arr;
	struct kmem_cache *c, *c2;
	LIST_HEAD(busy);
	int i;

	BUG_ON(!is_root_cache(s));

	/*
	 * First, shutdown active caches, i.e. caches that belong to online
	 * memory cgroups.
	 */
	arr = rcu_dereference_protected(s->memcg_params.memcg_caches,
					lockdep_is_held(&slab_mutex));
	for_each_memcg_cache_index(i) {
		c = arr->entries[i];
		if (!c)
			continue;
		if (__shutdown_memcg_cache(c, release, need_rcu_barrier))
			/*
			 * The cache still has objects. Move it to a temporary
			 * list so as not to try to destroy it for a second
			 * time while iterating over inactive caches below.
			 */
			list_move(&c->memcg_params.list, &busy);
		else
			/*
			 * The cache is empty and will be destroyed soon. Clear
			 * the pointer to it in the memcg_caches array so that
			 * it will never be accessed even if the root cache
			 * stays alive.
			 */
			arr->entries[i] = NULL;
	}

	/*
	 * Second, shutdown all caches left from memory cgroups that are now
	 * offline.
	 */
	list_for_each_entry_safe(c, c2, &s->memcg_params.list,
				 memcg_params.list)
		__shutdown_memcg_cache(c, release, need_rcu_barrier);

	list_splice(&busy, &s->memcg_params.list);

	/*
	 * A cache being destroyed must be empty. In particular, this means
	 * that all per memcg caches attached to it must be empty too.
	 */
	if (!list_empty(&s->memcg_params.list))
		return -EBUSY;
	return 0;
}
#else
static inline int shutdown_memcg_caches(struct kmem_cache *s,
		struct list_head *release, bool *need_rcu_barrier)
{
	return 0;
}
#endif /* CONFIG_MEMCG_KMEM */

void slab_kmem_cache_release(struct kmem_cache *s)
{
	destroy_memcg_params(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	LIST_HEAD(release);
	bool need_rcu_barrier = false;
	int err;

	if (unlikely(!s))
		return;

	get_online_cpus();
	get_online_mems();

	mutex_lock(&slab_mutex);

	s->refcount--;
	if (s->refcount)
		goto out_unlock;

	err = shutdown_memcg_caches(s, &release, &need_rcu_barrier);
	if (!err)
		err = shutdown_cache(s, &release, &need_rcu_barrier);

	if (err) {
		pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
		       s->name);
		dump_stack();
	}
out_unlock:
	mutex_unlock(&slab_mutex);

	put_online_mems();
	put_online_cpus();

	release_caches(&release, need_rcu_barrier);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	int ret;

	get_online_cpus();
	get_online_mems();
	kasan_cache_shrink(cachep);
	ret = __kmem_cache_shrink(cachep, false);
	put_online_mems();
	put_online_cpus();
	return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t size,
		unsigned long flags)
{
	int err;

	s->name = name;
	s->size = s->object_size = size;
	s->align = calculate_alignment(flags, ARCH_KMALLOC_MINALIGN, size);

	slab_init_memcg_params(s);

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
				unsigned long flags)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_caches);

#ifdef CONFIG_ZONE_DMA
struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
EXPORT_SYMBOL(kmalloc_dma_caches);
#endif

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static s8 size_index[24] = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline int size_index_elem(size_t bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	int index;

	if (unlikely(size > KMALLOC_MAX_SIZE)) {
		WARN_ON_ONCE(!(flags & __GFP_NOWARN));
		return NULL;
	}

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else
		index = fls(size - 1);

#ifdef CONFIG_ZONE_DMA
	if (unlikely((flags & GFP_DMA)))
		return kmalloc_dma_caches[index];

#endif
	return kmalloc_caches[index];
}
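
/*
 * Example (with the default size_index table): kmalloc(24) yields
 * size_index[(24 - 1) / 8] = size_index[2] = 5, i.e. the kmalloc-32 cache,
 * while a 300-byte request above 192 uses fls(299) = 9, i.e. kmalloc-512.
 */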

/*
 * kmalloc_info[] is needed to make the slub_debug=,kmalloc-xx option work at
 * boot time. kmalloc_index() supports up to 2^26=64MB, so the final entry of
 * the table is kmalloc-67108864.
 */
static struct {
	const char *name;
	unsigned long size;
} const kmalloc_info[] __initconst = {
	{NULL,                      0},		{"kmalloc-96",             96},
	{"kmalloc-192",           192},		{"kmalloc-8",               8},
	{"kmalloc-16",             16},		{"kmalloc-32",             32},
	{"kmalloc-64",             64},		{"kmalloc-128",           128},
	{"kmalloc-256",           256},		{"kmalloc-512",           512},
	{"kmalloc-1024",         1024},		{"kmalloc-2048",         2048},
	{"kmalloc-4096",         4096},		{"kmalloc-8192",         8192},
	{"kmalloc-16384",       16384},		{"kmalloc-32768",       32768},
	{"kmalloc-65536",       65536},		{"kmalloc-131072",     131072},
	{"kmalloc-262144",     262144},		{"kmalloc-524288",     524288},
	{"kmalloc-1048576",   1048576},		{"kmalloc-2097152",   2097152},
	{"kmalloc-4194304",   4194304},		{"kmalloc-8388608",   8388608},
	{"kmalloc-16777216", 16777216},		{"kmalloc-33554432", 33554432},
	{"kmalloc-67108864", 67108864}
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		(KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte size cache is not used if the alignment
		 * is 64 bytes.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}
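
/*
 * Example: with KMALLOC_MIN_SIZE == 64, requests of 8..64 bytes map to
 * index 6 (kmalloc-64) and requests of 72..96 bytes are redirected to
 * index 7 (kmalloc-128), since the kmalloc-96 cache is never created in
 * that configuration.
 */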

static void __init new_kmalloc_cache(int idx, unsigned long flags)
{
	kmalloc_caches[idx] = create_kmalloc_cache(kmalloc_info[idx].name,
					kmalloc_info[idx].size, flags);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(unsigned long flags)
{
	int i;

	for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
		if (!kmalloc_caches[i])
			new_kmalloc_cache(i, flags);

		/*
		 * Caches that are not of a power-of-two size.
		 * These have to be created immediately after the
		 * earlier power of two caches
		 */
		if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
			new_kmalloc_cache(1, flags);
		if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
			new_kmalloc_cache(2, flags);
	}

	/* Kmalloc array is now usable */
	slab_state = UP;

#ifdef CONFIG_ZONE_DMA
	for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
		struct kmem_cache *s = kmalloc_caches[i];

		if (s) {
			int size = kmalloc_size(i);
			char *n = kasprintf(GFP_NOWAIT,
				 "dma-kmalloc-%d", size);

			BUG_ON(!n);
			kmalloc_dma_caches[i] = create_kmalloc_cache(n,
				size, SLAB_CACHE_DMA | flags);
		}
	}
#endif
}
#endif /* !CONFIG_SLOB */

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
	void *ret;
	struct page *page;

	flags |= __GFP_COMP;
	page = alloc_kmem_pages(flags, order);
	ret = page ? page_address(page) : NULL;
	kmemleak_alloc(ret, size, 1, flags);
	kasan_kmalloc_large(ret, size, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
	void *ret = kmalloc_order(size, flags, order);
	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLABINFO

#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (S_IWUSR | S_IRUSR)
#else
#define SLABINFO_RIGHTS S_IRUSR
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void
memcg_accumulate_slabinfo(struct kmem_cache *s, struct slabinfo *info)
{
	struct kmem_cache *c;
	struct slabinfo sinfo;

	if (!is_root_cache(s))
		return;

	for_each_memcg_cache(c, s) {
		memset(&sinfo, 0, sizeof(sinfo));
		get_slabinfo(c, &sinfo);

		info->active_slabs += sinfo.active_slabs;
		info->num_slabs += sinfo.num_slabs;
		info->shared_avail += sinfo.shared_avail;
		info->active_objs += sinfo.active_objs;
		info->num_objs += sinfo.num_objs;
	}
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	memcg_accumulate_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   cache_name(s), sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (is_root_cache(s))
		cache_show(s, m);
	return 0;
}

#ifdef CONFIG_MEMCG_KMEM
int memcg_slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
	struct mem_cgroup *memcg = mem_cgroup_from_css(seq_css(m));

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	if (!is_root_cache(s) && s->memcg_params.memcg == memcg)
		cache_show(s, m);
	return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open		= slabinfo_open,
	.read		= seq_read,
	.write          = slabinfo_write,
	.llseek		= seq_lseek,
	.release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL,
						&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
					   gfp_t flags)
{
	void *ret;
	size_t ks = 0;

	if (p)
		ks = ksize(p);

	if (ks >= new_size) {
		kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p)
		memcpy(ret, p, ks);

	return ret;
}
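
/*
 * Note: when ksize(p) already covers new_size, __do_krealloc() returns the
 * original pointer without allocating or copying anything, so callers cannot
 * assume the result is a fresh allocation.
 */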

/**
 * __krealloc - like krealloc() but don't free @p.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * This function is like krealloc() except it never frees the originally
 * allocated buffer. Use this if you don't want to free the buffer immediately
 * like, for example, with RCU.
 */
void *__krealloc(const void *p, size_t new_size, gfp_t flags)
{
	if (unlikely(!new_size))
		return ZERO_SIZE_PTR;

	return __do_krealloc(p, new_size, flags);
}
EXPORT_SYMBOL(__krealloc);

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes.  If @p is %NULL, krealloc()
 * behaves exactly like kmalloc().  If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && p != ret)
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

/**
 * kzfree - like kfree but zero memory
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kzfree() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kzfree(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	if (unlikely(ZERO_OR_NULL_PTR(mem)))
		return;
	ks = ksize(mem);
	memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kzfree);

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);