// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

#include "internal.h"

#include "slab.h"

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

#ifdef CONFIG_HARDENED_USERCOPY
bool usercopy_fallback __ro_after_init =
                IS_ENABLED(CONFIG_HARDENED_USERCOPY_FALLBACK);
module_param(usercopy_fallback, bool, 0400);
MODULE_PARM_DESC(usercopy_fallback,
                "WARN instead of reject usercopy whitelist violations");
#endif

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
                    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
                SLAB_FAILSLAB | SLAB_KASAN)

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
        slab_nomerge = true;
        return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
        return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);
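/*
 * Note: this reports s->object_size, i.e. the payload size the cache was
 * created for. The allocator's internal per-object footprint (s->size) can
 * be larger once alignment padding and debug metadata are added.
 */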

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
        if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
                pr_err("kmem_cache_create(%s) integrity check failed\n", name);
                return -EINVAL;
        }

        WARN_ON(strchr(name, ' '));  /* It confuses parsers */
        return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
        return 0;
}
#endif

void __kmem_cache_free_bulk(struct kmem_cache *s, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                if (s)
                        kmem_cache_free(s, p[i]);
                else
                        kfree(p[i]);
        }
}

int __kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t nr,
                            void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                void *x = p[i] = kmem_cache_alloc(s, flags);
                if (!x) {
                        __kmem_cache_free_bulk(s, i, p);
                        return 0;
                }
        }
        return i;
}
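/*
 * The generic bulk helpers above simply loop over the single-object entry
 * points: on success the number of allocated objects (nr) is returned, and
 * on the first failure everything allocated so far is freed again and 0 is
 * returned, so callers never see a partially filled array.
 */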

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
                unsigned int align, unsigned int size)
{
        /*
         * If the user wants hardware cache aligned objects then follow that
         * suggestion if the object is sufficiently large.
         *
         * The hardware cache alignment cannot override the specified
         * alignment though. If that is greater, use it.
         */
        if (flags & SLAB_HWCACHE_ALIGN) {
                unsigned int ralign;

                ralign = cache_line_size();
                while (size <= ralign / 2)
                        ralign /= 2;
                align = max(align, ralign);
        }

        if (align < ARCH_SLAB_MINALIGN)
                align = ARCH_SLAB_MINALIGN;

        return ALIGN(align, sizeof(void *));
}
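/*
 * Worked example (a sketch, assuming a 64-byte cache line): for a 20-byte
 * object created with SLAB_HWCACHE_ALIGN, ralign is halved from 64 to 32
 * (20 <= 32 holds but 20 <= 16 does not), so the object ends up 32-byte
 * aligned rather than wasting a full cache line per object.
 */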

/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
        if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
                return 1;

        if (s->ctor)
                return 1;

        if (s->usersize)
                return 1;

        /*
         * We may have set a slab to be unmergeable during bootstrap.
         */
        if (s->refcount < 0)
                return 1;

        return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
                slab_flags_t flags, const char *name, void (*ctor)(void *))
{
        struct kmem_cache *s;

        if (slab_nomerge)
                return NULL;

        if (ctor)
                return NULL;

        size = ALIGN(size, sizeof(void *));
        align = calculate_alignment(flags, align, size);
        size = ALIGN(size, align);
        flags = kmem_cache_flags(size, flags, name);

        if (flags & SLAB_NEVER_MERGE)
                return NULL;

        list_for_each_entry_reverse(s, &slab_caches, list) {
                if (slab_unmergeable(s))
                        continue;

                if (size > s->size)
                        continue;

                if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
                        continue;
                /*
                 * Check if alignment is compatible.
                 * Courtesy of Adrian Drzewiecki
                 */
                if ((s->size & ~(align - 1)) != s->size)
                        continue;

                if (s->size - size >= sizeof(void *))
                        continue;

                if (IS_ENABLED(CONFIG_SLAB) && align &&
                        (align > s->align || s->align % align))
                        continue;

                return s;
        }
        return NULL;
}
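/*
 * To recap the merge rules encoded above: an existing cache is reused only
 * if neither side is marked unmergeable, the new cache has no constructor
 * and no usercopy region, both sides agree on the SLAB_MERGE_SAME flags,
 * the existing object size is at least as large as the requested size (but
 * less than a pointer size larger), and its size and alignment are
 * compatible with the requested alignment.
 */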

static struct kmem_cache *create_cache(const char *name,
                unsigned int object_size, unsigned int align,
                slab_flags_t flags, unsigned int useroffset,
                unsigned int usersize, void (*ctor)(void *),
                struct kmem_cache *root_cache)
{
        struct kmem_cache *s;
        int err;

        if (WARN_ON(useroffset + usersize > object_size))
                useroffset = usersize = 0;

        err = -ENOMEM;
        s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
        if (!s)
                goto out;

        s->name = name;
        s->size = s->object_size = object_size;
        s->align = align;
        s->ctor = ctor;
        s->useroffset = useroffset;
        s->usersize = usersize;

        err = __kmem_cache_create(s, flags);
        if (err)
                goto out_free_cache;

        s->refcount = 1;
        list_add(&s->list, &slab_caches);
out:
        if (err)
                return ERR_PTR(err);
        return s;

out_free_cache:
        kmem_cache_free(kmem_cache, s);
        goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
                  unsigned int size, unsigned int align,
                  slab_flags_t flags,
                  unsigned int useroffset, unsigned int usersize,
                  void (*ctor)(void *))
{
        struct kmem_cache *s = NULL;
        const char *cache_name;
        int err;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        err = kmem_cache_sanity_check(name, size);
        if (err)
                goto out_unlock;

        /* Refuse requests with allocator specific flags */
        if (flags & ~SLAB_FLAGS_PERMITTED) {
                err = -EINVAL;
                goto out_unlock;
        }

        /*
         * Some allocators will constrain the set of valid flags to a subset
         * of all flags. We expect them to define CACHE_CREATE_MASK in this
         * case, and we'll just provide them with a sanitized version of the
         * passed flags.
         */
        flags &= CACHE_CREATE_MASK;

        /* Fail closed on bad usersize or useroffset values. */
        if (WARN_ON(!usersize && useroffset) ||
            WARN_ON(size < usersize || size - usersize < useroffset))
                usersize = useroffset = 0;

        if (!usersize)
                s = __kmem_cache_alias(name, size, align, flags, ctor);
        if (s)
                goto out_unlock;

        cache_name = kstrdup_const(name, GFP_KERNEL);
        if (!cache_name) {
                err = -ENOMEM;
                goto out_unlock;
        }

        s = create_cache(cache_name, size,
                         calculate_alignment(flags, align, size),
                         flags, useroffset, usersize, ctor, NULL);
        if (IS_ERR(s)) {
                err = PTR_ERR(s);
                kfree_const(cache_name);
        }

out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();

        if (err) {
                if (flags & SLAB_PANIC)
                        panic("kmem_cache_create: Failed to create slab '%s'. Error %d\n",
                                name, err);
                else {
                        pr_warn("kmem_cache_create(%s) failed with error %d\n",
                                name, err);
                        dump_stack();
                }
                return NULL;
        }
        return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
                slab_flags_t flags, void (*ctor)(void *))
{
        return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
                                          ctor);
}
EXPORT_SYMBOL(kmem_cache_create);

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
        LIST_HEAD(to_destroy);
        struct kmem_cache *s, *s2;

        /*
         * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
         * @slab_caches_to_rcu_destroy list. The slab pages are freed
         * through RCU and the associated kmem_caches are dereferenced
         * while freeing the pages, so the kmem_caches should be freed only
         * after the pending RCU operations are finished. As rcu_barrier()
         * is a pretty slow operation, we batch all pending destructions
         * asynchronously.
         */
        mutex_lock(&slab_mutex);
        list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
        mutex_unlock(&slab_mutex);

        if (list_empty(&to_destroy))
                return;

        rcu_barrier();

        list_for_each_entry_safe(s, s2, &to_destroy, list) {
#ifdef SLAB_SUPPORTS_SYSFS
                sysfs_slab_release(s);
#else
                slab_kmem_cache_release(s);
#endif
        }
}

static int shutdown_cache(struct kmem_cache *s)
{
        /* free asan quarantined objects */
        kasan_cache_shutdown(s);

        if (__kmem_cache_shutdown(s) != 0)
                return -EBUSY;

        list_del(&s->list);

        if (s->flags & SLAB_TYPESAFE_BY_RCU) {
#ifdef SLAB_SUPPORTS_SYSFS
                sysfs_slab_unlink(s);
#endif
                list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
                schedule_work(&slab_caches_to_rcu_destroy_work);
        } else {
#ifdef SLAB_SUPPORTS_SYSFS
                sysfs_slab_unlink(s);
                sysfs_slab_release(s);
#else
                slab_kmem_cache_release(s);
#endif
        }

        return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
        __kmem_cache_release(s);
        kfree_const(s->name);
        kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
        int err;

        if (unlikely(!s))
                return;

        get_online_cpus();
        get_online_mems();

        mutex_lock(&slab_mutex);

        s->refcount--;
        if (s->refcount)
                goto out_unlock;

        err = shutdown_cache(s);
        if (err) {
                pr_err("kmem_cache_destroy %s: Slab cache still has objects\n",
                       s->name);
                dump_stack();
        }
out_unlock:
        mutex_unlock(&slab_mutex);

        put_online_mems();
        put_online_cpus();
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
        int ret;

        get_online_cpus();
        get_online_mems();
        kasan_cache_shrink(cachep);
        ret = __kmem_cache_shrink(cachep);
        put_online_mems();
        put_online_cpus();
        return ret;
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
        return slab_state >= UP;
}

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
                unsigned int size, slab_flags_t flags,
                unsigned int useroffset, unsigned int usersize)
{
        int err;
        unsigned int align = ARCH_KMALLOC_MINALIGN;

        s->name = name;
        s->size = s->object_size = size;

        /*
         * For power of two sizes, guarantee natural alignment for kmalloc
         * caches, regardless of SL*B debugging options.
         */
        if (is_power_of_2(size))
                align = max(align, size);
        s->align = calculate_alignment(flags, align, size);

        s->useroffset = useroffset;
        s->usersize = usersize;

        err = __kmem_cache_create(s, flags);

        if (err)
                panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
                        name, size, err);

        s->refcount = -1;       /* Exempt from merging for now */
}
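/*
 * For example, the power-of-two rule above means a 512-byte boot-time
 * kmalloc cache ends up with 512-byte ("natural") alignment, whereas a
 * non-power-of-two cache such as kmalloc-96 keeps whatever alignment the
 * flags and ARCH_KMALLOC_MINALIGN dictate.
 */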

struct kmem_cache *__init create_kmalloc_cache(const char *name,
                unsigned int size, slab_flags_t flags,
                unsigned int useroffset, unsigned int usersize)
{
        struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

        if (!s)
                panic("Out of memory when creating slab %s\n", name);

        create_boot_cache(s, name, size, flags, useroffset, usersize);
        list_add(&s->list, &slab_caches);
        s->refcount = 1;
        return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
        3,      /* 8 */
        4,      /* 16 */
        5,      /* 24 */
        5,      /* 32 */
        6,      /* 40 */
        6,      /* 48 */
        6,      /* 56 */
        6,      /* 64 */
        1,      /* 72 */
        1,      /* 80 */
        1,      /* 88 */
        1,      /* 96 */
        7,      /* 104 */
        7,      /* 112 */
        7,      /* 120 */
        7,      /* 128 */
        2,      /* 136 */
        2,      /* 144 */
        2,      /* 152 */
        2,      /* 160 */
        2,      /* 168 */
        2,      /* 176 */
        2,      /* 184 */
        2       /* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
        return (bytes - 1) / 8;
}
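/*
 * Worked example: a 40-byte request gives size_index_elem(40) = (40 - 1) / 8
 * = 4, and size_index[4] is 6, so the request is served from the 64-byte
 * kmalloc cache (kmalloc_info[6]). Entries with value 1 and 2 route the
 * in-between sizes to the non-power-of-two 96- and 192-byte caches.
 */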

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
        unsigned int index;

        if (size <= 192) {
                if (!size)
                        return ZERO_SIZE_PTR;

                index = size_index[size_index_elem(size)];
        } else {
                if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
                        return NULL;
                index = fls(size - 1);
        }

        return kmalloc_caches[kmalloc_type(flags)][index];
}
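/*
 * Above 192 bytes the index is simply the power-of-two ceiling: e.g. a
 * 200-byte request gives fls(199) = 8, i.e. the 256-byte cache. Requests
 * larger than KMALLOC_MAX_CACHE_SIZE are not served from the kmalloc array
 * at all and warn here; kmalloc() normally hands those straight to the page
 * allocator via kmalloc_order() further down in this file.
 */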

#ifdef CONFIG_ZONE_DMA
#define INIT_KMALLOC_INFO(__size, __short_size)                 \
{                                                               \
        .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,     \
        .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
        .name[KMALLOC_DMA]     = "dma-kmalloc-" #__short_size, \
        .size = __size,                                         \
}
#else
#define INIT_KMALLOC_INFO(__size, __short_size)                 \
{                                                               \
        .name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,     \
        .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size, \
        .size = __size,                                         \
}
#endif

/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^26=64MB, so the final entry of the table is
 * kmalloc-67108864.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
        INIT_KMALLOC_INFO(0, 0),
        INIT_KMALLOC_INFO(96, 96),
        INIT_KMALLOC_INFO(192, 192),
        INIT_KMALLOC_INFO(8, 8),
        INIT_KMALLOC_INFO(16, 16),
        INIT_KMALLOC_INFO(32, 32),
        INIT_KMALLOC_INFO(64, 64),
        INIT_KMALLOC_INFO(128, 128),
        INIT_KMALLOC_INFO(256, 256),
        INIT_KMALLOC_INFO(512, 512),
        INIT_KMALLOC_INFO(1024, 1k),
        INIT_KMALLOC_INFO(2048, 2k),
        INIT_KMALLOC_INFO(4096, 4k),
        INIT_KMALLOC_INFO(8192, 8k),
        INIT_KMALLOC_INFO(16384, 16k),
        INIT_KMALLOC_INFO(32768, 32k),
        INIT_KMALLOC_INFO(65536, 64k),
        INIT_KMALLOC_INFO(131072, 128k),
        INIT_KMALLOC_INFO(262144, 256k),
        INIT_KMALLOC_INFO(524288, 512k),
        INIT_KMALLOC_INFO(1048576, 1M),
        INIT_KMALLOC_INFO(2097152, 2M),
        INIT_KMALLOC_INFO(4194304, 4M),
        INIT_KMALLOC_INFO(8388608, 8M),
        INIT_KMALLOC_INFO(16777216, 16M),
        INIT_KMALLOC_INFO(33554432, 32M),
        INIT_KMALLOC_INFO(67108864, 64M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
        unsigned int i;

        BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
                (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));

        for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
                unsigned int elem = size_index_elem(i);

                if (elem >= ARRAY_SIZE(size_index))
                        break;
                size_index[elem] = KMALLOC_SHIFT_LOW;
        }

        if (KMALLOC_MIN_SIZE >= 64) {
                /*
                 * The 96 byte sized cache is not used if the alignment
                 * is 64 bytes.
                 */
                for (i = 64 + 8; i <= 96; i += 8)
                        size_index[size_index_elem(i)] = 7;
        }

        if (KMALLOC_MIN_SIZE >= 128) {
                /*
                 * The 192 byte sized cache is not used if the alignment
                 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
                 * instead.
                 */
                for (i = 128 + 8; i <= 192; i += 8)
                        size_index[size_index_elem(i)] = 8;
        }
}
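/*
 * A sketch of the effect, assuming an architecture where KMALLOC_MIN_SIZE
 * is 64: the first loop points every size below 64 at the KMALLOC_SHIFT_LOW
 * (64-byte) cache, and the second loop retires the 96-byte cache by sending
 * 72..96-byte requests to the 128-byte cache (index 7) instead.
 */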

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
        if (type == KMALLOC_RECLAIM)
                flags |= SLAB_RECLAIM_ACCOUNT;

        kmalloc_caches[type][idx] = create_kmalloc_cache(
                                        kmalloc_info[idx].name[type],
                                        kmalloc_info[idx].size, flags, 0,
                                        kmalloc_info[idx].size);
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
        int i;
        enum kmalloc_cache_type type;

        for (type = KMALLOC_NORMAL; type <= KMALLOC_RECLAIM; type++) {
                for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
                        if (!kmalloc_caches[type][i])
                                new_kmalloc_cache(i, type, flags);

                        /*
                         * Caches that are not a power-of-two size (96 and
                         * 192 bytes) have to be created immediately after
                         * the earlier power-of-two caches.
                         */
                        if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
                                        !kmalloc_caches[type][1])
                                new_kmalloc_cache(1, type, flags);
                        if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
                                        !kmalloc_caches[type][2])
                                new_kmalloc_cache(2, type, flags);
                }
        }

        /* Kmalloc array is now usable */
        slab_state = UP;

#ifdef CONFIG_ZONE_DMA
        for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
                struct kmem_cache *s = kmalloc_caches[KMALLOC_NORMAL][i];

                if (s) {
                        kmalloc_caches[KMALLOC_DMA][i] = create_kmalloc_cache(
                                kmalloc_info[i].name[KMALLOC_DMA],
                                kmalloc_info[i].size,
                                SLAB_CACHE_DMA | flags, 0,
                                kmalloc_info[i].size);
                }
        }
#endif
}
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
        gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

        flags &= ~GFP_SLAB_BUG_MASK;
        pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
                        invalid_mask, &invalid_mask, flags, &flags);
        dump_stack();

        return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */
void *kmalloc_order(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = NULL;
        struct page *page;

        if (unlikely(flags & GFP_SLAB_BUG_MASK))
                flags = kmalloc_fix_flags(flags);

        flags |= __GFP_COMP;
        page = alloc_pages(flags, order);
        if (likely(page)) {
                ret = page_address(page);
                mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
                                      PAGE_SIZE << order);
        }
        ret = kasan_kmalloc_large(ret, size, flags);
        /* As ret might get tagged, call kmemleak hook after KASAN. */
        kmemleak_alloc(ret, size, 1, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order);

#ifdef CONFIG_TRACING
void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
{
        void *ret = kmalloc_order(size, flags, order);
        trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << order, flags);
        return ret;
}
EXPORT_SYMBOL(kmalloc_order_trace);
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
                               unsigned int count)
{
        unsigned int rand;
        unsigned int i;

        for (i = 0; i < count; i++)
                list[i] = i;

        /* Fisher-Yates shuffle */
        for (i = count - 1; i > 0; i--) {
                rand = prandom_u32_state(state);
                rand %= (i + 1);
                swap(list[i], list[rand]);
        }
}
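/*
 * This is the classic in-place Fisher-Yates shuffle: each position, walking
 * from the end of the array, is swapped with a position chosen uniformly
 * from the still-unshuffled prefix (including itself), so every permutation
 * of the freelist indices is equally likely (up to the PRNG and the small
 * modulo bias of the reduction above).
 */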

/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                            gfp_t gfp)
{
        struct rnd_state state;

        if (count < 2 || cachep->random_seq)
                return 0;

        cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
        if (!cachep->random_seq)
                return -ENOMEM;

        /* Get best entropy at this stage of boot */
        prandom_seed_state(&state, get_random_long());

        freelist_randomize(&state, cachep->random_seq, count);
        return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
        kfree(cachep->random_seq);
        cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
        /*
         * Output format version, so at least we can change it
         * without _too_ many complaints.
         */
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
        seq_puts(m, "slabinfo - version: 2.1\n");
#endif
        seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
        seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
        seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
        seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
        seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
        seq_putc(m, '\n');
}

void *slab_start(struct seq_file *m, loff_t *pos)
{
        mutex_lock(&slab_mutex);
        return seq_list_start(&slab_caches, *pos);
}

void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
        return seq_list_next(p, &slab_caches, pos);
}

void slab_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
        struct slabinfo sinfo;

        memset(&sinfo, 0, sizeof(sinfo));
        get_slabinfo(s, &sinfo);

        seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
                   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
                   sinfo.objects_per_slab, (1 << sinfo.cache_order));

        seq_printf(m, " : tunables %4u %4u %4u",
                   sinfo.limit, sinfo.batchcount, sinfo.shared);
        seq_printf(m, " : slabdata %6lu %6lu %6lu",
                   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
        slabinfo_show_stats(m, s);
        seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
        struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

        if (p == slab_caches.next)
                print_slabinfo_header(m);
        cache_show(s, m);
        return 0;
}

void dump_unreclaimable_slab(void)
{
        struct kmem_cache *s, *s2;
        struct slabinfo sinfo;

        /*
         * Acquiring slab_mutex here is risky since we don't want to sleep
         * in the OOM path. But without holding the mutex we would risk a
         * crash while walking the list.
         * Use mutex_trylock to protect the list traversal, and dump nothing
         * if the mutex cannot be acquired.
         */
        if (!mutex_trylock(&slab_mutex)) {
                pr_warn("excessive unreclaimable slab but cannot dump stats\n");
                return;
        }

        pr_info("Unreclaimable slab info:\n");
        pr_info("Name Used Total\n");

        list_for_each_entry_safe(s, s2, &slab_caches, list) {
                if (s->flags & SLAB_RECLAIM_ACCOUNT)
                        continue;

                get_slabinfo(s, &sinfo);

                if (sinfo.num_objs > 0)
                        pr_info("%-17s %10luKB %10luKB\n", s->name,
                                (sinfo.active_objs * s->size) / 1024,
                                (sinfo.num_objs * s->size) / 1024);
        }
        mutex_unlock(&slab_mutex);
}

#if defined(CONFIG_MEMCG_KMEM)
int memcg_slab_show(struct seq_file *m, void *p)
{
        /*
         * Deprecated.
         * Please, take a look at tools/cgroup/slabinfo.py .
         */
        return 0;
}
#endif

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
        .start = slab_start,
        .next = slab_next,
        .stop = slab_stop,
        .show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
        return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
        .proc_flags     = PROC_ENTRY_PERMANENT,
        .proc_open      = slabinfo_open,
        .proc_read      = seq_read,
        .proc_write     = slabinfo_write,
        .proc_lseek     = seq_lseek,
        .proc_release   = seq_release,
};

static int __init slab_proc_init(void)
{
        proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
        return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline void *__do_krealloc(const void *p, size_t new_size,
                                           gfp_t flags)
{
        void *ret;
        size_t ks;

        ks = ksize(p);

        if (ks >= new_size) {
                p = kasan_krealloc((void *)p, new_size, flags);
                return (void *)p;
        }

        ret = kmalloc_track_caller(new_size, flags);
        if (ret && p)
                memcpy(ret, p, ks);

        return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes. If @p is %NULL, krealloc()
 * behaves exactly like kmalloc(). If @new_size is 0 and @p is not a
 * %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
        void *ret;

        if (unlikely(!new_size)) {
                kfree(p);
                return ZERO_SIZE_PTR;
        }

        ret = __do_krealloc(p, new_size, flags);
        if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
                kfree(p);

        return ret;
}
EXPORT_SYMBOL(krealloc);
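/*
 * Illustrative usage sketch: keep the old pointer until the call is known to
 * have succeeded, since on failure krealloc() returns NULL and leaves the
 * original allocation untouched:
 *
 *      new = krealloc(buf, new_len, GFP_KERNEL);
 *      if (!new)
 *              return -ENOMEM;         (buf is still valid here)
 *      buf = new;
 */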

/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before being freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
        size_t ks;
        void *mem = (void *)p;

        ks = ksize(mem);
        if (ks)
                memzero_explicit(mem, ks);
        kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
        size_t size;

        /*
         * We need to check that the pointed to object is valid, and only then
         * unpoison the shadow memory below. We use __kasan_check_read(), to
         * generate a more useful report at the time ksize() is called (rather
         * than later where behaviour is undefined due to potential
         * use-after-free or double-free).
         *
         * If the pointed to memory is invalid we return 0, to avoid users of
         * ksize() writing to and potentially corrupting the memory region.
         *
         * We want to perform the check before __ksize(), to avoid potentially
         * crashing in __ksize() due to accessing invalid metadata.
         */
        if (unlikely(ZERO_OR_NULL_PTR(objp)) || !__kasan_check_read(objp, 1))
                return 0;

        size = __ksize(objp);
        /*
         * We assume that ksize callers could use whole allocated area,
         * so we need to unpoison this area.
         */
        kasan_unpoison_shadow(objp, size);
        return size;
}
EXPORT_SYMBOL(ksize);
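/*
 * For example, a kmalloc(13) request is typically served from the 16-byte
 * kmalloc cache, so ksize() on that pointer reports 16 and the caller may
 * legitimately use all 16 bytes; kfree_sensitive() above relies on this to
 * wipe the full allocation rather than just the requested length.
 */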

/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kmalloc_node);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc_node);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
        if (__should_failslab(s, gfpflags))
                return -ENOMEM;
        return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);