/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>
#include <linux/stackdepot.h>

#undef CREATE_TRACE_POINTS
#include <trace/hooks/mm.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter that are cmpxchg'd together; this avoids the
 * typical ABA problem of a cmpxchg on just the pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
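
/*
 * Illustrative sketch (hypothetical helper, not part of this header) of how
 * the pair defeats ABA: the counter is bumped on every update, so even if a
 * concurrent free/alloc cycle restores the same freelist pointer, ->full
 * differs and the cmpxchg retries. The real updates live in mm/slub.c and
 * operate on struct slab's freelist_counter declared below.
 *
 *	static void push_free_object(freelist_aba_t *head, void *object)
 *	{
 *		freelist_aba_t old, new;
 *
 *		old.full = READ_ONCE(head->full);
 *		do {
 *			*(void **)object = old.freelist;	// link to current head
 *			new.freelist = object;
 *			new.counter = old.counter + 1;
 *		} while (!try_cmpxchg_freelist(&head->full, &old.full, new.full));
 *	}
 */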

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							/*
							 * If slab debugging is enabled then the
							 * frozen bit can be reused to indicate
							 * that the slab was corrupted
							 */
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif
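
/*
 * For example, SLAB_MATCH(flags, __page_flags) above expands to
 *
 *	static_assert(offsetof(struct page, flags) ==
 *		      offsetof(struct slab, __page_flags));
 *
 * pinning struct slab's layout to struct page, since both types are
 * different views of the same underlying memory.
 */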

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm.  Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or a single order-0 page.
 *
 * Long-term, ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page.
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}
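
/*
 * Illustrative use (hypothetical helper, not part of this header): resolving
 * the cache that owns an arbitrary pointer, with NULL for non-slab memory:
 *
 *	static struct kmem_cache *owning_cache(const void *ptr)
 *	{
 *		struct slab *slab = virt_to_slab(ptr);
 *
 *		return slab ? slab->slab_cache : NULL;
 *	}
 */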

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
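
/*
 * Illustrative walk of the per-cpu partial list (the real consumers live in
 * mm/slub.c): slabs are chained through slab->next, and slab->slabs on the
 * list head counts how many remain.
 *
 *	struct slab *slab = slub_percpu_partial_read_once(c);
 *
 *	while (slab) {
 *		struct slab *next = slab->next;
 *
 *		process(slab);		// hypothetical per-slab work
 *		slab = next;
 *	}
 */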

/*
 * Word-sized structure that can be atomically updated or read, and that
 * contains both the order and the number of objects that a slab of that
 * order contains.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
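
/*
 * The encoding and decoding of ->x live in mm/slub.c; roughly, the order is
 * kept in the high bits and the object count in the low bits:
 *
 *	order   = oo.x >> OO_SHIFT;
 *	objects = oo.x & OO_MASK;
 */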

/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
	ANDROID_OEM_DATA(1);
};

#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
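
/*
 * Worked example (illustrative numbers): with cache->size == 64 and a slab
 * starting at 0x1000, x == 0x1234 gives (0x1234 - 0x1000) % 64 == 52, so
 * nearest_obj() rounds down to 0x1200, the start of the object containing x,
 * before fixup_red_left() skips any left redzone.
 */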

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
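
/*
 * Example (illustrative numbers): for cache->size == 64 and an object at
 * slab_address(slab) + 192, the index is 192 / 64 == 3. reciprocal_divide()
 * computes this with a multiply and shift using the precomputed
 * cache->reciprocal_size, avoiding a hardware division on every lookup.
 */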

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation.
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE; the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;
	struct kmem_cache *s = NULL;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	trace_android_vh_kmalloc_slab(index, flags, &s);
	if (s)
		return s;

	return (*b)[index];
}
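
/*
 * Worked example (illustrative): a 100-byte request falls in the table range,
 * so index = kmalloc_size_index[(100 - 1) / 8] = kmalloc_size_index[12];
 * a 300-byte request takes the power-of-two path, fls(299) == 9, selecting
 * the 512-byte bucket (2^9).
 */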

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#ifdef CONFIG_SLUB_DEBUG
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKDEPOT
	depot_stack_handle_t handle;
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
extern struct track *get_track(struct kmem_cache *s, void *object,
				enum track_item alloc);
extern unsigned long get_each_kmemcache_object(struct kmem_cache *s,
		int (*fn)(struct kmem_cache *, void *, void *),
		void *private);

static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug(), as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}
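
/*
 * Typical use (illustrative): gate an expensive consistency check on a debug
 * flag without paying its cost when debugging is compiled out or disabled:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		check_object_state(s, slab, object);	// hypothetical callee
 */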

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
							slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}
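
/*
 * Example (illustrative): slab->obj_exts packs tag bits covered by
 * OBJEXTS_FLAGS_MASK into the low bits of the vector pointer, so a stored
 * value of (vector | flag) still yields the clean vector pointer here once
 * the mask is cleared.
 */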

int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts);
#endif

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}
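
/*
 * Example (illustrative numbers): for a cache with object_size == 100,
 * inuse == 104 and size == 128, a plain cache reports 128 usable bytes,
 * one with SLAB_STORE_USER reports 104, and one with SLAB_RED_ZONE (under
 * CONFIG_SLUB_DEBUG) reports only the original 100.
 */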

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */