/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 * 	Cleaned up and restructured to ease the addition of alternative
 * 	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using it is to rely on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
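/*
 * A minimal sketch of creating such a cache (the struct, cache and
 * constructor names below are hypothetical, not part of this API).
 * The constructor initializes the state that the validation pass above
 * relies on, since objects may be reused without the page being freed:
 *
 *  static struct kmem_cache *obj_cache;
 *
 *  static void obj_ctor(void *p)
 *  {
 *      struct my_obj *obj = p;
 *
 *      spin_lock_init(&obj->lock);
 *      atomic_set(&obj->refcnt, 0);
 *  }
 *
 *  obj_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *                                SLAB_DESTROY_BY_RCU, obj_ctor);
 */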
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL		/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree in the same way that NULL can;
 * both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)
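/*
 * A minimal sketch of what this means for callers ("p" is hypothetical):
 * a zero-length request "succeeds" with a non-NULL pointer that must not
 * be dereferenced, and kfree() accepts it just like NULL.
 *
 *  void *p = kmalloc(0, GFP_KERNEL);   // p == ZERO_SIZE_PTR, not NULL
 *
 *  if (!ZERO_OR_NULL_PTR(p))           // false for a zero-sized request
 *      memset(p, 0, 1);                // only touch memory that really exists
 *
 *  kfree(p);                           // no-op, same as kfree(NULL)
 */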

/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and, optionally, flags from the list above.
 *
 * The alignment of the struct determines object alignment. If you,
 * e.g., add ____cacheline_aligned_in_smp to the struct declaration,
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)
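/*
 * For example (the struct and cache names are hypothetical):
 *
 *  struct request_state {
 *      u64 id;
 *      struct list_head list;
 *  } ____cacheline_aligned_in_smp;
 *
 *  static struct kmem_cache *request_cache;
 *
 *  request_cache = KMEM_CACHE(request_state, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 */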

/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
void kzfree(const void *);
size_t ksize(const void *);

/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user.  May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram.  May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep.  May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags | __GFP_ZERO);
}
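/*
 * For example (hypothetical names), allocating a zeroed array of "nr"
 * elements without open-coding the multiplication overflow check:
 *
 *  struct entry *table;
 *
 *  table = kcalloc(nr, sizeof(*table), GFP_KERNEL);
 *  if (!table)
 *      return -ENOMEM;
 */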

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

/*
 * kmalloc_track_caller is a special version of kmalloc that records, for
 * slab leak tracking, the caller of the routine invoking it rather than
 * that routine itself. It's useful when the call to kmalloc comes from a
 * widely-used standard allocator where we care about the real place the
 * memory allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB */
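/*
 * A minimal sketch (the wrapper below is hypothetical) of the intended
 * use: a generic helper allocates on behalf of its callers, so leak
 * reports should point at the code that asked for the memory rather
 * than at the helper itself.
 *
 *  void *my_subsystem_alloc(size_t len, gfp_t gfp)
 *  {
 *      return kmalloc_track_caller(len, gfp);
 *  }
 */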

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records, for slab leak tracking, the caller of the routine invoking it
 * rather than that routine itself. It's useful when the call to
 * kmalloc_node comes from a widely-used standard allocator where we care
 * about the real place the memory allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

#endif	/* _LINUX_SLAB_H */