#ifndef _LINUX_SLAB_DEF_H
#define	_LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * when kmalloc is called with a size that can be determined at
 * compile time.
 */
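
/*
 * Illustrative example (not part of the original header): with a typical
 * <linux/kmalloc_sizes.h> whose smallest entry is 32 bytes (an assumption;
 * the table depends on PAGE_SIZE and L1_CACHE_BYTES), a call such as
 *
 *	buf = kmalloc(24, GFP_KERNEL);
 *
 * has a compile-time constant size, so it resolves to an allocation from
 * the 32-byte general cache (malloc_sizes[i].cs_cachep), while a call
 * whose size is only known at run time falls back to __kmalloc().
 */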

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the
	 * total object size including these internal fields; the following
	 * two variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init()).
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};
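
/*
 * Illustrative sketch (this lives in mm/slab.c, not in this header; shown
 * only to explain the fields above): the allocation fast path reaches the
 * per-CPU state through array[] and the per-node state through
 * nodelists[], roughly as follows.
 *
 *	static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
 *	{
 *		return cachep->array[smp_processor_id()];
 *	}
 *
 *	struct kmem_list3 *l3 = cachep->nodelists[numa_node_id()];
 */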

/* Size description struct for general caches. */
struct cache_sizes {
	size_t		 	cs_size;
	struct kmem_cache	*cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache	*cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];
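
/*
 * Illustrative sketch (the authoritative definition is in mm/slab.c):
 * malloc_sizes[] is generated by expanding the CACHE() entries of
 * <linux/kmalloc_sizes.h>, roughly like this:
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 *
 * The index i computed by the CACHE() scan in kmalloc() below therefore
 * lines up with the entries of this table.
 */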

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
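
/*
 * Illustrative examples (hypothetical callers, not part of this header):
 *
 *	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);
 *		The size is a compile-time constant, so the CACHE() scan
 *		above picks a general cache and kmem_cache_alloc_trace()
 *		is called directly.
 *
 *	void *buf = kmalloc(len, GFP_ATOMIC);
 *		len is only known at run time, so the call falls back to
 *		__kmalloc().
 *
 * A constant size larger than the largest CACHE() entry makes the inline
 * path return NULL.
 */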

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
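
/*
 * Illustrative example (hypothetical caller): kmalloc_node() mirrors the
 * kmalloc() fast path but constrains the allocation to a NUMA node, e.g.
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 *
 * Constant sizes again resolve to a general cache at compile time; the
 * non-constant case goes through __kmalloc_node().
 */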

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */