/*
 *  linux/mm/mempool.c
 *
 *  memory buffer pool support. Such pools are mostly used
 *  for guaranteed, deadlock-free memory allocations during
 *  extreme VM load.
 *
 *  started by Ingo Molnar, Copyright (C) 2001
 */

#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/writeback.h>

static void add_element(mempool_t *pool, void *element)
{
	BUG_ON(pool->curr_nr >= pool->min_nr);
	pool->elements[pool->curr_nr++] = element;
}

static void *remove_element(mempool_t *pool)
{
	BUG_ON(pool->curr_nr <= 0);
	return pool->elements[--pool->curr_nr];
}

static void free_pool(mempool_t *pool)
{
	while (pool->curr_nr) {
		void *element = remove_element(pool);
		pool->free(element, pool->pool_data);
	}
	kfree(pool->elements);
	kfree(pool);
}

/**
 * mempool_create - create a memory pool
 * @min_nr:    the minimum number of elements guaranteed to be
 *             allocated for this pool.
 * @alloc_fn:  user-defined element-allocation function.
 * @free_fn:   user-defined element-freeing function.
 * @pool_data: optional private data available to the user-defined functions.
 *
 * this function creates and allocates a guaranteed size, preallocated
 * memory pool. The pool can be used from the mempool_alloc() and mempool_free()
 * functions. This function might sleep. Both the alloc_fn() and the free_fn()
 * functions might sleep - as long as the mempool_alloc() function is not called
 * from IRQ contexts.
 */
mempool_t *mempool_create(int min_nr, mempool_alloc_t *alloc_fn,
				mempool_free_t *free_fn, void *pool_data)
{
	return mempool_create_node(min_nr, alloc_fn, free_fn, pool_data, -1);
}
EXPORT_SYMBOL(mempool_create);
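
/*
 * Illustrative usage sketch (not part of the original file): a typical
 * caller pairs mempool_create() with the slab helpers defined later in
 * this file.  "cache" is a hypothetical struct kmem_cache * created by
 * the caller; 16 is an arbitrary reserve size.
 *
 *	mempool_t *pool;
 *
 *	pool = mempool_create(16, mempool_alloc_slab,
 *			      mempool_free_slab, cache);
 *	if (!pool)
 *		return -ENOMEM;
 */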

mempool_t *mempool_create_node(int min_nr, mempool_alloc_t *alloc_fn,
			mempool_free_t *free_fn, void *pool_data, int node_id)
{
	mempool_t *pool;

	pool = kmalloc_node(sizeof(*pool), GFP_KERNEL | __GFP_ZERO, node_id);
	if (!pool)
		return NULL;
	pool->elements = kmalloc_node(min_nr * sizeof(void *),
					GFP_KERNEL, node_id);
	if (!pool->elements) {
		kfree(pool);
		return NULL;
	}
	spin_lock_init(&pool->lock);
	pool->min_nr = min_nr;
	pool->pool_data = pool_data;
	init_waitqueue_head(&pool->wait);
	pool->alloc = alloc_fn;
	pool->free = free_fn;

	/*
	 * First pre-allocate the guaranteed number of buffers.
	 */
	while (pool->curr_nr < pool->min_nr) {
		void *element;

		element = pool->alloc(GFP_KERNEL, pool->pool_data);
		if (unlikely(!element)) {
			free_pool(pool);
			return NULL;
		}
		add_element(pool, element);
	}
	return pool;
}
EXPORT_SYMBOL(mempool_create_node);
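
/*
 * Illustrative sketch (not part of the original file): a NUMA-aware
 * caller can place the pool's bookkeeping on a specific node; the -1
 * that mempool_create() passes above means "any node".  "dev" is a
 * hypothetical struct device *.
 *
 *	pool = mempool_create_node(16, mempool_alloc_slab,
 *				   mempool_free_slab, cache,
 *				   dev_to_node(dev));
 */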

/**
 * mempool_resize - resize an existing memory pool
 * @pool:       pointer to the memory pool which was allocated via
 *              mempool_create().
 * @new_min_nr: the new minimum number of elements guaranteed to be
 *              allocated for this pool.
 * @gfp_mask:   the usual allocation bitmask.
 *
 * This function shrinks/grows the pool. In the case of growing,
 * it cannot be guaranteed that the pool will be grown to the new
 * size immediately, but new mempool_free() calls will refill it.
 *
 * Note, the caller must guarantee that no mempool_destroy() is called
 * while this function is running. mempool_alloc() & mempool_free()
 * might be called (e.g. from IRQ contexts) while this function executes.
 */
int mempool_resize(mempool_t *pool, int new_min_nr, gfp_t gfp_mask)
{
	void *element;
	void **new_elements;
	unsigned long flags;

	BUG_ON(new_min_nr <= 0);

	spin_lock_irqsave(&pool->lock, flags);
	if (new_min_nr <= pool->min_nr) {
		while (new_min_nr < pool->curr_nr) {
			element = remove_element(pool);
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);
			spin_lock_irqsave(&pool->lock, flags);
		}
		pool->min_nr = new_min_nr;
		goto out_unlock;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* Grow the pool */
	new_elements = kmalloc(new_min_nr * sizeof(*new_elements), gfp_mask);
	if (!new_elements)
		return -ENOMEM;

	spin_lock_irqsave(&pool->lock, flags);
	if (unlikely(new_min_nr <= pool->min_nr)) {
		/* Raced, other resize will do our work */
		spin_unlock_irqrestore(&pool->lock, flags);
		kfree(new_elements);
		goto out;
	}
	memcpy(new_elements, pool->elements,
			pool->curr_nr * sizeof(*new_elements));
	kfree(pool->elements);
	pool->elements = new_elements;
	pool->min_nr = new_min_nr;

	while (pool->curr_nr < pool->min_nr) {
		spin_unlock_irqrestore(&pool->lock, flags);
		element = pool->alloc(gfp_mask, pool->pool_data);
		if (!element)
			goto out;
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
		} else {
			spin_unlock_irqrestore(&pool->lock, flags);
			pool->free(element, pool->pool_data);	/* Raced */
			goto out;
		}
	}
out_unlock:
	spin_unlock_irqrestore(&pool->lock, flags);
out:
	return 0;
}
EXPORT_SYMBOL(mempool_resize);
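
/*
 * Illustrative sketch (not part of the original file): growing the
 * reserve ahead of a known burst of allocations, then shrinking it
 * back.  The sizes are arbitrary; only shrinking is guaranteed to
 * take effect immediately.
 *
 *	if (mempool_resize(pool, 64, GFP_KERNEL))
 *		return -ENOMEM;
 *	... burst of mempool_alloc()/mempool_free() ...
 *	mempool_resize(pool, 16, GFP_KERNEL);
 */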

/**
 * mempool_destroy - deallocate a memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps. The caller
 * has to guarantee that all elements have been returned to the pool (i.e.
 * freed) prior to calling mempool_destroy().
 */
void mempool_destroy(mempool_t *pool)
{
	/* Check for outstanding elements */
	BUG_ON(pool->curr_nr != pool->min_nr);
	free_pool(pool);
}
EXPORT_SYMBOL(mempool_destroy);
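
/*
 * Illustrative teardown sketch (not part of the original file): the
 * pool must be destroyed before the backing cache, and only once all
 * elements are back in the pool, or the BUG_ON above fires.
 *
 *	mempool_destroy(pool);
 *	kmem_cache_destroy(cache);
 */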

/**
 * mempool_alloc - allocate an element from a specific memory pool
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 * @gfp_mask:  the usual allocation bitmask.
 *
 * this function only sleeps if the alloc_fn() function sleeps or
 * returns NULL. Note that due to preallocation, this function
 * *never* fails when called from process contexts. (it might
 * fail if called from an IRQ context.)
 */
void *mempool_alloc(mempool_t *pool, gfp_t gfp_mask)
{
	void *element;
	unsigned long flags;
	wait_queue_t wait;
	gfp_t gfp_temp;

	might_sleep_if(gfp_mask & __GFP_WAIT);

	gfp_mask |= __GFP_NOMEMALLOC;	/* don't allocate emergency reserves */
	gfp_mask |= __GFP_NORETRY;	/* don't loop in __alloc_pages */
	gfp_mask |= __GFP_NOWARN;	/* failures are OK */

	gfp_temp = gfp_mask & ~(__GFP_WAIT|__GFP_IO);

repeat_alloc:

	element = pool->alloc(gfp_temp, pool->pool_data);
	if (likely(element != NULL))
		return element;

	spin_lock_irqsave(&pool->lock, flags);
	if (likely(pool->curr_nr)) {
		element = remove_element(pool);
		spin_unlock_irqrestore(&pool->lock, flags);
		return element;
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	/* We must not sleep in the GFP_ATOMIC case */
	if (!(gfp_mask & __GFP_WAIT))
		return NULL;

	/* Now start performing page reclaim */
	gfp_temp = gfp_mask;
	init_wait(&wait);
	prepare_to_wait(&pool->wait, &wait, TASK_UNINTERRUPTIBLE);
	smp_mb();
	if (!pool->curr_nr) {
		/*
		 * FIXME: this should be io_schedule().  The timeout is there
		 * as a workaround for some DM problems in 2.6.18.
		 */
		io_schedule_timeout(5*HZ);
	}
	finish_wait(&pool->wait, &wait);

	goto repeat_alloc;
}
EXPORT_SYMBOL(mempool_alloc);
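
/*
 * Illustrative sketch (not part of the original file): allocating from
 * the writeout path.  GFP_NOIO includes __GFP_WAIT, so the call may
 * sleep but will not fail; the element comes from the preallocated
 * reserve whenever alloc_fn() cannot satisfy the request.
 *
 *	void *element = mempool_alloc(pool, GFP_NOIO);
 */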

/**
 * mempool_free - return an element to the pool.
 * @element:   pool element pointer.
 * @pool:      pointer to the memory pool which was allocated via
 *             mempool_create().
 *
 * this function only sleeps if the free_fn() function sleeps.
 */
void mempool_free(void *element, mempool_t *pool)
{
	unsigned long flags;

	if (unlikely(element == NULL))
		return;

	smp_mb();
	if (pool->curr_nr < pool->min_nr) {
		spin_lock_irqsave(&pool->lock, flags);
		if (pool->curr_nr < pool->min_nr) {
			add_element(pool, element);
			spin_unlock_irqrestore(&pool->lock, flags);
			wake_up(&pool->wait);
			return;
		}
		spin_unlock_irqrestore(&pool->lock, flags);
	}
	pool->free(element, pool->pool_data);
}
EXPORT_SYMBOL(mempool_free);
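
/*
 * Illustrative sketch (not part of the original file): returning an
 * element.  If the reserve is below min_nr the element refills it and
 * any sleeper in mempool_alloc() is woken; otherwise it goes straight
 * to free_fn().
 *
 *	mempool_free(element, pool);
 */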

/*
 * A commonly used alloc and free fn.
 */
void *mempool_alloc_slab(gfp_t gfp_mask, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	return kmem_cache_alloc(mem, gfp_mask);
}
EXPORT_SYMBOL(mempool_alloc_slab);

void mempool_free_slab(void *element, void *pool_data)
{
	struct kmem_cache *mem = pool_data;
	kmem_cache_free(mem, element);
}
EXPORT_SYMBOL(mempool_free_slab);

/*
 * A commonly used alloc and free fn that kmalloc/kfrees the amount of memory
 * specified by pool_data.
 */
void *mempool_kmalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kmalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kmalloc);

void *mempool_kzalloc(gfp_t gfp_mask, void *pool_data)
{
	size_t size = (size_t)(long)pool_data;
	return kzalloc(size, gfp_mask);
}
EXPORT_SYMBOL(mempool_kzalloc);

void mempool_kfree(void *element, void *pool_data)
{
	kfree(element);
}
EXPORT_SYMBOL(mempool_kfree);
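
/*
 * Illustrative sketch (not part of the original file): for plain
 * fixed-size buffers no slab cache is needed; pool_data encodes the
 * allocation size directly.  "struct foo" is hypothetical.
 *
 *	pool = mempool_create(8, mempool_kmalloc, mempool_kfree,
 *			      (void *)(unsigned long)sizeof(struct foo));
 */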

/*
 * A simple mempool-backed page allocator that allocates pages
 * of the order specified by pool_data.
 */
void *mempool_alloc_pages(gfp_t gfp_mask, void *pool_data)
{
	int order = (int)(long)pool_data;
	return alloc_pages(gfp_mask, order);
}
EXPORT_SYMBOL(mempool_alloc_pages);

void mempool_free_pages(void *element, void *pool_data)
{
	int order = (int)(long)pool_data;
	__free_pages(element, order);
}
EXPORT_SYMBOL(mempool_free_pages);
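
/*
 * Illustrative sketch (not part of the original file): pool_data
 * encodes the page order, so this reserves four two-page buffers.
 *
 *	pool = mempool_create(4, mempool_alloc_pages,
 *			      mempool_free_pages, (void *)(long)1);
 */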