• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Basic general purpose allocator for managing special purpose
3  * memory, for example, memory that is not managed by the regular
4  * kmalloc/kfree interface.  Uses for this includes on-device special
5  * memory, uncached memory etc.
6  *
7  * It is safe to use the allocator in NMI handlers and other special
8  * unblockable contexts that could otherwise deadlock on locks.  This
9  * is implemented by using atomic operations and retries on any
10  * conflicts.  The disadvantage is that there may be livelocks in
11  * extreme cases.  For better scalability, one allocator can be used
12  * for each CPU.
13  *
14  * The lockless operation only works if there is enough memory
15  * available.  If new memory is added to the pool a lock has to be
16  * still taken.  So any user relying on locklessness has to ensure
17  * that sufficient memory is preallocated.
18  *
19  * The basic atomic operation of this allocator is cmpxchg on long.
20  * On architectures that don't have NMI-safe cmpxchg implementation,
21  * the allocator can NOT be used in NMI handler.  So code uses the
22  * allocator in NMI handler should depend on
23  * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
24  *
25  * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
26  *
27  * This source code is licensed under the GNU General Public License,
28  * Version 2.  See the file COPYING for more details.
29  */
30 
31 #include <linux/slab.h>
32 #include <linux/export.h>
33 #include <linux/bitmap.h>
34 #include <linux/rculist.h>
35 #include <linux/interrupt.h>
36 #include <linux/genalloc.h>
37 #include <linux/of_device.h>
38 #include <linux/vmalloc.h>
39 
chunk_size(const struct gen_pool_chunk * chunk)40 static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
41 {
42 	return chunk->end_addr - chunk->start_addr + 1;
43 }
44 
set_bits_ll(unsigned long * addr,unsigned long mask_to_set)45 static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
46 {
47 	unsigned long val, nval;
48 
49 	nval = *addr;
50 	do {
51 		val = nval;
52 		if (val & mask_to_set)
53 			return -EBUSY;
54 		cpu_relax();
55 	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);
56 
57 	return 0;
58 }
59 
clear_bits_ll(unsigned long * addr,unsigned long mask_to_clear)60 static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
61 {
62 	unsigned long val, nval;
63 
64 	nval = *addr;
65 	do {
66 		val = nval;
67 		if ((val & mask_to_clear) != mask_to_clear)
68 			return -EBUSY;
69 		cpu_relax();
70 	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);
71 
72 	return 0;
73 }
74 
75 /*
76  * bitmap_set_ll - set the specified number of bits at the specified position
77  * @map: pointer to a bitmap
78  * @start: a bit position in @map
79  * @nr: number of bits to set
80  *
81  * Set @nr bits start from @start in @map lock-lessly. Several users
82  * can set/clear the same bitmap simultaneously without lock. If two
83  * users set the same bit, one user will return remain bits, otherwise
84  * return 0.
85  */
bitmap_set_ll(unsigned long * map,unsigned long start,unsigned long nr)86 static int bitmap_set_ll(unsigned long *map, unsigned long start, unsigned long nr)
87 {
88 	unsigned long *p = map + BIT_WORD(start);
89 	const unsigned long size = start + nr;
90 	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
91 	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
92 
93 	while (nr >= bits_to_set) {
94 		if (set_bits_ll(p, mask_to_set))
95 			return nr;
96 		nr -= bits_to_set;
97 		bits_to_set = BITS_PER_LONG;
98 		mask_to_set = ~0UL;
99 		p++;
100 	}
101 	if (nr) {
102 		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
103 		if (set_bits_ll(p, mask_to_set))
104 			return nr;
105 	}
106 
107 	return 0;
108 }
109 
110 /*
111  * bitmap_clear_ll - clear the specified number of bits at the specified position
112  * @map: pointer to a bitmap
113  * @start: a bit position in @map
114  * @nr: number of bits to set
115  *
116  * Clear @nr bits start from @start in @map lock-lessly. Several users
117  * can set/clear the same bitmap simultaneously without lock. If two
118  * users clear the same bit, one user will return remain bits,
119  * otherwise return 0.
120  */
121 static unsigned long
bitmap_clear_ll(unsigned long * map,unsigned long start,unsigned long nr)122 bitmap_clear_ll(unsigned long *map, unsigned long start, unsigned long nr)
123 {
124 	unsigned long *p = map + BIT_WORD(start);
125 	const unsigned long size = start + nr;
126 	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
127 	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
128 
129 	while (nr >= bits_to_clear) {
130 		if (clear_bits_ll(p, mask_to_clear))
131 			return nr;
132 		nr -= bits_to_clear;
133 		bits_to_clear = BITS_PER_LONG;
134 		mask_to_clear = ~0UL;
135 		p++;
136 	}
137 	if (nr) {
138 		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
139 		if (clear_bits_ll(p, mask_to_clear))
140 			return nr;
141 	}
142 
143 	return 0;
144 }
145 
146 /**
147  * gen_pool_create - create a new special memory pool
148  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
149  * @nid: node id of the node the pool structure should be allocated on, or -1
150  *
151  * Create a new special memory pool that can be used to manage special purpose
152  * memory not managed by the regular kmalloc/kfree interface.
153  */
gen_pool_create(int min_alloc_order,int nid)154 struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
155 {
156 	struct gen_pool *pool;
157 
158 	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
159 	if (pool != NULL) {
160 		spin_lock_init(&pool->lock);
161 		INIT_LIST_HEAD(&pool->chunks);
162 		pool->min_alloc_order = min_alloc_order;
163 		pool->algo = gen_pool_first_fit;
164 		pool->data = NULL;
165 		pool->name = NULL;
166 	}
167 	return pool;
168 }
169 EXPORT_SYMBOL(gen_pool_create);
170 
171 /**
172  * gen_pool_add_virt - add a new chunk of special memory to the pool
173  * @pool: pool to add new memory chunk to
174  * @virt: virtual starting address of memory chunk to add to pool
175  * @phys: physical starting address of memory chunk to add to pool
176  * @size: size in bytes of the memory chunk to add to pool
177  * @nid: node id of the node the chunk structure and bitmap should be
178  *       allocated on, or -1
179  *
180  * Add a new chunk of special memory to the specified pool.
181  *
182  * Returns 0 on success or a -ve errno on failure.
183  */
gen_pool_add_virt(struct gen_pool * pool,unsigned long virt,phys_addr_t phys,size_t size,int nid)184 int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
185 		 size_t size, int nid)
186 {
187 	struct gen_pool_chunk *chunk;
188 	unsigned long nbits = size >> pool->min_alloc_order;
189 	unsigned long nbytes = sizeof(struct gen_pool_chunk) +
190 				BITS_TO_LONGS(nbits) * sizeof(long);
191 
192 	chunk = vzalloc_node(nbytes, nid);
193 	if (unlikely(chunk == NULL))
194 		return -ENOMEM;
195 
196 	chunk->phys_addr = phys;
197 	chunk->start_addr = virt;
198 	chunk->end_addr = virt + size - 1;
199 	atomic_long_set(&chunk->avail, size);
200 
201 	spin_lock(&pool->lock);
202 	list_add_rcu(&chunk->next_chunk, &pool->chunks);
203 	spin_unlock(&pool->lock);
204 
205 	return 0;
206 }
207 EXPORT_SYMBOL(gen_pool_add_virt);
208 
209 /**
210  * gen_pool_virt_to_phys - return the physical address of memory
211  * @pool: pool to allocate from
212  * @addr: starting address of memory
213  *
214  * Returns the physical address on success, or -1 on error.
215  */
gen_pool_virt_to_phys(struct gen_pool * pool,unsigned long addr)216 phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
217 {
218 	struct gen_pool_chunk *chunk;
219 	phys_addr_t paddr = -1;
220 
221 	rcu_read_lock();
222 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
223 		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
224 			paddr = chunk->phys_addr + (addr - chunk->start_addr);
225 			break;
226 		}
227 	}
228 	rcu_read_unlock();
229 
230 	return paddr;
231 }
232 EXPORT_SYMBOL(gen_pool_virt_to_phys);
233 
234 /**
235  * gen_pool_destroy - destroy a special memory pool
236  * @pool: pool to destroy
237  *
238  * Destroy the specified special memory pool. Verifies that there are no
239  * outstanding allocations.
240  */
gen_pool_destroy(struct gen_pool * pool)241 void gen_pool_destroy(struct gen_pool *pool)
242 {
243 	struct list_head *_chunk, *_next_chunk;
244 	struct gen_pool_chunk *chunk;
245 	int order = pool->min_alloc_order;
246 	unsigned long bit, end_bit;
247 
248 	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
249 		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
250 		list_del(&chunk->next_chunk);
251 
252 		end_bit = chunk_size(chunk) >> order;
253 		bit = find_next_bit(chunk->bits, end_bit, 0);
254 		BUG_ON(bit < end_bit);
255 
256 		vfree(chunk);
257 	}
258 	kfree_const(pool->name);
259 	kfree(pool);
260 }
261 EXPORT_SYMBOL(gen_pool_destroy);
262 
263 /**
264  * gen_pool_alloc - allocate special memory from the pool
265  * @pool: pool to allocate from
266  * @size: number of bytes to allocate from the pool
267  *
268  * Allocate the requested number of bytes from the specified pool.
269  * Uses the pool allocation function (with first-fit algorithm by default).
270  * Can not be used in NMI handler on architectures without
271  * NMI-safe cmpxchg implementation.
272  */
gen_pool_alloc(struct gen_pool * pool,size_t size)273 unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
274 {
275 	struct gen_pool_chunk *chunk;
276 	unsigned long addr = 0;
277 	int order = pool->min_alloc_order;
278 	unsigned long nbits, start_bit, end_bit, remain;
279 
280 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
281 	BUG_ON(in_nmi());
282 #endif
283 
284 	if (size == 0)
285 		return 0;
286 
287 	nbits = (size + (1UL << order) - 1) >> order;
288 	rcu_read_lock();
289 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
290 		if (size > atomic_long_read(&chunk->avail))
291 			continue;
292 
293 		start_bit = 0;
294 		end_bit = chunk_size(chunk) >> order;
295 retry:
296 		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
297 				pool->data);
298 		if (start_bit >= end_bit)
299 			continue;
300 		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
301 		if (remain) {
302 			remain = bitmap_clear_ll(chunk->bits, start_bit,
303 						 nbits - remain);
304 			BUG_ON(remain);
305 			goto retry;
306 		}
307 
308 		addr = chunk->start_addr + ((unsigned long)start_bit << order);
309 		size = nbits << order;
310 		atomic_long_sub(size, &chunk->avail);
311 		break;
312 	}
313 	rcu_read_unlock();
314 	return addr;
315 }
316 EXPORT_SYMBOL(gen_pool_alloc);
317 
318 /**
319  * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
320  * @pool: pool to allocate from
321  * @size: number of bytes to allocate from the pool
322  * @dma: dma-view physical address return value.  Use NULL if unneeded.
323  *
324  * Allocate the requested number of bytes from the specified pool.
325  * Uses the pool allocation function (with first-fit algorithm by default).
326  * Can not be used in NMI handler on architectures without
327  * NMI-safe cmpxchg implementation.
328  */
gen_pool_dma_alloc(struct gen_pool * pool,size_t size,dma_addr_t * dma)329 void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
330 {
331 	unsigned long vaddr;
332 
333 	if (!pool)
334 		return NULL;
335 
336 	vaddr = gen_pool_alloc(pool, size);
337 	if (!vaddr)
338 		return NULL;
339 
340 	if (dma)
341 		*dma = gen_pool_virt_to_phys(pool, vaddr);
342 
343 	return (void *)vaddr;
344 }
345 EXPORT_SYMBOL(gen_pool_dma_alloc);
346 
347 /**
348  * gen_pool_free - free allocated special memory back to the pool
349  * @pool: pool to free to
350  * @addr: starting address of memory to free back to pool
351  * @size: size in bytes of memory to free
352  *
353  * Free previously allocated special memory back to the specified
354  * pool.  Can not be used in NMI handler on architectures without
355  * NMI-safe cmpxchg implementation.
356  */
gen_pool_free(struct gen_pool * pool,unsigned long addr,size_t size)357 void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
358 {
359 	struct gen_pool_chunk *chunk;
360 	int order = pool->min_alloc_order;
361 	unsigned long start_bit, nbits, remain;
362 
363 #ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
364 	BUG_ON(in_nmi());
365 #endif
366 
367 	nbits = (size + (1UL << order) - 1) >> order;
368 	rcu_read_lock();
369 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
370 		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
371 			BUG_ON(addr + size - 1 > chunk->end_addr);
372 			start_bit = (addr - chunk->start_addr) >> order;
373 			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
374 			BUG_ON(remain);
375 			size = nbits << order;
376 			atomic_long_add(size, &chunk->avail);
377 			rcu_read_unlock();
378 			return;
379 		}
380 	}
381 	rcu_read_unlock();
382 	BUG();
383 }
384 EXPORT_SYMBOL(gen_pool_free);
385 
386 /**
387  * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
388  * @pool:	the generic memory pool
389  * @func:	func to call
390  * @data:	additional data used by @func
391  *
392  * Call @func for every chunk of generic memory pool.  The @func is
393  * called with rcu_read_lock held.
394  */
gen_pool_for_each_chunk(struct gen_pool * pool,void (* func)(struct gen_pool * pool,struct gen_pool_chunk * chunk,void * data),void * data)395 void gen_pool_for_each_chunk(struct gen_pool *pool,
396 	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
397 	void *data)
398 {
399 	struct gen_pool_chunk *chunk;
400 
401 	rcu_read_lock();
402 	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
403 		func(pool, chunk, data);
404 	rcu_read_unlock();
405 }
406 EXPORT_SYMBOL(gen_pool_for_each_chunk);
407 
408 /**
409  * addr_in_gen_pool - checks if an address falls within the range of a pool
410  * @pool:	the generic memory pool
411  * @start:	start address
412  * @size:	size of the region
413  *
414  * Check if the range of addresses falls within the specified pool. Returns
415  * true if the entire range is contained in the pool and false otherwise.
416  */
addr_in_gen_pool(struct gen_pool * pool,unsigned long start,size_t size)417 bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
418 			size_t size)
419 {
420 	bool found = false;
421 	unsigned long end = start + size - 1;
422 	struct gen_pool_chunk *chunk;
423 
424 	rcu_read_lock();
425 	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
426 		if (start >= chunk->start_addr && start <= chunk->end_addr) {
427 			if (end <= chunk->end_addr) {
428 				found = true;
429 				break;
430 			}
431 		}
432 	}
433 	rcu_read_unlock();
434 	return found;
435 }
436 
437 /**
438  * gen_pool_avail - get available free space of the pool
439  * @pool: pool to get available free space
440  *
441  * Return available free space of the specified pool.
442  */
gen_pool_avail(struct gen_pool * pool)443 size_t gen_pool_avail(struct gen_pool *pool)
444 {
445 	struct gen_pool_chunk *chunk;
446 	size_t avail = 0;
447 
448 	rcu_read_lock();
449 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
450 		avail += atomic_long_read(&chunk->avail);
451 	rcu_read_unlock();
452 	return avail;
453 }
454 EXPORT_SYMBOL_GPL(gen_pool_avail);
455 
456 /**
457  * gen_pool_size - get size in bytes of memory managed by the pool
458  * @pool: pool to get size
459  *
460  * Return size in bytes of memory managed by the pool.
461  */
gen_pool_size(struct gen_pool * pool)462 size_t gen_pool_size(struct gen_pool *pool)
463 {
464 	struct gen_pool_chunk *chunk;
465 	size_t size = 0;
466 
467 	rcu_read_lock();
468 	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
469 		size += chunk_size(chunk);
470 	rcu_read_unlock();
471 	return size;
472 }
473 EXPORT_SYMBOL_GPL(gen_pool_size);
474 
475 /**
476  * gen_pool_set_algo - set the allocation algorithm
477  * @pool: pool to change allocation algorithm
478  * @algo: custom algorithm function
479  * @data: additional data used by @algo
480  *
481  * Call @algo for each memory allocation in the pool.
482  * If @algo is NULL use gen_pool_first_fit as default
483  * memory allocation function.
484  */
gen_pool_set_algo(struct gen_pool * pool,genpool_algo_t algo,void * data)485 void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
486 {
487 	rcu_read_lock();
488 
489 	pool->algo = algo;
490 	if (!pool->algo)
491 		pool->algo = gen_pool_first_fit;
492 
493 	pool->data = data;
494 
495 	rcu_read_unlock();
496 }
497 EXPORT_SYMBOL(gen_pool_set_algo);
498 
499 /**
500  * gen_pool_first_fit - find the first available region
501  * of memory matching the size requirement (no alignment constraint)
502  * @map: The address to base the search on
503  * @size: The bitmap size in bits
504  * @start: The bitnumber to start searching at
505  * @nr: The number of zeroed bits we're looking for
506  * @data: additional data - unused
507  */
gen_pool_first_fit(unsigned long * map,unsigned long size,unsigned long start,unsigned int nr,void * data)508 unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
509 		unsigned long start, unsigned int nr, void *data)
510 {
511 	return bitmap_find_next_zero_area(map, size, start, nr, 0);
512 }
513 EXPORT_SYMBOL(gen_pool_first_fit);
514 
515 /**
516  * gen_pool_first_fit_order_align - find the first available region
517  * of memory matching the size requirement. The region will be aligned
518  * to the order of the size specified.
519  * @map: The address to base the search on
520  * @size: The bitmap size in bits
521  * @start: The bitnumber to start searching at
522  * @nr: The number of zeroed bits we're looking for
523  * @data: additional data - unused
524  */
gen_pool_first_fit_order_align(unsigned long * map,unsigned long size,unsigned long start,unsigned int nr,void * data)525 unsigned long gen_pool_first_fit_order_align(unsigned long *map,
526 		unsigned long size, unsigned long start,
527 		unsigned int nr, void *data)
528 {
529 	unsigned long align_mask = roundup_pow_of_two(nr) - 1;
530 
531 	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
532 }
533 EXPORT_SYMBOL(gen_pool_first_fit_order_align);
534 
535 /**
536  * gen_pool_best_fit - find the best fitting region of memory
537  * macthing the size requirement (no alignment constraint)
538  * @map: The address to base the search on
539  * @size: The bitmap size in bits
540  * @start: The bitnumber to start searching at
541  * @nr: The number of zeroed bits we're looking for
542  * @data: additional data - unused
543  *
544  * Iterate over the bitmap to find the smallest free region
545  * which we can allocate the memory.
546  */
gen_pool_best_fit(unsigned long * map,unsigned long size,unsigned long start,unsigned int nr,void * data)547 unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
548 		unsigned long start, unsigned int nr, void *data)
549 {
550 	unsigned long start_bit = size;
551 	unsigned long len = size + 1;
552 	unsigned long index;
553 
554 	index = bitmap_find_next_zero_area(map, size, start, nr, 0);
555 
556 	while (index < size) {
557 		unsigned long next_bit = find_next_bit(map, size, index + nr);
558 		if ((next_bit - index) < len) {
559 			len = next_bit - index;
560 			start_bit = index;
561 			if (len == nr)
562 				return start_bit;
563 		}
564 		index = bitmap_find_next_zero_area(map, size,
565 						   next_bit + 1, nr, 0);
566 	}
567 
568 	return start_bit;
569 }
570 EXPORT_SYMBOL(gen_pool_best_fit);
571 
devm_gen_pool_release(struct device * dev,void * res)572 static void devm_gen_pool_release(struct device *dev, void *res)
573 {
574 	gen_pool_destroy(*(struct gen_pool **)res);
575 }
576 
devm_gen_pool_match(struct device * dev,void * res,void * data)577 static int devm_gen_pool_match(struct device *dev, void *res, void *data)
578 {
579 	struct gen_pool **p = res;
580 
581 	/* NULL data matches only a pool without an assigned name */
582 	if (!data && !(*p)->name)
583 		return 1;
584 
585 	if (!data || !(*p)->name)
586 		return 0;
587 
588 	return !strcmp((*p)->name, data);
589 }
590 
591 /**
592  * gen_pool_get - Obtain the gen_pool (if any) for a device
593  * @dev: device to retrieve the gen_pool from
594  * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
595  *
596  * Returns the gen_pool for the device if one is present, or NULL.
597  */
gen_pool_get(struct device * dev,const char * name)598 struct gen_pool *gen_pool_get(struct device *dev, const char *name)
599 {
600 	struct gen_pool **p;
601 
602 	p = devres_find(dev, devm_gen_pool_release, devm_gen_pool_match,
603 			(void *)name);
604 	if (!p)
605 		return NULL;
606 	return *p;
607 }
608 EXPORT_SYMBOL_GPL(gen_pool_get);
609 
610 /**
611  * devm_gen_pool_create - managed gen_pool_create
612  * @dev: device that provides the gen_pool
613  * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
614  * @nid: node selector for allocated gen_pool, %NUMA_NO_NODE for all nodes
615  * @name: name of a gen_pool or NULL, identifies a particular gen_pool on device
616  *
617  * Create a new special memory pool that can be used to manage special purpose
618  * memory not managed by the regular kmalloc/kfree interface. The pool will be
619  * automatically destroyed by the device management code.
620  */
devm_gen_pool_create(struct device * dev,int min_alloc_order,int nid,const char * name)621 struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
622 				      int nid, const char *name)
623 {
624 	struct gen_pool **ptr, *pool;
625 	const char *pool_name = NULL;
626 
627 	/* Check that genpool to be created is uniquely addressed on device */
628 	if (gen_pool_get(dev, name))
629 		return ERR_PTR(-EINVAL);
630 
631 	if (name) {
632 		pool_name = kstrdup_const(name, GFP_KERNEL);
633 		if (!pool_name)
634 			return ERR_PTR(-ENOMEM);
635 	}
636 
637 	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
638 	if (!ptr)
639 		goto free_pool_name;
640 
641 	pool = gen_pool_create(min_alloc_order, nid);
642 	if (!pool)
643 		goto free_devres;
644 
645 	*ptr = pool;
646 	pool->name = pool_name;
647 	devres_add(dev, ptr);
648 
649 	return pool;
650 
651 free_devres:
652 	devres_free(ptr);
653 free_pool_name:
654 	kfree_const(pool_name);
655 
656 	return ERR_PTR(-ENOMEM);
657 }
658 EXPORT_SYMBOL(devm_gen_pool_create);
659 
660 #ifdef CONFIG_OF
661 /**
662  * of_gen_pool_get - find a pool by phandle property
663  * @np: device node
664  * @propname: property name containing phandle(s)
665  * @index: index into the phandle array
666  *
667  * Returns the pool that contains the chunk starting at the physical
668  * address of the device tree node pointed at by the phandle property,
669  * or NULL if not found.
670  */
of_gen_pool_get(struct device_node * np,const char * propname,int index)671 struct gen_pool *of_gen_pool_get(struct device_node *np,
672 	const char *propname, int index)
673 {
674 	struct platform_device *pdev;
675 	struct device_node *np_pool, *parent;
676 	const char *name = NULL;
677 	struct gen_pool *pool = NULL;
678 
679 	np_pool = of_parse_phandle(np, propname, index);
680 	if (!np_pool)
681 		return NULL;
682 
683 	pdev = of_find_device_by_node(np_pool);
684 	if (!pdev) {
685 		/* Check if named gen_pool is created by parent node device */
686 		parent = of_get_parent(np_pool);
687 		pdev = of_find_device_by_node(parent);
688 		of_node_put(parent);
689 
690 		of_property_read_string(np_pool, "label", &name);
691 		if (!name)
692 			name = np_pool->name;
693 	}
694 	if (pdev)
695 		pool = gen_pool_get(&pdev->dev, name);
696 	of_node_put(np_pool);
697 
698 	return pool;
699 }
700 EXPORT_SYMBOL_GPL(of_gen_pool_get);
701 #endif /* CONFIG_OF */
702