/*
 * Basic general purpose allocator for managing special purpose
 * memory, for example, memory that is not managed by the regular
 * kmalloc/kfree interface. Uses for this include on-device special
 * memory, uncached memory etc.
 *
 * It is safe to use the allocator in NMI handlers and other special
 * unblockable contexts that could otherwise deadlock on locks. This
 * is implemented by using atomic operations and retries on any
 * conflicts. The disadvantage is that there may be livelocks in
 * extreme cases. For better scalability, one allocator can be used
 * for each CPU.
 *
 * The lockless operation only works if there is enough memory
 * available. If new memory is added to the pool, a lock still has to
 * be taken. So any user relying on locklessness has to ensure that
 * sufficient memory is preallocated.
 *
 * The basic atomic operation of this allocator is cmpxchg on long.
 * On architectures that don't have an NMI-safe cmpxchg implementation,
 * the allocator can NOT be used in NMI handlers. So code that uses the
 * allocator in an NMI handler should depend on
 * CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG.
 *
 * Copyright 2005 (C) Jes Sorensen <jes@trained-monkey.org>
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2. See the file COPYING for more details.
 */
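
/*
 * Typical usage (illustrative sketch only, not part of this file): a driver
 * that owns a block of device-local SRAM could manage it roughly as below.
 * The sram_virt/sram_phys/SRAM_SIZE names are made-up placeholders for
 * whatever mapping the driver already has.
 *
 *	struct gen_pool *pool;
 *	unsigned long block;
 *
 *	pool = gen_pool_create(5, -1);		// 32-byte allocation granularity
 *	if (!pool)
 *		return -ENOMEM;
 *	if (gen_pool_add_virt(pool, (unsigned long)sram_virt, sram_phys,
 *			      SRAM_SIZE, -1)) {
 *		gen_pool_destroy(pool);
 *		return -ENOMEM;
 *	}
 *
 *	block = gen_pool_alloc(pool, 256);	// lockless, usable in atomic context
 *	...
 *	gen_pool_free(pool, block, 256);
 */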

#include <linux/slab.h>
#include <linux/export.h>
#include <linux/bitmap.h>
#include <linux/rculist.h>
#include <linux/interrupt.h>
#include <linux/genalloc.h>
#include <linux/of_address.h>
#include <linux/of_device.h>

static inline size_t chunk_size(const struct gen_pool_chunk *chunk)
{
	return chunk->end_addr - chunk->start_addr + 1;
}

static int set_bits_ll(unsigned long *addr, unsigned long mask_to_set)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if (val & mask_to_set)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val | mask_to_set)) != val);

	return 0;
}

static int clear_bits_ll(unsigned long *addr, unsigned long mask_to_clear)
{
	unsigned long val, nval;

	nval = *addr;
	do {
		val = nval;
		if ((val & mask_to_clear) != mask_to_clear)
			return -EBUSY;
		cpu_relax();
	} while ((nval = cmpxchg(addr, val, val & ~mask_to_clear)) != val);

	return 0;
}

/*
 * bitmap_set_ll - set the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to set
 *
 * Set @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users set the same bit, one of them returns the number of bits it
 * failed to set; otherwise 0 is returned.
 */
static int bitmap_set_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_set >= 0) {
		if (set_bits_ll(p, mask_to_set))
			return nr;
		nr -= bits_to_set;
		bits_to_set = BITS_PER_LONG;
		mask_to_set = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_set &= BITMAP_LAST_WORD_MASK(size);
		if (set_bits_ll(p, mask_to_set))
			return nr;
	}

	return 0;
}

/*
 * bitmap_clear_ll - clear the specified number of bits at the specified position
 * @map: pointer to a bitmap
 * @start: a bit position in @map
 * @nr: number of bits to clear
 *
 * Clear @nr bits starting from @start in @map locklessly. Several users
 * can set/clear the same bitmap simultaneously without a lock. If two
 * users clear the same bit, one of them returns the number of bits it
 * failed to clear; otherwise 0 is returned.
 */
static int bitmap_clear_ll(unsigned long *map, int start, int nr)
{
	unsigned long *p = map + BIT_WORD(start);
	const int size = start + nr;
	int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
	unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);

	while (nr - bits_to_clear >= 0) {
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
		nr -= bits_to_clear;
		bits_to_clear = BITS_PER_LONG;
		mask_to_clear = ~0UL;
		p++;
	}
	if (nr) {
		mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
		if (clear_bits_ll(p, mask_to_clear))
			return nr;
	}

	return 0;
}

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
	struct gen_pool *pool;

	pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
	if (pool != NULL) {
		spin_lock_init(&pool->lock);
		INIT_LIST_HEAD(&pool->chunks);
		pool->min_alloc_order = min_alloc_order;
		pool->algo = gen_pool_first_fit;
		pool->data = NULL;
	}
	return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @virt: virtual starting address of memory chunk to add to pool
 * @phys: physical starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a -ve errno on failure.
 */
int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phys,
		      size_t size, int nid)
{
	struct gen_pool_chunk *chunk;
	int nbits = size >> pool->min_alloc_order;
	int nbytes = sizeof(struct gen_pool_chunk) +
				BITS_TO_LONGS(nbits) * sizeof(long);

	chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
	if (unlikely(chunk == NULL))
		return -ENOMEM;

	chunk->phys_addr = phys;
	chunk->start_addr = virt;
	chunk->end_addr = virt + size - 1;
	atomic_long_set(&chunk->avail, size);

	spin_lock(&pool->lock);
	list_add_rcu(&chunk->next_chunk, &pool->chunks);
	spin_unlock(&pool->lock);

	return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);

/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool: pool to allocate from
 * @addr: starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
	struct gen_pool_chunk *chunk;
	phys_addr_t paddr = -1;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			paddr = chunk->phys_addr + (addr - chunk->start_addr);
			break;
		}
	}
	rcu_read_unlock();

	return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool. Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
	struct list_head *_chunk, *_next_chunk;
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int bit, end_bit;

	list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
		chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
		list_del(&chunk->next_chunk);

		end_bit = chunk_size(chunk) >> order;
		bit = find_next_bit(chunk->bits, end_bit, 0);
		BUG_ON(bit < end_bit);

		kfree(chunk);
	}
	kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
	struct gen_pool_chunk *chunk;
	unsigned long addr = 0;
	int order = pool->min_alloc_order;
	int nbits, start_bit = 0, end_bit, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	if (size == 0)
		return 0;

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (size > atomic_long_read(&chunk->avail))
			continue;

		end_bit = chunk_size(chunk) >> order;
retry:
		start_bit = pool->algo(chunk->bits, end_bit, start_bit, nbits,
				pool->data);
		if (start_bit >= end_bit)
			continue;
		remain = bitmap_set_ll(chunk->bits, start_bit, nbits);
		if (remain) {
			remain = bitmap_clear_ll(chunk->bits, start_bit,
						 nbits - remain);
			BUG_ON(remain);
			goto retry;
		}

		addr = chunk->start_addr + ((unsigned long)start_bit << order);
		size = nbits << order;
		atomic_long_sub(size, &chunk->avail);
		break;
	}
	rcu_read_unlock();
	return addr;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_dma_alloc - allocate special memory from the pool for DMA usage
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 * @dma: dma-view physical address return value. Use NULL if unneeded.
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses the pool allocation function (with first-fit algorithm by default).
 * Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void *gen_pool_dma_alloc(struct gen_pool *pool, size_t size, dma_addr_t *dma)
{
	unsigned long vaddr;

	if (!pool)
		return NULL;

	vaddr = gen_pool_alloc(pool, size);
	if (!vaddr)
		return NULL;

	if (dma)
		*dma = gen_pool_virt_to_phys(pool, vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(gen_pool_dma_alloc);
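
/*
 * Example (sketch): grabbing both the CPU and bus views of a buffer from a
 * pool whose chunks were registered with their physical addresses via
 * gen_pool_add_virt(). "pool" is assumed to exist in the caller.
 *
 *	dma_addr_t dma;
 *	void *buf;
 *
 *	buf = gen_pool_dma_alloc(pool, 512, &dma);
 *	if (!buf)
 *		return -ENOMEM;
 */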

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified
 * pool. Cannot be used in NMI handlers on architectures without an
 * NMI-safe cmpxchg implementation.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
	struct gen_pool_chunk *chunk;
	int order = pool->min_alloc_order;
	int start_bit, nbits, remain;

#ifndef CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG
	BUG_ON(in_nmi());
#endif

	nbits = (size + (1UL << order) - 1) >> order;
	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk) {
		if (addr >= chunk->start_addr && addr <= chunk->end_addr) {
			BUG_ON(addr + size - 1 > chunk->end_addr);
			start_bit = (addr - chunk->start_addr) >> order;
			remain = bitmap_clear_ll(chunk->bits, start_bit, nbits);
			BUG_ON(remain);
			size = nbits << order;
			atomic_long_add(size, &chunk->avail);
			rcu_read_unlock();
			return;
		}
	}
	rcu_read_unlock();
	BUG();
}
EXPORT_SYMBOL(gen_pool_free);

/**
 * gen_pool_for_each_chunk - call func for every chunk of generic memory pool
 * @pool: the generic memory pool
 * @func: func to call
 * @data: additional data used by @func
 *
 * Call @func for every chunk of generic memory pool. @func is
 * called with rcu_read_lock() held.
 */
void gen_pool_for_each_chunk(struct gen_pool *pool,
	void (*func)(struct gen_pool *pool, struct gen_pool_chunk *chunk, void *data),
	void *data)
{
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk)
		func(pool, chunk, data);
	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_for_each_chunk);

/**
 * addr_in_gen_pool - checks if an address falls within the range of a pool
 * @pool: the generic memory pool
 * @start: start address
 * @size: size of the region
 *
 * Check if the range of addresses falls within the specified pool. Returns
 * true if the entire range is contained in the pool and false otherwise.
 */
bool addr_in_gen_pool(struct gen_pool *pool, unsigned long start,
			size_t size)
{
	bool found = false;
	unsigned long end = start + size - 1;	/* last byte of the region */
	struct gen_pool_chunk *chunk;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &(pool)->chunks, next_chunk) {
		if (start >= chunk->start_addr && start <= chunk->end_addr) {
			if (end <= chunk->end_addr) {
				found = true;
				break;
			}
		}
	}
	rcu_read_unlock();
	return found;
}

/**
 * gen_pool_avail - get available free space of the pool
 * @pool: pool to get available free space
 *
 * Return available free space of the specified pool.
 */
size_t gen_pool_avail(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t avail = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		avail += atomic_long_read(&chunk->avail);
	rcu_read_unlock();
	return avail;
}
EXPORT_SYMBOL_GPL(gen_pool_avail);

/**
 * gen_pool_size - get size in bytes of memory managed by the pool
 * @pool: pool to get size
 *
 * Return size in bytes of memory managed by the pool.
 */
size_t gen_pool_size(struct gen_pool *pool)
{
	struct gen_pool_chunk *chunk;
	size_t size = 0;

	rcu_read_lock();
	list_for_each_entry_rcu(chunk, &pool->chunks, next_chunk)
		size += chunk_size(chunk);
	rcu_read_unlock();
	return size;
}
EXPORT_SYMBOL_GPL(gen_pool_size);

/**
 * gen_pool_set_algo - set the allocation algorithm
 * @pool: pool to change allocation algorithm
 * @algo: custom algorithm function
 * @data: additional data used by @algo
 *
 * Call @algo for each memory allocation in the pool.
 * If @algo is NULL, use gen_pool_first_fit as the default
 * memory allocation function.
 */
void gen_pool_set_algo(struct gen_pool *pool, genpool_algo_t algo, void *data)
{
	rcu_read_lock();

	pool->algo = algo;
	if (!pool->algo)
		pool->algo = gen_pool_first_fit;

	pool->data = data;

	rcu_read_unlock();
}
EXPORT_SYMBOL(gen_pool_set_algo);
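
/*
 * Example (sketch): a pool that should pack many differently sized
 * allocations tightly can switch from the default first-fit policy to the
 * best-fit search implemented below; passing a NULL algorithm restores
 * first-fit.
 *
 *	gen_pool_set_algo(pool, gen_pool_best_fit, NULL);
 */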

/**
 * gen_pool_first_fit - find the first available region
 * of memory matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	return bitmap_find_next_zero_area(map, size, start, nr, 0);
}
EXPORT_SYMBOL(gen_pool_first_fit);

/**
 * gen_pool_first_fit_order_align - find the first available region
 * of memory matching the size requirement. The region will be aligned
 * to the order of the size specified.
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 */
unsigned long gen_pool_first_fit_order_align(unsigned long *map,
		unsigned long size, unsigned long start,
		unsigned int nr, void *data)
{
	unsigned long align_mask = roundup_pow_of_two(nr) - 1;

	return bitmap_find_next_zero_area(map, size, start, nr, align_mask);
}
EXPORT_SYMBOL(gen_pool_first_fit_order_align);

/**
 * gen_pool_best_fit - find the best fitting region of memory
 * matching the size requirement (no alignment constraint)
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @data: additional data - unused
 *
 * Iterate over the bitmap to find the smallest free region
 * from which we can allocate the memory.
 */
unsigned long gen_pool_best_fit(unsigned long *map, unsigned long size,
		unsigned long start, unsigned int nr, void *data)
{
	unsigned long start_bit = size;
	unsigned long len = size + 1;
	unsigned long index;

	index = bitmap_find_next_zero_area(map, size, start, nr, 0);

	while (index < size) {
		int next_bit = find_next_bit(map, size, index + nr);
		if ((next_bit - index) < len) {
			len = next_bit - index;
			start_bit = index;
			if (len == nr)
				return start_bit;
		}
		index = bitmap_find_next_zero_area(map, size,
						   next_bit + 1, nr, 0);
	}

	return start_bit;
}
EXPORT_SYMBOL(gen_pool_best_fit);

static void devm_gen_pool_release(struct device *dev, void *res)
{
	gen_pool_destroy(*(struct gen_pool **)res);
}

/**
 * devm_gen_pool_create - managed gen_pool_create
 * @dev: device that provides the gen_pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface. The pool will be
 * automatically destroyed by the device management code.
 */
struct gen_pool *devm_gen_pool_create(struct device *dev, int min_alloc_order,
		int nid)
{
	struct gen_pool **ptr, *pool;

	ptr = devres_alloc(devm_gen_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = gen_pool_create(min_alloc_order, nid);
	if (pool) {
		*ptr = pool;
		devres_add(dev, ptr);
	} else {
		devres_free(ptr);
	}

	return pool;
}
EXPORT_SYMBOL(devm_gen_pool_create);
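
/*
 * Example (sketch): a probe() routine creating a device-managed pool for an
 * MMIO SRAM region. "priv", "base" and "res" are hypothetical driver
 * variables; the pool itself is released automatically with the device.
 *
 *	priv->pool = devm_gen_pool_create(&pdev->dev, ilog2(64),
 *					  dev_to_node(&pdev->dev));
 *	if (!priv->pool)
 *		return -ENOMEM;
 *	ret = gen_pool_add_virt(priv->pool, (unsigned long)base,
 *				res->start, resource_size(res), -1);
 */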

/**
 * dev_get_gen_pool - Obtain the gen_pool (if any) for a device
 * @dev: device to retrieve the gen_pool from
 *
 * Returns the gen_pool for the device if one is present, or NULL.
 */
struct gen_pool *dev_get_gen_pool(struct device *dev)
{
	struct gen_pool **p = devres_find(dev, devm_gen_pool_release, NULL,
					  NULL);

	if (!p)
		return NULL;
	return *p;
}
EXPORT_SYMBOL_GPL(dev_get_gen_pool);

#ifdef CONFIG_OF
/**
 * of_get_named_gen_pool - find a pool by phandle property
 * @np: device node
 * @propname: property name containing phandle(s)
 * @index: index into the phandle array
 *
 * Returns the pool that contains the chunk starting at the physical
 * address of the device tree node pointed at by the phandle property,
 * or NULL if not found.
 */
struct gen_pool *of_get_named_gen_pool(struct device_node *np,
	const char *propname, int index)
{
	struct platform_device *pdev;
	struct device_node *np_pool;

	np_pool = of_parse_phandle(np, propname, index);
	if (!np_pool)
		return NULL;
	pdev = of_find_device_by_node(np_pool);
	of_node_put(np_pool);
	if (!pdev)
		return NULL;
	return dev_get_gen_pool(&pdev->dev);
}
EXPORT_SYMBOL_GPL(of_get_named_gen_pool);
#endif /* CONFIG_OF */