// SPDX-License-Identifier: GPL-2.0-only
/*
 * DMA Pool allocator
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device. It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 *
 * The current design of this allocator is fairly simple. The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages. Each page in the page_list is split into blocks of at
 * least 'size' bytes. Free blocks are tracked in an unsorted singly-linked
 * list of free blocks across all pages. Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */

#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

struct dma_block {
	struct dma_block *next_block;
	dma_addr_t dma;
};

struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	struct dma_block *next_block;
	size_t nr_blocks;
	size_t nr_active;
	size_t nr_pages;
	struct device *dev;
	unsigned int size;
	unsigned int allocation;
	unsigned int boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
};
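
/*
 * How the pieces above fit together (summary of the design notes in the
 * file header):
 *
 *   struct dma_pool
 *     page_list  -> struct dma_page <-> struct dma_page <-> ...  (all pages)
 *     next_block -> struct dma_block -> struct dma_block -> ...  (free blocks,
 *                                       possibly spanning several pages)
 *
 * While a block is free, its struct dma_block header lives inside the block
 * itself; allocating simply pops the head of the free list.
 */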

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);

static ssize_t pools_show(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	struct dma_pool *pool;
	unsigned int size;

	size = sysfs_emit(buf, "poolinfo - 0.1\n");

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		/* per-pool info, no real statistics yet */
		size += sysfs_emit_at(buf, size, "%-16s %4zu %4zu %4u %2zu\n",
				      pool->name, pool->nr_active,
				      pool->nr_blocks, pool->size,
				      pool->nr_pages);
	}
	mutex_unlock(&pools_lock);

	return size;
}

static DEVICE_ATTR_RO(pools);
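
/*
 * Illustrative sysfs output (hypothetical pool name and values), matching
 * the format string above: name, active blocks, total blocks, block size
 * and page count per pool:
 *
 *	poolinfo - 0.1
 *	cmd_pool            3   85   96  2
 */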

#ifdef DMAPOOL_DEBUG
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
	u8 *data = (void *)block;
	int i;

	/* a free block must still carry the free poison pattern */
	for (i = sizeof(struct dma_block); i < pool->size; i++) {
		if (data[i] == POOL_POISON_FREED)
			continue;
		dev_err(pool->dev, "%s %s, %p (corrupted)\n", __func__,
			pool->name, block);

		/*
		 * Dump the first 4 bytes even if they are not
		 * POOL_POISON_FREED
		 */
		print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
			       data, pool->size, 1);
		break;
	}

	if (!want_init_on_alloc(mem_flags))
		memset(block, POOL_POISON_ALLOCATED, pool->size);
}

static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = pool->next_block;
	struct dma_page *page;

	page = pool_find_page(pool, dma);
	if (!page) {
		dev_err(pool->dev, "%s %s, %p/%pad (bad dma)\n",
			__func__, pool->name, vaddr, &dma);
		return true;
	}

	/* walk the free list to catch a double free */
	while (block) {
		if (block != vaddr) {
			block = block->next_block;
			continue;
		}
		dev_err(pool->dev, "%s %s, dma %pad already free\n",
			__func__, pool->name, &dma);
		return true;
	}

	memset(vaddr, POOL_POISON_FREED, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
}
#else
static void pool_check_block(struct dma_pool *pool, struct dma_block *block,
			     gfp_t mem_flags)
{
}

static bool pool_block_err(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	if (want_init_on_free())
		memset(vaddr, 0, pool->size);
	return false;
}

static void pool_init_page(struct dma_pool *pool, struct dma_page *page)
{
}
#endif

static struct dma_block *pool_block_pop(struct dma_pool *pool)
{
	struct dma_block *block = pool->next_block;

	if (block) {
		pool->next_block = block->next_block;
		pool->nr_active++;
	}
	return block;
}

static void pool_block_push(struct dma_pool *pool, struct dma_block *block,
			    dma_addr_t dma)
{
	block->dma = dma;
	block->next_block = pool->next_block;
	pool->next_block = block;
}
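
/*
 * Note: pool_block_pop() and pool_block_push() expect pool->lock to be
 * held; together they treat pool->next_block as a LIFO free list, so the
 * most recently freed block is the first one handed out again.
 */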

/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: not in_interrupt()
 *
 * Given one of these pools, dma_pool_alloc() may be used to allocate memory.
 * Such memory will all have "consistent" DMA mappings, accessible by the
 * device and its driver without using cache flushing primitives. The actual
 * size of blocks allocated may be larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary. This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 *
 * Return: a dma allocation pool with the requested characteristics, or
 * %NULL if one can't be created.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty;

	if (!dev)
		return NULL;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0 || size > INT_MAX)
		return NULL;
	if (size < sizeof(struct dma_block))
		size = sizeof(struct dma_block);

	size = ALIGN(size, align);
	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	boundary = min(boundary, allocation);

	retval = kzalloc(sizeof(*retval), GFP_KERNEL);
	if (!retval)
		return retval;

	strscpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;
	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	empty = list_empty(&dev->dma_pools);
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);

			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
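
/*
 * Example (illustrative sketch, not part of this file): a driver whose
 * hardware uses 32-byte descriptors that must never cross a 4 KiB boundary
 * could create its pool like this. The pool name, function name and sizes
 * are hypothetical.
 */
static inline struct dma_pool *example_create_desc_pool(struct device *dev)
{
	/* 32-byte blocks, 32-byte aligned, never crossing 4 KiB */
	return dma_pool_create("desc", dev, 32, 32, 4096);
}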

static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int next_boundary = pool->boundary, offset = 0;
	struct dma_block *block, *first = NULL, *last = NULL;

	pool_init_page(pool, page);
	while (offset + pool->size <= pool->allocation) {
		if (offset + pool->size > next_boundary) {
			offset = next_boundary;
			next_boundary += pool->boundary;
			continue;
		}

		block = page->vaddr + offset;
		block->dma = page->dma + offset;
		block->next_block = NULL;

		if (last)
			last->next_block = block;
		else
			first = block;
		last = block;

		offset += pool->size;
		pool->nr_blocks++;
	}

	last->next_block = pool->next_block;
	pool->next_block = first;

	list_add(&page->page_list, &pool->page_list);
	pool->nr_pages++;
}
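
/*
 * Worked example for pool_initialise_page() with illustrative numbers:
 * size = 96, boundary = 256, allocation = 4096. Blocks land at offsets 0
 * and 96; the next candidate (192) would end at 288 and cross the 256
 * boundary, so offset jumps to 256. The layout per 256-byte stride is
 * therefore {0, 96}, {256, 352}, {512, 608}, ... and no block straddles
 * a boundary.
 */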

static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;

	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (!page->vaddr) {
		kfree(page);
		return NULL;
	}

	return page;
}

/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	struct dma_page *page, *tmp;
	bool empty, busy = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	empty = list_empty(&pool->dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	if (pool->nr_active) {
		dev_err(pool->dev, "%s %s busy\n", __func__, pool->name);
		busy = true;
	}

	list_for_each_entry_safe(page, tmp, &pool->page_list, page_list) {
		if (!busy)
			dma_free_coherent(pool->dev, pool->allocation,
					  page->vaddr, page->dma);
		list_del(&page->page_list);
		kfree(page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);

/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * Return: the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	struct dma_block *block;
	struct dma_page *page;
	unsigned long flags;

	might_alloc(mem_flags);

	spin_lock_irqsave(&pool->lock, flags);
	block = pool_block_pop(pool);
	if (!block) {
		/*
		 * pool_alloc_page() might sleep, so temporarily drop
		 * &pool->lock
		 */
		spin_unlock_irqrestore(&pool->lock, flags);

		page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
		if (!page)
			return NULL;

		spin_lock_irqsave(&pool->lock, flags);
		pool_initialise_page(pool, page);
		block = pool_block_pop(pool);
	}
	spin_unlock_irqrestore(&pool->lock, flags);

	*handle = block->dma;
	pool_check_block(pool, block, mem_flags);
	if (want_init_on_alloc(mem_flags))
		memset(block, 0, pool->size);

	return block;
}
EXPORT_SYMBOL(dma_pool_alloc);

/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_block *block = vaddr;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	if (!pool_block_err(pool, vaddr, dma)) {
		pool_block_push(pool, block, dma);
		pool->nr_active--;
	}
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
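
/*
 * Example (illustrative sketch, not part of this file): a typical
 * alloc/use/free round trip. 'dev', the function name and the block size
 * are hypothetical.
 */
static int example_pool_roundtrip(struct device *dev)
{
	struct dma_pool *pool;
	dma_addr_t handle;
	void *vaddr;

	pool = dma_pool_create("example", dev, 64, 8, 0);
	if (!pool)
		return -ENOMEM;

	/* vaddr is for the CPU; handle is what the device gets to see */
	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
	if (!vaddr) {
		dma_pool_destroy(pool);
		return -ENOMEM;
	}

	/* ... program 'handle' into the hardware, touch the block via 'vaddr' ... */

	dma_pool_free(pool, vaddr, handle);
	dma_pool_destroy(pool);
	return 0;
}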

/*
 * Managed DMA pool
 */
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create(). A DMA pool created with this function is
 * automatically destroyed on driver detach.
 *
 * Return: a managed dma allocation pool with the requested
 * characteristics, or %NULL if one can't be created.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
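
/*
 * Example (illustrative sketch, not part of this file): in a driver's
 * probe() the managed variant removes the need for an explicit
 * dma_pool_destroy() in the error and detach paths. The function name,
 * pool name and sizes are hypothetical.
 */
static int example_probe(struct device *dev)
{
	struct dma_pool *pool;

	pool = dmam_pool_create("example", dev, 128, 16, 0);
	if (!pool)
		return -ENOMEM;

	/* the pool is destroyed automatically when 'dev' is unbound */
	return 0;
}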

/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);