/*
 * DMA Pool allocator
 *
 * This allocator returns small blocks of a given size which are DMA-able by
 * the given device.  It uses the dma_alloc_coherent page allocator to get
 * new pages, then splits them up into blocks of the required size.
 *
 * The current design of this allocator is fairly simple.  The pool is
 * represented by the 'struct dma_pool' which keeps a doubly-linked list of
 * allocated pages.  Each page in the page_list is split into blocks of at
 * least 'size' bytes.  Free blocks are tracked in an unsorted singly-linked
 * list of free blocks within the page.  Used blocks aren't tracked, but we
 * keep a count of how many are currently allocated from each page.
 */
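/*
 * Typical use (an illustrative sketch, not part of this file; the device
 * pointer, pool name, and sizes below are assumptions for the example):
 *
 *	struct dma_pool *pool;
 *	dma_addr_t handle;
 *	void *vaddr;
 *
 *	pool = dma_pool_create("ex_desc", dev, 64, 8, 0);
 *	if (!pool)
 *		return -ENOMEM;
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
 *	if (vaddr) {
 *		... hand 'handle' to the device, touch 'vaddr' from the CPU ...
 *		dma_pool_free(pool, vaddr, handle);
 *	}
 *	dma_pool_destroy(pool);
 */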
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/dmapool.h>
#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/poison.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/types.h>

#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB_DEBUG_ON)
#define DMAPOOL_DEBUG 1
#endif
struct dma_pool {		/* the pool */
	struct list_head page_list;
	spinlock_t lock;
	size_t size;
	struct device *dev;
	size_t allocation;
	size_t boundary;
	char name[32];
	struct list_head pools;
};

struct dma_page {		/* cacheable header for 'allocation' bytes */
	struct list_head page_list;
	void *vaddr;
	dma_addr_t dma;
	unsigned int in_use;
	unsigned int offset;
};

static DEFINE_MUTEX(pools_lock);
static DEFINE_MUTEX(pools_reg_lock);
static ssize_t show_pools(struct device *dev, struct device_attribute *attr,
			  char *buf)
{
	unsigned temp;
	unsigned size;
	char *next;
	struct dma_page *page;
	struct dma_pool *pool;

	next = buf;
	size = PAGE_SIZE;

	temp = scnprintf(next, size, "poolinfo - 0.1\n");
	size -= temp;
	next += temp;

	mutex_lock(&pools_lock);
	list_for_each_entry(pool, &dev->dma_pools, pools) {
		unsigned pages = 0;
		unsigned blocks = 0;

		spin_lock_irq(&pool->lock);
		list_for_each_entry(page, &pool->page_list, page_list) {
			pages++;
			blocks += page->in_use;
		}
		spin_unlock_irq(&pool->lock);

		/* per-pool info, no real statistics yet */
		temp = scnprintf(next, size, "%-16s %4u %4zu %4zu %2u\n",
				 pool->name, blocks,
				 pages * (pool->allocation / pool->size),
				 pool->size, pages);
		size -= temp;
		next += temp;
	}
	mutex_unlock(&pools_lock);

	return PAGE_SIZE - size;
}

static DEVICE_ATTR(pools, S_IRUGO, show_pools, NULL);
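/*
 * Reading the resulting sysfs 'pools' attribute yields one line per pool,
 * for example (values illustrative only):
 *
 *	poolinfo - 0.1
 *	ex_desc            12   64   64  1
 *
 * i.e. pool name, blocks currently in use, total blocks, block size in
 * bytes, and pages held by the pool.
 */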
/**
 * dma_pool_create - Creates a pool of consistent memory blocks, for dma.
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @boundary: returned blocks won't cross this power of two boundary
 * Context: !in_interrupt()
 *
 * Returns a dma allocation pool with the requested characteristics, or
 * null if one can't be created.  Given one of these pools, dma_pool_alloc()
 * may be used to allocate memory.  Such memory will all have "consistent"
 * DMA mappings, accessible by the device and its driver without using
 * cache flushing primitives.  The actual size of blocks allocated may be
 * larger than requested because of alignment.
 *
 * If @boundary is nonzero, objects returned from dma_pool_alloc() won't
 * cross that size boundary.  This is useful for devices which have
 * addressing restrictions on individual DMA transfers, such as not crossing
 * boundaries of 4KBytes.
 */
struct dma_pool *dma_pool_create(const char *name, struct device *dev,
				 size_t size, size_t align, size_t boundary)
{
	struct dma_pool *retval;
	size_t allocation;
	bool empty = false;

	if (align == 0)
		align = 1;
	else if (align & (align - 1))
		return NULL;

	if (size == 0)
		return NULL;
	else if (size < 4)
		size = 4;

	if ((size % align) != 0)
		size = ALIGN(size, align);

	allocation = max_t(size_t, size, PAGE_SIZE);

	if (!boundary)
		boundary = allocation;
	else if ((boundary < size) || (boundary & (boundary - 1)))
		return NULL;

	retval = kmalloc_node(sizeof(*retval), GFP_KERNEL, dev_to_node(dev));
	if (!retval)
		return retval;

	strlcpy(retval->name, name, sizeof(retval->name));

	retval->dev = dev;

	INIT_LIST_HEAD(&retval->page_list);
	spin_lock_init(&retval->lock);
	retval->size = size;
	retval->boundary = boundary;
	retval->allocation = allocation;

	INIT_LIST_HEAD(&retval->pools);

	/*
	 * pools_lock ensures that the ->dma_pools list does not get corrupted.
	 * pools_reg_lock ensures that there is not a race between
	 * dma_pool_create() and dma_pool_destroy() or within dma_pool_create()
	 * when the first invocation of dma_pool_create() failed on
	 * device_create_file() and the second assumes that it has been done (I
	 * know it is a short window).
	 */
	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	if (list_empty(&dev->dma_pools))
		empty = true;
	list_add(&retval->pools, &dev->dma_pools);
	mutex_unlock(&pools_lock);
	if (empty) {
		int err;

		err = device_create_file(dev, &dev_attr_pools);
		if (err) {
			mutex_lock(&pools_lock);
			list_del(&retval->pools);
			mutex_unlock(&pools_lock);
			mutex_unlock(&pools_reg_lock);
			kfree(retval);
			return NULL;
		}
	}
	mutex_unlock(&pools_reg_lock);
	return retval;
}
EXPORT_SYMBOL(dma_pool_create);
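/*
 * Example (illustrative): a controller whose 96-byte descriptors must not
 * cross 4 KiB lines could request
 *
 *	pool = dma_pool_create("ex_ring", dev, 96, 16, 4096);
 *
 * and every block returned by dma_pool_alloc() would then sit entirely
 * within one 4096-byte boundary.  Name, sizes, and device are assumptions
 * for the example.
 */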
static void pool_initialise_page(struct dma_pool *pool, struct dma_page *page)
{
	unsigned int offset = 0;
	unsigned int next_boundary = pool->boundary;

	do {
		unsigned int next = offset + pool->size;
		if (unlikely((next + pool->size) >= next_boundary)) {
			next = next_boundary;
			next_boundary += pool->boundary;
		}
		*(int *)(page->vaddr + offset) = next;
		offset = next;
	} while (offset < pool->allocation);
}
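/*
 * Worked example with illustrative numbers: for pool->size = 96,
 * pool->boundary = 1024 and pool->allocation = 4096, the loop stores the
 * free chain 0 -> 96 -> 192 -> ... -> 864 -> 1024 -> 1120 -> ...  At
 * offset 864 the candidate next offset of 960 would put a block straddling
 * the 1024-byte line, so 'next' jumps straight to the boundary and bytes
 * 960..1023 are simply never handed out.  The chain terminates once an
 * offset reaches pool->allocation.
 */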
static struct dma_page *pool_alloc_page(struct dma_pool *pool, gfp_t mem_flags)
{
	struct dma_page *page;

	page = kmalloc(sizeof(*page), mem_flags);
	if (!page)
		return NULL;
	page->vaddr = dma_alloc_coherent(pool->dev, pool->allocation,
					 &page->dma, mem_flags);
	if (page->vaddr) {
#ifdef	DMAPOOL_DEBUG
		memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
		pool_initialise_page(pool, page);
		page->in_use = 0;
		page->offset = 0;
	} else {
		kfree(page);
		page = NULL;
	}
	return page;
}
static inline bool is_page_busy(struct dma_page *page)
{
	return page->in_use != 0;
}
static void pool_free_page(struct dma_pool *pool, struct dma_page *page)
{
	dma_addr_t dma = page->dma;

#ifdef	DMAPOOL_DEBUG
	memset(page->vaddr, POOL_POISON_FREED, pool->allocation);
#endif
	dma_free_coherent(pool->dev, pool->allocation, page->vaddr, dma);
	list_del(&page->page_list);
	kfree(page);
}
/**
 * dma_pool_destroy - destroys a pool of dma memory blocks.
 * @pool: dma pool that will be destroyed
 * Context: !in_interrupt()
 *
 * Caller guarantees that no more memory from the pool is in use,
 * and that nothing will try to use the pool after this call.
 */
void dma_pool_destroy(struct dma_pool *pool)
{
	bool empty = false;

	if (unlikely(!pool))
		return;

	mutex_lock(&pools_reg_lock);
	mutex_lock(&pools_lock);
	list_del(&pool->pools);
	if (pool->dev && list_empty(&pool->dev->dma_pools))
		empty = true;
	mutex_unlock(&pools_lock);
	if (empty)
		device_remove_file(pool->dev, &dev_attr_pools);
	mutex_unlock(&pools_reg_lock);

	while (!list_empty(&pool->page_list)) {
		struct dma_page *page;
		page = list_entry(pool->page_list.next,
				  struct dma_page, page_list);
		if (is_page_busy(page)) {
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_destroy %s, %p busy\n",
					pool->name, page->vaddr);
			else
				pr_err("dma_pool_destroy %s, %p busy\n",
				       pool->name, page->vaddr);
			/* leak the still-in-use consistent memory */
			list_del(&page->page_list);
			kfree(page);
		} else
			pool_free_page(pool, page);
	}

	kfree(pool);
}
EXPORT_SYMBOL(dma_pool_destroy);
/**
 * dma_pool_alloc - get a block of consistent memory
 * @pool: dma pool that will produce the block
 * @mem_flags: GFP_* bitmask
 * @handle: pointer to dma address of block
 *
 * This returns the kernel virtual address of a currently unused block,
 * and reports its dma address through the handle.
 * If such a memory block can't be allocated, %NULL is returned.
 */
void *dma_pool_alloc(struct dma_pool *pool, gfp_t mem_flags,
		     dma_addr_t *handle)
{
	unsigned long flags;
	struct dma_page *page;
	size_t offset;
	void *retval;

	might_sleep_if(gfpflags_allow_blocking(mem_flags));

	spin_lock_irqsave(&pool->lock, flags);
	list_for_each_entry(page, &pool->page_list, page_list) {
		if (page->offset < pool->allocation)
			goto ready;
	}

	/* pool_alloc_page() might sleep, so temporarily drop &pool->lock */
	spin_unlock_irqrestore(&pool->lock, flags);

	page = pool_alloc_page(pool, mem_flags & (~__GFP_ZERO));
	if (!page)
		return NULL;

	spin_lock_irqsave(&pool->lock, flags);

	list_add(&page->page_list, &pool->page_list);
 ready:
	page->in_use++;
	offset = page->offset;
	page->offset = *(int *)(page->vaddr + offset);
	retval = offset + page->vaddr;
	*handle = offset + page->dma;
#ifdef	DMAPOOL_DEBUG
	{
		int i;
		u8 *data = retval;
		/* page->offset is stored in first 4 bytes */
		for (i = sizeof(page->offset); i < pool->size; i++) {
			if (data[i] == POOL_POISON_FREED)
				continue;
			if (pool->dev)
				dev_err(pool->dev,
					"dma_pool_alloc %s, %p (corrupted)\n",
					pool->name, retval);
			else
				pr_err("dma_pool_alloc %s, %p (corrupted)\n",
				       pool->name, retval);

			/*
			 * Dump the first 4 bytes even if they are not
			 * POOL_POISON_FREED
			 */
			print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1,
					data, pool->size, 1);
			break;
		}
	}
	if (!(mem_flags & __GFP_ZERO))
		memset(retval, POOL_POISON_ALLOCATED, pool->size);
#endif
	spin_unlock_irqrestore(&pool->lock, flags);

	if (mem_flags & __GFP_ZERO)
		memset(retval, 0, pool->size);

	return retval;
}
EXPORT_SYMBOL(dma_pool_alloc);
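/*
 * Note on __GFP_ZERO above (a reading of the code, not upstream doc): the
 * flag is masked off before pool_alloc_page(), and only the block actually
 * being returned is zeroed, so callers see the same zeroing semantics
 * whether or not their block came from a freshly allocated page.  An
 * illustrative caller:
 *
 *	vaddr = dma_pool_alloc(pool, GFP_ATOMIC | __GFP_ZERO, &handle);
 *
 * which is usable in atomic context and returns a zeroed block.
 */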
static struct dma_page *pool_find_page(struct dma_pool *pool, dma_addr_t dma)
{
	struct dma_page *page;

	list_for_each_entry(page, &pool->page_list, page_list) {
		if (dma < page->dma)
			continue;
		if ((dma - page->dma) < pool->allocation)
			return page;
	}
	return NULL;
}
/**
 * dma_pool_free - put block back into dma pool
 * @pool: the dma pool holding the block
 * @vaddr: virtual address of block
 * @dma: dma address of block
 *
 * Caller promises neither device nor driver will again touch this block
 * unless it is first re-allocated.
 */
void dma_pool_free(struct dma_pool *pool, void *vaddr, dma_addr_t dma)
{
	struct dma_page *page;
	unsigned long flags;
	unsigned int offset;

	spin_lock_irqsave(&pool->lock, flags);
	page = pool_find_page(pool, dma);
	if (!page) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p/%lx (bad dma)\n",
				pool->name, vaddr, (unsigned long)dma);
		else
			pr_err("dma_pool_free %s, %p/%lx (bad dma)\n",
			       pool->name, vaddr, (unsigned long)dma);
		return;
	}

	offset = vaddr - page->vaddr;
#ifdef	DMAPOOL_DEBUG
	if ((dma - page->dma) != offset) {
		spin_unlock_irqrestore(&pool->lock, flags);
		if (pool->dev)
			dev_err(pool->dev,
				"dma_pool_free %s, %p (bad vaddr)/%pad\n",
				pool->name, vaddr, &dma);
		else
			pr_err("dma_pool_free %s, %p (bad vaddr)/%pad\n",
			       pool->name, vaddr, &dma);
		return;
	}
	{
		unsigned int chain = page->offset;
		while (chain < pool->allocation) {
			if (chain != offset) {
				chain = *(int *)(page->vaddr + chain);
				continue;
			}
			spin_unlock_irqrestore(&pool->lock, flags);
			if (pool->dev)
				dev_err(pool->dev, "dma_pool_free %s, dma %pad already free\n",
					pool->name, &dma);
			else
				pr_err("dma_pool_free %s, dma %pad already free\n",
				       pool->name, &dma);
			return;
		}
	}
	memset(vaddr, POOL_POISON_FREED, pool->size);
#endif

	page->in_use--;
	*(int *)vaddr = page->offset;
	page->offset = offset;
	/*
	 * Resist a temptation to do
	 *    if (!is_page_busy(page)) pool_free_page(pool, page);
	 * Better have a few empty pages hang around.
	 */
	spin_unlock_irqrestore(&pool->lock, flags);
}
EXPORT_SYMBOL(dma_pool_free);
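/*
 * Illustrative failure mode caught by the DMAPOOL_DEBUG chain walk above:
 * freeing the same block twice (names are example-only):
 *
 *	vaddr = dma_pool_alloc(pool, GFP_KERNEL, &handle);
 *	dma_pool_free(pool, vaddr, handle);
 *	dma_pool_free(pool, vaddr, handle);
 *
 * The second call finds 'offset' already on the page's free chain and
 * logs "dma_pool_free ... already free" instead of corrupting the list.
 */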
static void dmam_pool_release(struct device *dev, void *res)
{
	struct dma_pool *pool = *(struct dma_pool **)res;

	dma_pool_destroy(pool);
}

static int dmam_pool_match(struct device *dev, void *res, void *match_data)
{
	return *(struct dma_pool **)res == match_data;
}

/**
 * dmam_pool_create - Managed dma_pool_create()
 * @name: name of pool, for diagnostics
 * @dev: device that will be doing the DMA
 * @size: size of the blocks in this pool.
 * @align: alignment requirement for blocks; must be a power of two
 * @allocation: returned blocks won't cross this boundary (or zero)
 *
 * Managed dma_pool_create().  DMA pool created with this function is
 * automatically destroyed on driver detach.
 */
struct dma_pool *dmam_pool_create(const char *name, struct device *dev,
				  size_t size, size_t align, size_t allocation)
{
	struct dma_pool **ptr, *pool;

	ptr = devres_alloc(dmam_pool_release, sizeof(*ptr), GFP_KERNEL);
	if (!ptr)
		return NULL;

	pool = *ptr = dma_pool_create(name, dev, size, align, allocation);
	if (pool)
		devres_add(dev, ptr);
	else
		devres_free(ptr);

	return pool;
}
EXPORT_SYMBOL(dmam_pool_create);
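/*
 * Illustrative probe-time use (the function and names are assumptions, not
 * from this file): because the pool is devres-managed, the driver needs no
 * explicit destroy on its error or detach paths.
 *
 *	static int ex_probe(struct device *dev)
 *	{
 *		struct dma_pool *pool;
 *
 *		pool = dmam_pool_create("ex_desc", dev, 64, 8, 0);
 *		if (!pool)
 *			return -ENOMEM;
 *		return ex_init_rings(dev, pool);
 *	}
 *
 * where ex_init_rings() is a hypothetical helper; the pool itself is
 * destroyed automatically when the device detaches.
 */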
/**
 * dmam_pool_destroy - Managed dma_pool_destroy()
 * @pool: dma pool that will be destroyed
 *
 * Managed dma_pool_destroy().
 */
void dmam_pool_destroy(struct dma_pool *pool)
{
	struct device *dev = pool->dev;

	WARN_ON(devres_release(dev, dmam_pool_release, dmam_pool_match, pool));
}
EXPORT_SYMBOL(dmam_pool_destroy);