Lines Matching full:block

23 * For DMA buffers the storage is sub-divided into so-called blocks. Each block
24 * has its own memory buffer. The size of the block is the granularity at which
26 * basic unit of data exchange from one sample to one block decreases the
29 * sample the overhead will be x for each sample. Whereas when using a block
37 * them with data. Blocks on the outgoing queue have been filled with data and
40 * A block can be in one of the following states:
42 * the block.
45 * * Owned by the DMA controller: The DMA controller is processing the block
50 * * Dead: A block that is dead has been marked to be freed. It might still
53 * incoming or outgoing queue the block will be freed.
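
The state names above map onto an enum defined in the IIO DMA buffer header rather than in the lines matched here. As a hedged sketch, the values below are the ones actually referenced in this file (DEQUEUED, QUEUED, ACTIVE, DONE, DEAD); their exact declaration and ordering is an assumption.

/*
 * Sketch of the block state set used throughout this file; the real
 * definition lives in the DMA buffer header, so treat names and order
 * as an assumption.
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,	/* owned by the application */
	IIO_BLOCK_STATE_QUEUED,		/* waiting on the incoming queue */
	IIO_BLOCK_STATE_ACTIVE,		/* owned by the DMA controller */
	IIO_BLOCK_STATE_DONE,		/* completed, on the outgoing queue */
	IIO_BLOCK_STATE_DEAD,		/* marked to be freed */
};
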
56 * with both the block structure as well as the storage memory for the block
57 * will be freed when the last reference to the block is dropped. This means a
58 * block must not be accessed without holding a reference.
66 * converter to the memory region of the block. Once the DMA transfer has been
68 * block.
70 * Prior to this it must set the bytes_used field of the block so that it contains
72 * size of the block, but if the DMA hardware has certain alignment requirements
75 * datum, i.e. the block must not contain partial samples.
77 * The driver must call iio_dma_buffer_block_done() for each block it has
79 * perform a DMA transfer for the block, e.g. because the buffer was disabled
80 * before the block transfer was started. In this case it should set bytes_used
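
To illustrate the contract described in the comment fragments above, a driver's DMA completion handler would record the transferred length and then hand the block back to the queue. This is a minimal sketch: foo_dma_done() and the way the block pointer reaches the callback are assumptions; only iio_dma_buffer_block_done() and the block fields come from this file.

/* Hypothetical completion callback; foo_dma_done() is not kernel API. */
static void foo_dma_done(void *data)
{
	struct iio_dma_buffer_block *block = data;

	/*
	 * Report how many bytes were actually transferred; this must be
	 * a multiple of the sample size, i.e. no partial samples.
	 */
	block->bytes_used = block->size;
	iio_dma_buffer_block_done(block);
}
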
97 struct iio_dma_buffer_block *block = container_of(kref, in iio_buffer_block_release() local
100 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); in iio_buffer_block_release()
102 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
103 block->vaddr, block->phys_addr); in iio_buffer_block_release()
105 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
106 kfree(block); in iio_buffer_block_release()
109 static void iio_buffer_block_get(struct iio_dma_buffer_block *block) in iio_buffer_block_get() argument
111 kref_get(&block->kref); in iio_buffer_block_get()
114 static void iio_buffer_block_put(struct iio_dma_buffer_block *block) in iio_buffer_block_put() argument
116 kref_put(&block->kref, iio_buffer_block_release); in iio_buffer_block_put()
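
The two wrappers above are thin kref helpers. As a purely illustrative sketch of the lifetime rule from the comment block (a block must not be accessed without holding a reference), any code that touches a block outside the queue's own bookkeeping should bracket the access with get/put; sketch_peek_block() is a made-up name.

/* Illustrative only: hold a reference for as long as the block is used. */
static void sketch_peek_block(struct iio_dma_buffer_block *block)
{
	iio_buffer_block_get(block);
	/* ... safe to access block->vaddr, block->bytes_used, ... here ... */
	iio_buffer_block_put(block);
}
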
128 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_cleanup_worker() local
135 list_for_each_entry_safe(block, _block, &block_list, head) in iio_dma_buffer_cleanup_worker()
136 iio_buffer_block_release(&block->kref); in iio_dma_buffer_cleanup_worker()
142 struct iio_dma_buffer_block *block; in iio_buffer_block_release_atomic() local
145 block = container_of(kref, struct iio_dma_buffer_block, kref); in iio_buffer_block_release_atomic()
148 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); in iio_buffer_block_release_atomic()
157 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) in iio_buffer_block_put_atomic() argument
159 kref_put(&block->kref, iio_buffer_block_release_atomic); in iio_buffer_block_put_atomic()
170 struct iio_dma_buffer_block *block; in iio_dma_buffer_alloc_block() local
172 block = kzalloc(sizeof(*block), GFP_KERNEL); in iio_dma_buffer_alloc_block()
173 if (!block) in iio_dma_buffer_alloc_block()
176 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
177 &block->phys_addr, GFP_KERNEL); in iio_dma_buffer_alloc_block()
178 if (!block->vaddr) { in iio_dma_buffer_alloc_block()
179 kfree(block); in iio_dma_buffer_alloc_block()
183 block->size = size; in iio_dma_buffer_alloc_block()
184 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_alloc_block()
185 block->queue = queue; in iio_dma_buffer_alloc_block()
186 INIT_LIST_HEAD(&block->head); in iio_dma_buffer_alloc_block()
187 kref_init(&block->kref); in iio_dma_buffer_alloc_block()
191 return block; in iio_dma_buffer_alloc_block()
194 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in _iio_dma_buffer_block_done() argument
196 struct iio_dma_buffer_queue *queue = block->queue; in _iio_dma_buffer_block_done()
202 if (block->state != IIO_BLOCK_STATE_DEAD) { in _iio_dma_buffer_block_done()
203 block->state = IIO_BLOCK_STATE_DONE; in _iio_dma_buffer_block_done()
204 list_add_tail(&block->head, &queue->outgoing); in _iio_dma_buffer_block_done()
209 * iio_dma_buffer_block_done() - Indicate that a block has been completed
210 * @block: The completed block
212 * Should be called when the DMA controller has finished handling the block to
213 * pass back ownership of the block to the queue.
215 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in iio_dma_buffer_block_done() argument
217 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done()
221 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_done()
224 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_done()
230 * iio_dma_buffer_block_list_abort() - Indicate that a list of blocks has been
236 * stopped. This will set bytes_used to 0 for each block in the list and then
242 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_block_list_abort() local
246 list_for_each_entry_safe(block, _block, list, head) { in iio_dma_buffer_block_list_abort()
247 list_del(&block->head); in iio_dma_buffer_block_list_abort()
248 block->bytes_used = 0; in iio_dma_buffer_block_list_abort()
249 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_list_abort()
250 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_list_abort()
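
For context, a sketch of how iio_dma_buffer_block_list_abort() is typically reached from a driver: the queue's abort() callback stops the DMA channel and then hands its list of in-flight blocks back. struct foo_dma_buffer, to_foo_dma_buffer() and the ->chan/->active members are assumptions standing in for driver-private state; dmaengine_terminate_sync() and iio_dma_buffer_block_list_abort() are the only interfaces taken as given.

/* Hypothetical driver-private state; names are placeholders. */
struct foo_dma_buffer {
	struct iio_dma_buffer_queue queue;
	struct dma_chan *chan;
	struct list_head active;	/* blocks currently submitted to DMA */
};

static struct foo_dma_buffer *to_foo_dma_buffer(struct iio_buffer *buffer)
{
	return container_of(buffer, struct foo_dma_buffer, queue.buffer);
}

static void foo_dma_buffer_abort(struct iio_dma_buffer_queue *queue)
{
	struct foo_dma_buffer *foo = to_foo_dma_buffer(&queue->buffer);

	/* Stop the channel so no further completion callbacks arrive... */
	dmaengine_terminate_sync(foo->chan);
	/* ...then return every still-pending block with bytes_used = 0. */
	iio_dma_buffer_block_list_abort(queue, &foo->active);
}
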
258 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) in iio_dma_block_reusable() argument
261 * If the core owns the block it can be re-used. This should be the in iio_dma_block_reusable()
263 * not support abort and has not given back the block yet. in iio_dma_block_reusable()
265 switch (block->state) { in iio_dma_block_reusable()
285 struct iio_dma_buffer_block *block; in iio_dma_buffer_request_update() local
293 * buffering scheme with usually one block at a time being used by the in iio_dma_buffer_request_update()
310 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
313 if (block && (!iio_dma_block_reusable(block) || !try_reuse)) in iio_dma_buffer_request_update()
314 block->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_request_update()
329 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
330 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_request_update()
332 iio_buffer_block_put(block); in iio_dma_buffer_request_update()
333 block = NULL; in iio_dma_buffer_request_update()
335 block->size = size; in iio_dma_buffer_request_update()
338 block = NULL; in iio_dma_buffer_request_update()
341 if (!block) { in iio_dma_buffer_request_update()
342 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
343 if (!block) { in iio_dma_buffer_request_update()
347 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
350 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_request_update()
351 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
362 struct iio_dma_buffer_block *block) in iio_dma_buffer_submit_block() argument
367 * If the hardware has already been removed we put the block into in iio_dma_buffer_submit_block()
374 block->state = IIO_BLOCK_STATE_ACTIVE; in iio_dma_buffer_submit_block()
375 iio_buffer_block_get(block); in iio_dma_buffer_submit_block()
376 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
388 iio_buffer_block_put(block); in iio_dma_buffer_submit_block()
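
The queue->ops->submit() hook invoked above is where the driver actually programs a transfer for the block. Below is a sketch of what a dmaengine based implementation might look like, reusing the hypothetical foo_dma_buffer state and foo_dma_done() callback from the earlier sketches; alignment and length clamping are omitted, and only the dmaengine calls themselves are real API.

static int foo_dma_buffer_submit(struct iio_dma_buffer_queue *queue,
				 struct iio_dma_buffer_block *block)
{
	struct foo_dma_buffer *foo = to_foo_dma_buffer(&queue->buffer);
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;
	int ret;

	/* Prepare a device-to-memory transfer covering the whole block. */
	desc = dmaengine_prep_slave_single(foo->chan, block->phys_addr,
					   block->size, DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -ENOMEM;

	/* Completion sketch from above hands the block back to the queue. */
	desc->callback = foo_dma_done;
	desc->callback_param = block;

	cookie = dmaengine_submit(desc);
	ret = dma_submit_error(cookie);
	if (ret)
		return ret;

	dma_async_issue_pending(foo->chan);

	return 0;
}
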
406 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_enable() local
410 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
411 list_del(&block->head); in iio_dma_buffer_enable()
412 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
445 struct iio_dma_buffer_block *block) in iio_dma_buffer_enqueue() argument
447 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_enqueue()
448 iio_buffer_block_put(block); in iio_dma_buffer_enqueue()
450 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
452 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_enqueue()
453 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
460 struct iio_dma_buffer_block *block; in iio_dma_buffer_dequeue() local
463 block = list_first_entry_or_null(&queue->outgoing, struct in iio_dma_buffer_dequeue()
465 if (block != NULL) { in iio_dma_buffer_dequeue()
466 list_del(&block->head); in iio_dma_buffer_dequeue()
467 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_dequeue()
471 return block; in iio_dma_buffer_dequeue()
487 struct iio_dma_buffer_block *block; in iio_dma_buffer_read() local
496 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
497 if (block == NULL) { in iio_dma_buffer_read()
502 queue->fileio.active_block = block; in iio_dma_buffer_read()
504 block = queue->fileio.active_block; in iio_dma_buffer_read()
508 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
509 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
511 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
518 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
520 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
542 struct iio_dma_buffer_block *block; in iio_dma_buffer_data_available() local
546 * For counting the available bytes we'll use the size of the block, not
547 * the number of actual bytes available in the block. Otherwise it is in iio_dma_buffer_data_available()
557 list_for_each_entry(block, &queue->outgoing, head) in iio_dma_buffer_data_available()
558 data_available += block->size; in iio_dma_buffer_data_available()
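
Seen from user space, all of the above sits behind the IIO buffer character device: poll() blocks until iio_dma_buffer_data_available() reports a completed block, and read() then drains the active block as in iio_dma_buffer_read(). A minimal sketch, assuming the buffer has already been configured and enabled through sysfs and that the device node is /dev/iio:device0:

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	struct pollfd pfd;
	ssize_t n;

	pfd.fd = open("/dev/iio:device0", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLIN;

	/* Wait until at least one block is on the outgoing queue, then read. */
	while (poll(&pfd, 1, -1) > 0) {
		n = read(pfd.fd, buf, sizeof(buf));
		if (n <= 0)
			break;
		printf("got %zd bytes\n", n);
	}

	close(pfd.fd);
	return 0;
}
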