Lines Matching full:queue

35 * means of two queues: the incoming queue and the outgoing queue. Blocks on the
36 * incoming queue are waiting for the DMA controller to pick them up and fill
37 * them with data. Blocks on the outgoing queue have been filled with data and
53 * incoming or outgoing queue the block will be freed.
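The lifecycle described above is easiest to see from the driver side: the queue's submit callback hands a block from the incoming queue to the hardware, and the completion handler passes it back with iio_dma_buffer_block_done(), which moves it to the outgoing queue. A minimal sketch, assuming a hypothetical my_dev driver and a my_hw_start_dma() helper that programs the actual controller:

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/iio/buffer-dma.h>

struct my_dev {
	struct iio_dma_buffer_queue queue;
};

/* Hypothetical helper that programs the DMA controller; defined elsewhere. */
int my_hw_start_dma(struct my_dev *mdev, dma_addr_t addr, size_t size);

/* ops->submit: called by the core for every block on the incoming queue. */
static int my_dev_submit_block(struct iio_dma_buffer_queue *queue,
	struct iio_dma_buffer_block *block)
{
	struct my_dev *mdev = container_of(queue, struct my_dev, queue);

	/* The hardware owns the block until the completion interrupt fires. */
	return my_hw_start_dma(mdev, block->phys_addr, block->size);
}

/* Called from the DMA completion interrupt or a threaded IRQ handler. */
static void my_dev_dma_complete(struct my_dev *mdev,
	struct iio_dma_buffer_block *block, size_t bytes)
{
	block->bytes_used = bytes;
	iio_dma_buffer_block_done(block);	/* block goes to the outgoing queue */
}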
102 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
105 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
168 struct iio_dma_buffer_queue *queue, size_t size) in iio_dma_buffer_alloc_block() argument
176 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
185 block->queue = queue; in iio_dma_buffer_alloc_block()
189 iio_buffer_get(&queue->buffer); in iio_dma_buffer_alloc_block()
196 struct iio_dma_buffer_queue *queue = block->queue; in _iio_dma_buffer_block_done() local
204 list_add_tail(&block->head, &queue->outgoing); in _iio_dma_buffer_block_done()
213 * pass back ownership of the block to the queue.
217 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done() local
220 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_done()
222 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_done()
225 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_done()
232 * @queue: Queue for which to complete blocks.
233 * @list: List of aborted blocks. All blocks in this list must be from @queue.
237 * hand the blocks back to the queue.
239 void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_block_list_abort() argument
245 spin_lock_irqsave(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
252 spin_unlock_irqrestore(&queue->list_lock, flags); in iio_dma_buffer_block_list_abort()
254 wake_up_interruptible_poll(&queue->buffer.pollq, EPOLLIN | EPOLLRDNORM); in iio_dma_buffer_block_list_abort()
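For the abort path the driver needs to remember which blocks are currently in flight; the documentation above requires every block on the list passed to iio_dma_buffer_block_list_abort() to belong to the same queue. A sketch under the same assumptions, extending the hypothetical my_dev with an active list (populated by the submit callback, not shown) and a my_hw_stop_dma() helper:

#include <linux/kernel.h>
#include <linux/list.h>
#include <linux/iio/buffer-dma.h>

struct my_dev {
	struct iio_dma_buffer_queue queue;
	struct list_head active;	/* blocks currently owned by the hardware */
};

/* Hypothetical helper that halts all outstanding transfers; defined elsewhere. */
void my_hw_stop_dma(struct my_dev *mdev);

/* ops->abort: called by the core from iio_dma_buffer_disable(). */
static void my_dev_abort(struct iio_dma_buffer_queue *queue)
{
	struct my_dev *mdev = container_of(queue, struct my_dev, queue);

	my_hw_stop_dma(mdev);

	/* Every block on mdev->active came from this queue; hand them all
	 * back so the core can reuse or free them. */
	iio_dma_buffer_block_list_abort(queue, &mdev->active);
}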
284 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_request_update() local
296 size = DIV_ROUND_UP(queue->buffer.bytes_per_datum * in iio_dma_buffer_request_update()
297 queue->buffer.length, 2); in iio_dma_buffer_request_update()
299 mutex_lock(&queue->lock); in iio_dma_buffer_request_update()
302 if (PAGE_ALIGN(queue->fileio.block_size) == PAGE_ALIGN(size)) in iio_dma_buffer_request_update()
305 queue->fileio.block_size = size; in iio_dma_buffer_request_update()
306 queue->fileio.active_block = NULL; in iio_dma_buffer_request_update()
308 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
309 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
310 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
322 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_request_update()
323 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_request_update()
325 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_request_update()
327 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_request_update()
328 if (queue->fileio.blocks[i]) { in iio_dma_buffer_request_update()
329 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
342 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
347 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
351 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
355 mutex_unlock(&queue->lock); in iio_dma_buffer_request_update()
361 static void iio_dma_buffer_submit_block(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_submit_block() argument
371 if (!queue->ops) in iio_dma_buffer_submit_block()
376 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
405 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_enable() local
408 mutex_lock(&queue->lock); in iio_dma_buffer_enable()
409 queue->active = true; in iio_dma_buffer_enable()
410 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
412 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
414 mutex_unlock(&queue->lock); in iio_dma_buffer_enable()
431 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_disable() local
433 mutex_lock(&queue->lock); in iio_dma_buffer_disable()
434 queue->active = false; in iio_dma_buffer_disable()
436 if (queue->ops && queue->ops->abort) in iio_dma_buffer_disable()
437 queue->ops->abort(queue); in iio_dma_buffer_disable()
438 mutex_unlock(&queue->lock); in iio_dma_buffer_disable()
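Both hooks reach the core through a struct iio_dma_buffer_ops table: iio_dma_buffer_enable() pushes every block on the incoming queue through ops->submit, and iio_dma_buffer_disable() invokes ops->abort if the driver provides one. Wiring up the hypothetical callbacks from the sketches above:

static const struct iio_dma_buffer_ops my_dev_dma_buffer_ops = {
	.submit = my_dev_submit_block,	/* start a transfer for one block */
	.abort = my_dev_abort,		/* optional: stop in-flight transfers */
};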
444 static void iio_dma_buffer_enqueue(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_enqueue() argument
449 } else if (queue->active) { in iio_dma_buffer_enqueue()
450 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
453 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
458 struct iio_dma_buffer_queue *queue) in iio_dma_buffer_dequeue() argument
462 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
463 block = list_first_entry_or_null(&queue->outgoing, struct in iio_dma_buffer_dequeue()
469 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_dequeue()
486 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buffer); in iio_dma_buffer_read() local
493 mutex_lock(&queue->lock); in iio_dma_buffer_read()
495 if (!queue->fileio.active_block) { in iio_dma_buffer_read()
496 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
501 queue->fileio.pos = 0; in iio_dma_buffer_read()
502 queue->fileio.active_block = block; in iio_dma_buffer_read()
504 block = queue->fileio.active_block; in iio_dma_buffer_read()
508 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
509 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
511 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
516 queue->fileio.pos += n; in iio_dma_buffer_read()
518 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
519 queue->fileio.active_block = NULL; in iio_dma_buffer_read()
520 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
526 mutex_unlock(&queue->lock); in iio_dma_buffer_read()
541 struct iio_dma_buffer_queue *queue = iio_buffer_to_queue(buf); in iio_dma_buffer_data_available() local
552 mutex_lock(&queue->lock); in iio_dma_buffer_data_available()
553 if (queue->fileio.active_block) in iio_dma_buffer_data_available()
554 data_available += queue->fileio.active_block->size; in iio_dma_buffer_data_available()
556 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
557 list_for_each_entry(block, &queue->outgoing, head) in iio_dma_buffer_data_available()
559 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_data_available()
560 mutex_unlock(&queue->lock); in iio_dma_buffer_data_available()
603 * iio_dma_buffer_init() - Initialize DMA buffer queue
604 * @queue: Buffer to initialize
606 * @ops: DMA buffer queue callback operations
608 * The DMA device will be used by the queue to do DMA memory allocations. So it
612 int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue, in iio_dma_buffer_init() argument
615 iio_buffer_init(&queue->buffer); in iio_dma_buffer_init()
616 queue->buffer.length = PAGE_SIZE; in iio_dma_buffer_init()
617 queue->buffer.watermark = queue->buffer.length / 2; in iio_dma_buffer_init()
618 queue->dev = dev; in iio_dma_buffer_init()
619 queue->ops = ops; in iio_dma_buffer_init()
621 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_init()
622 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_init()
624 mutex_init(&queue->lock); in iio_dma_buffer_init()
625 spin_lock_init(&queue->list_lock); in iio_dma_buffer_init()
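A driver typically embeds the queue in its own buffer structure and initializes it once, passing the DMA-capable struct device that coherent allocations will be made against, together with the ops table. Note the defaults set here: buffer.length starts at PAGE_SIZE and the watermark at half of that. A sketch under the same hypothetical names:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/iio/buffer-dma.h>

static struct my_dev *my_dev_alloc_buffer(struct device *dma_dev)
{
	struct my_dev *mdev;
	int ret;

	mdev = kzalloc(sizeof(*mdev), GFP_KERNEL);
	if (!mdev)
		return ERR_PTR(-ENOMEM);

	/* dma_dev must stay valid for the lifetime of the queue, since all
	 * dma_alloc_coherent()/dma_free_coherent() calls go through it. */
	ret = iio_dma_buffer_init(&mdev->queue, dma_dev, &my_dev_dma_buffer_ops);
	if (ret) {
		kfree(mdev);
		return ERR_PTR(ret);
	}

	return mdev;
}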
632 * iio_dma_buffer_exit() - Cleanup DMA buffer queue
633 * @queue: Buffer to cleanup
638 void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_exit() argument
642 mutex_lock(&queue->lock); in iio_dma_buffer_exit()
644 spin_lock_irq(&queue->list_lock); in iio_dma_buffer_exit()
645 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
646 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
648 queue->fileio.blocks[i]->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_exit()
650 INIT_LIST_HEAD(&queue->outgoing); in iio_dma_buffer_exit()
651 spin_unlock_irq(&queue->list_lock); in iio_dma_buffer_exit()
653 INIT_LIST_HEAD(&queue->incoming); in iio_dma_buffer_exit()
655 for (i = 0; i < ARRAY_SIZE(queue->fileio.blocks); i++) { in iio_dma_buffer_exit()
656 if (!queue->fileio.blocks[i]) in iio_dma_buffer_exit()
658 iio_buffer_block_put(queue->fileio.blocks[i]); in iio_dma_buffer_exit()
659 queue->fileio.blocks[i] = NULL; in iio_dma_buffer_exit()
661 queue->fileio.active_block = NULL; in iio_dma_buffer_exit()
662 queue->ops = NULL; in iio_dma_buffer_exit()
664 mutex_unlock(&queue->lock); in iio_dma_buffer_exit()
670 * @queue: Buffer to release
676 void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue) in iio_dma_buffer_release() argument
678 mutex_destroy(&queue->lock); in iio_dma_buffer_release()
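Teardown is split in two: iio_dma_buffer_exit() runs when the owning device goes away and marks the file I/O blocks dead, while iio_dma_buffer_release() frees what is left and belongs in the buffer's release callback, right before the containing structure is freed. A sketch of that last step, still using the hypothetical my_dev:

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/iio/buffer_impl.h>
#include <linux/iio/buffer-dma.h>

/* .release member of struct iio_buffer_access_funcs; runs once the last
 * reference to the buffer has been dropped. */
static void my_dev_buffer_release(struct iio_buffer *buffer)
{
	struct my_dev *mdev = container_of(buffer, struct my_dev, queue.buffer);

	iio_dma_buffer_release(&mdev->queue);
	kfree(mdev);
}

iio_dma_buffer_exit(&mdev->queue) would have been called earlier, from the device remove path, before the last buffer reference is dropped.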