Lines Matching refs:block

95 struct iio_dma_buffer_block *block = container_of(kref, in iio_buffer_block_release() local
98 WARN_ON(block->state != IIO_BLOCK_STATE_DEAD); in iio_buffer_block_release()
100 dma_free_coherent(block->queue->dev, PAGE_ALIGN(block->size), in iio_buffer_block_release()
101 block->vaddr, block->phys_addr); in iio_buffer_block_release()
103 iio_buffer_put(&block->queue->buffer); in iio_buffer_block_release()
104 kfree(block); in iio_buffer_block_release()
107 static void iio_buffer_block_get(struct iio_dma_buffer_block *block) in iio_buffer_block_get() argument
109 kref_get(&block->kref); in iio_buffer_block_get()
112 static void iio_buffer_block_put(struct iio_dma_buffer_block *block) in iio_buffer_block_put() argument
114 kref_put(&block->kref, iio_buffer_block_release); in iio_buffer_block_put()
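
The matches above come from what looks like the IIO DMA buffer core (the iio_buffer_block_*/iio_dma_buffer_* functions): every block carries a kref, get/put are thin wrappers around kref_get()/kref_put(), and the release callback is the only place the coherent DMA memory is returned. A minimal sketch of that pattern follows; my_dma_block, the MY_BLOCK_STATE_* constants and the my_block_* helpers are illustrative stand-ins, not the upstream definitions.

/*
 * Minimal sketch of the kref-based block lifecycle outlined by the
 * matches above.  All names here are stand-ins for the upstream ones.
 */
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/dma-mapping.h>
#include <linux/slab.h>
#include <linux/mm.h>		/* PAGE_ALIGN() */
#include <linux/bug.h>		/* WARN_ON() */

enum my_block_state {
	MY_BLOCK_STATE_DEQUEUED,	/* held by the read path */
	MY_BLOCK_STATE_QUEUED,		/* waiting to be submitted */
	MY_BLOCK_STATE_ACTIVE,		/* owned by the DMA hardware */
	MY_BLOCK_STATE_DONE,		/* completed, waiting for userspace */
	MY_BLOCK_STATE_DEAD,		/* freed once the last reference drops */
};

struct my_dma_block {
	struct kref kref;		/* lifetime of this block */
	struct list_head head;		/* incoming/outgoing/dead list linkage */
	struct device *dev;		/* device owning the coherent memory */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;			/* allocated size */
	size_t bytes_used;		/* payload filled in by the driver */
	enum my_block_state state;
};

/* Runs when the last reference is dropped; only then is the DMA memory freed. */
static void my_block_release(struct kref *kref)
{
	struct my_dma_block *block =
		container_of(kref, struct my_dma_block, kref);

	WARN_ON(block->state != MY_BLOCK_STATE_DEAD);

	dma_free_coherent(block->dev, PAGE_ALIGN(block->size),
			  block->vaddr, block->phys_addr);
	kfree(block);
}

static void my_block_get(struct my_dma_block *block)
{
	kref_get(&block->kref);
}

static void my_block_put(struct my_dma_block *block)
{
	kref_put(&block->kref, my_block_release);
}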
126 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_cleanup_worker() local
133 list_for_each_entry_safe(block, _block, &block_list, head) in iio_dma_buffer_cleanup_worker()
134 iio_buffer_block_release(&block->kref); in iio_dma_buffer_cleanup_worker()
140 struct iio_dma_buffer_block *block; in iio_buffer_block_release_atomic() local
143 block = container_of(kref, struct iio_dma_buffer_block, kref); in iio_buffer_block_release_atomic()
146 list_add_tail(&block->head, &iio_dma_buffer_dead_blocks); in iio_buffer_block_release_atomic()
155 static void iio_buffer_block_put_atomic(struct iio_dma_buffer_block *block) in iio_buffer_block_put_atomic() argument
157 kref_put(&block->kref, iio_buffer_block_release_atomic); in iio_buffer_block_put_atomic()
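
The *_atomic variants exist because dma_free_coherent() cannot be called from the atomic contexts that DMA completion handlers run in, so the final put only parks the block on a global dead list and a work item frees it later. A sketch of that deferral, reusing my_dma_block and my_block_release() from above; the lock, list and work-item names are made up.

#include <linux/spinlock.h>
#include <linux/workqueue.h>

static LIST_HEAD(my_dead_blocks);
static DEFINE_SPINLOCK(my_dead_blocks_lock);

static void my_block_cleanup_worker(struct work_struct *work)
{
	struct my_dma_block *block, *_block;
	LIST_HEAD(block_list);

	/* Steal the whole dead list under the lock, free outside of it. */
	spin_lock_irq(&my_dead_blocks_lock);
	list_splice_tail_init(&my_dead_blocks, &block_list);
	spin_unlock_irq(&my_dead_blocks_lock);

	/* The krefs already dropped to zero; release each block directly. */
	list_for_each_entry_safe(block, _block, &block_list, head)
		my_block_release(&block->kref);
}
static DECLARE_WORK(my_block_cleanup_work, my_block_cleanup_worker);

/* Release callback used when the final put may happen in atomic context. */
static void my_block_release_atomic(struct kref *kref)
{
	struct my_dma_block *block =
		container_of(kref, struct my_dma_block, kref);
	unsigned long flags;

	spin_lock_irqsave(&my_dead_blocks_lock, flags);
	list_add_tail(&block->head, &my_dead_blocks);
	spin_unlock_irqrestore(&my_dead_blocks_lock, flags);

	schedule_work(&my_block_cleanup_work);
}

static void my_block_put_atomic(struct my_dma_block *block)
{
	kref_put(&block->kref, my_block_release_atomic);
}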
168 struct iio_dma_buffer_block *block; in iio_dma_buffer_alloc_block() local
170 block = kzalloc(sizeof(*block), GFP_KERNEL); in iio_dma_buffer_alloc_block()
171 if (!block) in iio_dma_buffer_alloc_block()
174 block->vaddr = dma_alloc_coherent(queue->dev, PAGE_ALIGN(size), in iio_dma_buffer_alloc_block()
175 &block->phys_addr, GFP_KERNEL); in iio_dma_buffer_alloc_block()
176 if (!block->vaddr) { in iio_dma_buffer_alloc_block()
177 kfree(block); in iio_dma_buffer_alloc_block()
181 block->size = size; in iio_dma_buffer_alloc_block()
182 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_alloc_block()
183 block->queue = queue; in iio_dma_buffer_alloc_block()
184 INIT_LIST_HEAD(&block->head); in iio_dma_buffer_alloc_block()
185 kref_init(&block->kref); in iio_dma_buffer_alloc_block()
189 return block; in iio_dma_buffer_alloc_block()
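
Allocation pairs kzalloc() for the bookkeeping struct with a page-aligned dma_alloc_coherent() for the data area, then initialises size, state, list head and refcount, unwinding the kzalloc() if the coherent allocation fails. A hedged sketch, reusing the types and includes from the first block above:

static struct my_dma_block *my_block_alloc(struct device *dev, size_t size)
{
	struct my_dma_block *block;

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		return NULL;

	/* Data area is a page-aligned coherent allocation, as in the hits above. */
	block->vaddr = dma_alloc_coherent(dev, PAGE_ALIGN(size),
					  &block->phys_addr, GFP_KERNEL);
	if (!block->vaddr) {
		kfree(block);
		return NULL;
	}

	block->dev = dev;
	block->size = size;
	block->state = MY_BLOCK_STATE_DEQUEUED;
	INIT_LIST_HEAD(&block->head);
	kref_init(&block->kref);	/* caller owns the initial reference */

	return block;
}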
192 static void _iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in _iio_dma_buffer_block_done() argument
194 struct iio_dma_buffer_queue *queue = block->queue; in _iio_dma_buffer_block_done()
200 if (block->state != IIO_BLOCK_STATE_DEAD) { in _iio_dma_buffer_block_done()
201 block->state = IIO_BLOCK_STATE_DONE; in _iio_dma_buffer_block_done()
202 list_add_tail(&block->head, &queue->outgoing); in _iio_dma_buffer_block_done()
213 void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block) in iio_dma_buffer_block_done() argument
215 struct iio_dma_buffer_queue *queue = block->queue; in iio_dma_buffer_block_done()
219 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_done()
222 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_done()
240 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_block_list_abort() local
244 list_for_each_entry_safe(block, _block, list, head) { in iio_dma_buffer_block_list_abort()
245 list_del(&block->head); in iio_dma_buffer_block_list_abort()
246 block->bytes_used = 0; in iio_dma_buffer_block_list_abort()
247 _iio_dma_buffer_block_done(block); in iio_dma_buffer_block_list_abort()
248 iio_buffer_block_put_atomic(block); in iio_dma_buffer_block_list_abort()
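
The completion path marks a block DONE and moves it to the queue's outgoing list unless it has been marked DEAD in the meantime, then drops the reference the submit path took; the list-abort helper runs the same steps for every block still held on a driver list, with bytes_used forced to zero. A sketch, assuming a reduced queue with a spinlock-protected pair of lists (the real code also wakes up poll()/read() waiters after completion):

/* Reduced stand-in for the DMA buffer queue: the two lists and their lock. */
struct my_dma_queue {
	struct device *dev;
	spinlock_t list_lock;		/* protects incoming and outgoing */
	struct list_head incoming;	/* blocks waiting to be submitted */
	struct list_head outgoing;	/* completed blocks for the read path */
	bool active;			/* buffer currently enabled */
};

static void my_queue_init(struct my_dma_queue *queue, struct device *dev)
{
	queue->dev = dev;
	spin_lock_init(&queue->list_lock);
	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);
	queue->active = false;
}

/* Must be called with queue->list_lock held. */
static void my_block_done_locked(struct my_dma_queue *queue,
				 struct my_dma_block *block)
{
	/* A dead block is being replaced; don't hand it back to the reader. */
	if (block->state != MY_BLOCK_STATE_DEAD) {
		block->state = MY_BLOCK_STATE_DONE;
		list_add_tail(&block->head, &queue->outgoing);
	}
}

/* Called by the DMA driver from its (possibly atomic) completion handler. */
static void my_block_done(struct my_dma_queue *queue,
			  struct my_dma_block *block)
{
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	my_block_done_locked(queue, block);
	spin_unlock_irqrestore(&queue->list_lock, flags);

	/* Drop the reference taken when the block was submitted. */
	my_block_put_atomic(block);
	/* The real code also wakes up poll()/read() waiters at this point. */
}

/* Tear-down helper: complete every block still sitting on a driver list. */
static void my_block_list_abort(struct my_dma_queue *queue,
				struct list_head *list)
{
	struct my_dma_block *block, *_block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry_safe(block, _block, list, head) {
		list_del(&block->head);
		block->bytes_used = 0;	/* nothing was transferred */
		my_block_done_locked(queue, block);
		my_block_put_atomic(block);
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);
}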
256 static bool iio_dma_block_reusable(struct iio_dma_buffer_block *block) in iio_dma_block_reusable() argument
263 switch (block->state) { in iio_dma_block_reusable()
283 struct iio_dma_buffer_block *block; in iio_dma_buffer_request_update() local
308 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
311 if (block && (!iio_dma_block_reusable(block) || !try_reuse)) in iio_dma_buffer_request_update()
312 block->state = IIO_BLOCK_STATE_DEAD; in iio_dma_buffer_request_update()
327 block = queue->fileio.blocks[i]; in iio_dma_buffer_request_update()
328 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_request_update()
330 iio_buffer_block_put(block); in iio_dma_buffer_request_update()
331 block = NULL; in iio_dma_buffer_request_update()
333 block->size = size; in iio_dma_buffer_request_update()
336 block = NULL; in iio_dma_buffer_request_update()
339 if (!block) { in iio_dma_buffer_request_update()
340 block = iio_dma_buffer_alloc_block(queue, size); in iio_dma_buffer_request_update()
341 if (!block) { in iio_dma_buffer_request_update()
345 queue->fileio.blocks[i] = block; in iio_dma_buffer_request_update()
348 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_request_update()
349 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_request_update()
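
The request_update path tries to recycle the existing fileio blocks: anything that cannot be reused (or no longer fits the requested size) is marked DEAD and replaced, everything else is resized in place, and all blocks end up QUEUED on the incoming list. A compressed sketch of that loop, assuming a small fixed-size block array; MY_NUM_FILEIO_BLOCKS and my_dma_fileio are illustrative, and the list reset between the two passes (plus the reset of any reader-held block, not shown) is filled in here only so the sketch stays self-consistent, since the matched lines show just the per-block handling.

#include <linux/errno.h>

#define MY_NUM_FILEIO_BLOCKS 2	/* illustrative; not the upstream value */

struct my_dma_fileio {
	struct my_dma_block *blocks[MY_NUM_FILEIO_BLOCKS];
	size_t block_size;		/* size the current blocks were built for */
};

/* A block can only be recycled if it is owned by the core, not the hardware. */
static bool my_block_reusable(struct my_dma_block *block)
{
	switch (block->state) {
	case MY_BLOCK_STATE_DEQUEUED:
	case MY_BLOCK_STATE_QUEUED:
	case MY_BLOCK_STATE_DONE:
		return true;
	default:
		return false;
	}
}

static int my_request_update(struct my_dma_queue *queue,
			     struct my_dma_fileio *fileio, size_t size)
{
	/* Allocations are page aligned, so same page count means reusable. */
	bool try_reuse = (PAGE_ALIGN(size) == PAGE_ALIGN(fileio->block_size));
	struct my_dma_block *block;
	int i;

	/* Pass 1: mark everything that cannot be recycled as dead. */
	for (i = 0; i < MY_NUM_FILEIO_BLOCKS; i++) {
		block = fileio->blocks[i];
		if (block && (!my_block_reusable(block) || !try_reuse))
			block->state = MY_BLOCK_STATE_DEAD;
	}

	/*
	 * Every surviving block is owned by the core or marked dead, so the
	 * lists can simply be reinitialised before re-queueing.
	 */
	INIT_LIST_HEAD(&queue->incoming);
	INIT_LIST_HEAD(&queue->outgoing);

	/* Pass 2: replace dead blocks, resize the rest, queue them all. */
	for (i = 0; i < MY_NUM_FILEIO_BLOCKS; i++) {
		block = fileio->blocks[i];
		if (block && block->state == MY_BLOCK_STATE_DEAD) {
			my_block_put(block);	/* freed once all users are done */
			fileio->blocks[i] = NULL;
			block = NULL;
		} else if (block) {
			block->size = size;
		}

		if (!block) {
			block = my_block_alloc(queue->dev, size);
			if (!block)
				return -ENOMEM;
			fileio->blocks[i] = block;
		}

		block->state = MY_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}

	fileio->block_size = size;
	return 0;
}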
360 struct iio_dma_buffer_block *block) in iio_dma_buffer_submit_block() argument
372 block->state = IIO_BLOCK_STATE_ACTIVE; in iio_dma_buffer_submit_block()
373 iio_buffer_block_get(block); in iio_dma_buffer_submit_block()
374 ret = queue->ops->submit(queue, block); in iio_dma_buffer_submit_block()
386 iio_buffer_block_put(block); in iio_dma_buffer_submit_block()
404 struct iio_dma_buffer_block *block, *_block; in iio_dma_buffer_enable() local
408 list_for_each_entry_safe(block, _block, &queue->incoming, head) { in iio_dma_buffer_enable()
409 list_del(&block->head); in iio_dma_buffer_enable()
410 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enable()
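
Submission flips the block to ACTIVE and takes an extra reference before handing it to the driver's submit hook, so the hardware is never left with a block that could be freed underneath it; the reference is dropped immediately if submission fails, otherwise by the completion path. Enabling the buffer simply drains the incoming list through that helper. A sketch, with my_dma_queue_ops standing in for the driver-provided callbacks:

/* Illustrative driver hook: hand one block to the DMA hardware. */
struct my_dma_queue_ops {
	int (*submit)(struct my_dma_queue *queue, struct my_dma_block *block);
};

static void my_submit_block(struct my_dma_queue *queue,
			    const struct my_dma_queue_ops *ops,
			    struct my_dma_block *block)
{
	int ret;

	block->state = MY_BLOCK_STATE_ACTIVE;
	/*
	 * The hardware now owns the block; take an extra reference so it
	 * cannot disappear under the driver even if the buffer is torn down.
	 * The matching put happens in the completion path (my_block_done()).
	 */
	my_block_get(block);
	ret = ops->submit(queue, block);
	if (ret) {
		/* Submission failed, so no completion will ever drop it. */
		my_block_put(block);
	}
}

/* On buffer enable, push everything that queued up while disabled. */
static void my_enable(struct my_dma_queue *queue,
		      const struct my_dma_queue_ops *ops)
{
	struct my_dma_block *block, *_block;

	queue->active = true;
	list_for_each_entry_safe(block, _block, &queue->incoming, head) {
		list_del(&block->head);
		my_submit_block(queue, ops, block);
	}
}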
443 struct iio_dma_buffer_block *block) in iio_dma_buffer_enqueue() argument
445 if (block->state == IIO_BLOCK_STATE_DEAD) { in iio_dma_buffer_enqueue()
446 iio_buffer_block_put(block); in iio_dma_buffer_enqueue()
448 iio_dma_buffer_submit_block(queue, block); in iio_dma_buffer_enqueue()
450 block->state = IIO_BLOCK_STATE_QUEUED; in iio_dma_buffer_enqueue()
451 list_add_tail(&block->head, &queue->incoming); in iio_dma_buffer_enqueue()
458 struct iio_dma_buffer_block *block; in iio_dma_buffer_dequeue() local
461 block = list_first_entry_or_null(&queue->outgoing, struct in iio_dma_buffer_dequeue()
463 if (block != NULL) { in iio_dma_buffer_dequeue()
464 list_del(&block->head); in iio_dma_buffer_dequeue()
465 block->state = IIO_BLOCK_STATE_DEQUEUED; in iio_dma_buffer_dequeue()
469 return block; in iio_dma_buffer_dequeue()
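
Re-enqueueing a consumed block either drops it (if it was marked DEAD while userspace held it), resubmits it directly (if the buffer is running), or parks it on the incoming list; dequeueing pops the oldest completed block off the outgoing list under the lock. A sketch following the same branches:

/* Give a consumed block back to the queue. */
static void my_enqueue(struct my_dma_queue *queue,
		       const struct my_dma_queue_ops *ops,
		       struct my_dma_block *block)
{
	if (block->state == MY_BLOCK_STATE_DEAD) {
		my_block_put(block);	/* replaced meanwhile, let it die */
	} else if (queue->active) {
		my_submit_block(queue, ops, block);
	} else {
		block->state = MY_BLOCK_STATE_QUEUED;
		list_add_tail(&block->head, &queue->incoming);
	}
}

/* Take the oldest completed block, if any, for the read path. */
static struct my_dma_block *my_dequeue(struct my_dma_queue *queue)
{
	struct my_dma_block *block;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	block = list_first_entry_or_null(&queue->outgoing,
					 struct my_dma_block, head);
	if (block) {
		list_del(&block->head);
		block->state = MY_BLOCK_STATE_DEQUEUED;
	}
	spin_unlock_irqrestore(&queue->list_lock, flags);

	return block;
}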
485 struct iio_dma_buffer_block *block; in iio_dma_buffer_read() local
494 block = iio_dma_buffer_dequeue(queue); in iio_dma_buffer_read()
495 if (block == NULL) { in iio_dma_buffer_read()
500 queue->fileio.active_block = block; in iio_dma_buffer_read()
502 block = queue->fileio.active_block; in iio_dma_buffer_read()
506 if (n > block->bytes_used - queue->fileio.pos) in iio_dma_buffer_read()
507 n = block->bytes_used - queue->fileio.pos; in iio_dma_buffer_read()
509 if (copy_to_user(user_buffer, block->vaddr + queue->fileio.pos, n)) { in iio_dma_buffer_read()
516 if (queue->fileio.pos == block->bytes_used) { in iio_dma_buffer_read()
518 iio_dma_buffer_enqueue(queue, block); in iio_dma_buffer_read()
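
The read path drains one completed block at a time: dequeue if nothing is active, clamp the copy to bytes_used, copy_to_user() from the coherent mapping at the current offset, and hand the block back once it is fully consumed. A sketch, with a small per-reader state struct (my_fileio_state) standing in for the queue's fileio bookkeeping; it simply returns -EAGAIN when nothing has completed, whereas the real read can also sleep for blocking readers.

#include <linux/uaccess.h>

/* Per-reader state: the block being drained and the read offset into it. */
struct my_fileio_state {
	struct my_dma_block *active_block;
	size_t pos;
};

static ssize_t my_read(struct my_dma_queue *queue,
		       const struct my_dma_queue_ops *ops,
		       struct my_fileio_state *st,
		       char __user *user_buffer, size_t n)
{
	struct my_dma_block *block;

	if (!st->active_block) {
		block = my_dequeue(queue);
		if (!block)
			return -EAGAIN;	/* nothing completed yet */
		st->active_block = block;
		st->pos = 0;
	}
	block = st->active_block;

	/* Never copy past the payload the DMA engine actually filled in. */
	if (n > block->bytes_used - st->pos)
		n = block->bytes_used - st->pos;

	if (copy_to_user(user_buffer, block->vaddr + st->pos, n))
		return -EFAULT;

	st->pos += n;

	/* Block fully drained: recycle it for the next transfer. */
	if (st->pos == block->bytes_used) {
		st->active_block = NULL;
		my_enqueue(queue, ops, block);
	}

	return n;
}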
540 struct iio_dma_buffer_block *block; in iio_dma_buffer_data_available() local
555 list_for_each_entry(block, &queue->outgoing, head) in iio_dma_buffer_data_available()
556 data_available += block->size; in iio_dma_buffer_data_available()
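
Finally, data_available just walks the outgoing list and sums block->size (the hits show the block size, not bytes_used, being accumulated). A sketch:

/* Bytes ready for userspace: everything sitting on the outgoing list. */
static size_t my_data_available(struct my_dma_queue *queue)
{
	struct my_dma_block *block;
	size_t data_available = 0;
	unsigned long flags;

	spin_lock_irqsave(&queue->list_lock, flags);
	list_for_each_entry(block, &queue->outgoing, head)
		data_available += block->size;
	spin_unlock_irqrestore(&queue->list_lock, flags);

	return data_available;
}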