1 /*
2  * videobuf2-core.c - V4L2 driver helper framework
3  *
4  * Copyright (C) 2010 Samsung Electronics
5  *
6  * Author: Pawel Osciak <pawel@osciak.com>
7  *	   Marek Szyprowski <m.szyprowski@samsung.com>
8  *
9  * The vb2_thread implementation was based on code from videobuf-dvb.c:
10  *	(c) 2004 Gerd Knorr <kraxel@bytesex.org> [SUSE Labs]
11  *
12  * This program is free software; you can redistribute it and/or modify
13  * it under the terms of the GNU General Public License as published by
14  * the Free Software Foundation.
15  */
16 
17 #include <linux/err.h>
18 #include <linux/kernel.h>
19 #include <linux/module.h>
20 #include <linux/mm.h>
21 #include <linux/poll.h>
22 #include <linux/slab.h>
23 #include <linux/sched.h>
24 #include <linux/freezer.h>
25 #include <linux/kthread.h>
26 
27 #include <media/v4l2-dev.h>
28 #include <media/v4l2-fh.h>
29 #include <media/v4l2-event.h>
30 #include <media/v4l2-common.h>
31 #include <media/videobuf2-core.h>
32 
33 static int debug;
34 module_param(debug, int, 0644);
35 
36 #define dprintk(level, fmt, arg...)					      \
37 	do {								      \
38 		if (debug >= level)					      \
39 			pr_info("vb2: %s: " fmt, __func__, ## arg); \
40 	} while (0)
41 
42 #ifdef CONFIG_VIDEO_ADV_DEBUG
43 
44 /*
45  * If advanced debugging is on, then count how often each op is called
46  * successfully, which can either be per-buffer or per-queue.
47  *
48  * This makes it easy to check that the 'init' and 'cleanup'
49  * (and variations thereof) stay balanced.
50  */
51 
52 #define log_memop(vb, op)						\
53 	dprintk(2, "call_memop(%p, %d, %s)%s\n",			\
54 		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
55 		(vb)->vb2_queue->mem_ops->op ? "" : " (nop)")
56 
57 #define call_memop(vb, op, args...)					\
58 ({									\
59 	struct vb2_queue *_q = (vb)->vb2_queue;				\
60 	int err;							\
61 									\
62 	log_memop(vb, op);						\
63 	err = _q->mem_ops->op ? _q->mem_ops->op(args) : 0;		\
64 	if (!err)							\
65 		(vb)->cnt_mem_ ## op++;					\
66 	err;								\
67 })
68 
69 #define call_ptr_memop(vb, op, args...)					\
70 ({									\
71 	struct vb2_queue *_q = (vb)->vb2_queue;				\
72 	void *ptr;							\
73 									\
74 	log_memop(vb, op);						\
75 	ptr = _q->mem_ops->op ? _q->mem_ops->op(args) : NULL;		\
76 	if (!IS_ERR_OR_NULL(ptr))					\
77 		(vb)->cnt_mem_ ## op++;					\
78 	ptr;								\
79 })
80 
81 #define call_void_memop(vb, op, args...)				\
82 ({									\
83 	struct vb2_queue *_q = (vb)->vb2_queue;				\
84 									\
85 	log_memop(vb, op);						\
86 	if (_q->mem_ops->op)						\
87 		_q->mem_ops->op(args);					\
88 	(vb)->cnt_mem_ ## op++;						\
89 })
90 
91 #define log_qop(q, op)							\
92 	dprintk(2, "call_qop(%p, %s)%s\n", q, #op,			\
93 		(q)->ops->op ? "" : " (nop)")
94 
95 #define call_qop(q, op, args...)					\
96 ({									\
97 	int err;							\
98 									\
99 	log_qop(q, op);							\
100 	err = (q)->ops->op ? (q)->ops->op(args) : 0;			\
101 	if (!err)							\
102 		(q)->cnt_ ## op++;					\
103 	err;								\
104 })
105 
106 #define call_void_qop(q, op, args...)					\
107 ({									\
108 	log_qop(q, op);							\
109 	if ((q)->ops->op)						\
110 		(q)->ops->op(args);					\
111 	(q)->cnt_ ## op++;						\
112 })
113 
114 #define log_vb_qop(vb, op, args...)					\
115 	dprintk(2, "call_vb_qop(%p, %d, %s)%s\n",			\
116 		(vb)->vb2_queue, (vb)->v4l2_buf.index, #op,		\
117 		(vb)->vb2_queue->ops->op ? "" : " (nop)")
118 
119 #define call_vb_qop(vb, op, args...)					\
120 ({									\
121 	int err;							\
122 									\
123 	log_vb_qop(vb, op);						\
124 	err = (vb)->vb2_queue->ops->op ?				\
125 		(vb)->vb2_queue->ops->op(args) : 0;			\
126 	if (!err)							\
127 		(vb)->cnt_ ## op++;					\
128 	err;								\
129 })
130 
131 #define call_void_vb_qop(vb, op, args...)				\
132 ({									\
133 	log_vb_qop(vb, op);						\
134 	if ((vb)->vb2_queue->ops->op)					\
135 		(vb)->vb2_queue->ops->op(args);				\
136 	(vb)->cnt_ ## op++;						\
137 })
138 
139 #else
140 
141 #define call_memop(vb, op, args...)					\
142 	((vb)->vb2_queue->mem_ops->op ?					\
143 		(vb)->vb2_queue->mem_ops->op(args) : 0)
144 
145 #define call_ptr_memop(vb, op, args...)					\
146 	((vb)->vb2_queue->mem_ops->op ?					\
147 		(vb)->vb2_queue->mem_ops->op(args) : NULL)
148 
149 #define call_void_memop(vb, op, args...)				\
150 	do {								\
151 		if ((vb)->vb2_queue->mem_ops->op)			\
152 			(vb)->vb2_queue->mem_ops->op(args);		\
153 	} while (0)
154 
155 #define call_qop(q, op, args...)					\
156 	((q)->ops->op ? (q)->ops->op(args) : 0)
157 
158 #define call_void_qop(q, op, args...)					\
159 	do {								\
160 		if ((q)->ops->op)					\
161 			(q)->ops->op(args);				\
162 	} while (0)
163 
164 #define call_vb_qop(vb, op, args...)					\
165 	((vb)->vb2_queue->ops->op ? (vb)->vb2_queue->ops->op(args) : 0)
166 
167 #define call_void_vb_qop(vb, op, args...)				\
168 	do {								\
169 		if ((vb)->vb2_queue->ops->op)				\
170 			(vb)->vb2_queue->ops->op(args);			\
171 	} while (0)
172 
173 #endif
174 
175 /* Flags that are set by the vb2 core */
176 #define V4L2_BUFFER_MASK_FLAGS	(V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | \
177 				 V4L2_BUF_FLAG_DONE | V4L2_BUF_FLAG_ERROR | \
178 				 V4L2_BUF_FLAG_PREPARED | \
179 				 V4L2_BUF_FLAG_TIMESTAMP_MASK)
180 /* Output buffer flags that should be passed on to the driver */
181 #define V4L2_BUFFER_OUT_FLAGS	(V4L2_BUF_FLAG_PFRAME | V4L2_BUF_FLAG_BFRAME | \
182 				 V4L2_BUF_FLAG_KEYFRAME | V4L2_BUF_FLAG_TIMECODE)
183 
184 static void __vb2_queue_cancel(struct vb2_queue *q);
185 
186 /**
187  * __vb2_buf_mem_alloc() - allocate video memory for the given buffer
188  */
189 static int __vb2_buf_mem_alloc(struct vb2_buffer *vb)
190 {
191 	struct vb2_queue *q = vb->vb2_queue;
192 	void *mem_priv;
193 	int plane;
194 
195 	/*
196 	 * Allocate memory for all planes in this buffer
197 	 * NOTE: mmapped areas should be page aligned
198 	 */
199 	for (plane = 0; plane < vb->num_planes; ++plane) {
200 		unsigned long size = PAGE_ALIGN(q->plane_sizes[plane]);
201 
202 		mem_priv = call_ptr_memop(vb, alloc, q->alloc_ctx[plane],
203 				      size, q->gfp_flags);
204 		if (IS_ERR_OR_NULL(mem_priv))
205 			goto free;
206 
207 		/* Associate allocator private data with this plane */
208 		vb->planes[plane].mem_priv = mem_priv;
209 		vb->v4l2_planes[plane].length = q->plane_sizes[plane];
210 	}
211 
212 	return 0;
213 free:
214 	/* Free already allocated memory if one of the allocations failed */
215 	for (; plane > 0; --plane) {
216 		call_void_memop(vb, put, vb->planes[plane - 1].mem_priv);
217 		vb->planes[plane - 1].mem_priv = NULL;
218 	}
219 
220 	return -ENOMEM;
221 }
222 
223 /**
224  * __vb2_buf_mem_free() - free memory of the given buffer
225  */
226 static void __vb2_buf_mem_free(struct vb2_buffer *vb)
227 {
228 	unsigned int plane;
229 
230 	for (plane = 0; plane < vb->num_planes; ++plane) {
231 		call_void_memop(vb, put, vb->planes[plane].mem_priv);
232 		vb->planes[plane].mem_priv = NULL;
233 		dprintk(3, "freed plane %d of buffer %d\n", plane,
234 			vb->v4l2_buf.index);
235 	}
236 }
237 
238 /**
239  * __vb2_buf_userptr_put() - release userspace memory associated with
240  * a USERPTR buffer
241  */
242 static void __vb2_buf_userptr_put(struct vb2_buffer *vb)
243 {
244 	unsigned int plane;
245 
246 	for (plane = 0; plane < vb->num_planes; ++plane) {
247 		if (vb->planes[plane].mem_priv)
248 			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
249 		vb->planes[plane].mem_priv = NULL;
250 	}
251 }
252 
253 /**
254  * __vb2_plane_dmabuf_put() - release memory associated with
255  * a DMABUF shared plane
256  */
257 static void __vb2_plane_dmabuf_put(struct vb2_buffer *vb, struct vb2_plane *p)
258 {
259 	if (!p->mem_priv)
260 		return;
261 
262 	if (p->dbuf_mapped)
263 		call_void_memop(vb, unmap_dmabuf, p->mem_priv);
264 
265 	call_void_memop(vb, detach_dmabuf, p->mem_priv);
266 	dma_buf_put(p->dbuf);
267 	memset(p, 0, sizeof(*p));
268 }
269 
270 /**
271  * __vb2_buf_dmabuf_put() - release memory associated with
272  * a DMABUF shared buffer
273  */
274 static void __vb2_buf_dmabuf_put(struct vb2_buffer *vb)
275 {
276 	unsigned int plane;
277 
278 	for (plane = 0; plane < vb->num_planes; ++plane)
279 		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
280 }
281 
282 /**
283  * __setup_lengths() - setup initial lengths for every plane in
284  * every buffer on the queue
285  */
286 static void __setup_lengths(struct vb2_queue *q, unsigned int n)
287 {
288 	unsigned int buffer, plane;
289 	struct vb2_buffer *vb;
290 
291 	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
292 		vb = q->bufs[buffer];
293 		if (!vb)
294 			continue;
295 
296 		for (plane = 0; plane < vb->num_planes; ++plane)
297 			vb->v4l2_planes[plane].length = q->plane_sizes[plane];
298 	}
299 }
300 
301 /**
302  * __setup_offsets() - setup unique offsets ("cookies") for every plane in
303  * every buffer on the queue
304  */
305 static void __setup_offsets(struct vb2_queue *q, unsigned int n)
306 {
307 	unsigned int buffer, plane;
308 	struct vb2_buffer *vb;
309 	unsigned long off;
310 
311 	if (q->num_buffers) {
312 		struct v4l2_plane *p;
313 		vb = q->bufs[q->num_buffers - 1];
314 		p = &vb->v4l2_planes[vb->num_planes - 1];
315 		off = PAGE_ALIGN(p->m.mem_offset + p->length);
316 	} else {
317 		off = 0;
318 	}
319 
320 	for (buffer = q->num_buffers; buffer < q->num_buffers + n; ++buffer) {
321 		vb = q->bufs[buffer];
322 		if (!vb)
323 			continue;
324 
325 		for (plane = 0; plane < vb->num_planes; ++plane) {
326 			vb->v4l2_planes[plane].m.mem_offset = off;
327 
328 			dprintk(3, "buffer %d, plane %d offset 0x%08lx\n",
329 					buffer, plane, off);
330 
331 			off += vb->v4l2_planes[plane].length;
332 			off = PAGE_ALIGN(off);
333 		}
334 	}
335 }
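/*
 * Illustrative note (not part of this file): the mem_offset cookies assigned
 * above are what userspace later passes to mmap() after VIDIOC_QUERYBUF, e.g.:
 *
 *	ptr = mmap(NULL, buf.m.planes[p].length, PROT_READ | PROT_WRITE,
 *		   MAP_SHARED, video_fd, buf.m.planes[p].m.mem_offset);
 *
 * For single-planar buffers the offset is buf.m.offset instead.
 */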
336 
337 /**
338  * __vb2_queue_alloc() - allocate videobuf buffer structures and (for MMAP type)
339  * video buffer memory for all buffers/planes on the queue, and initialize the
340  * queue
341  *
342  * Returns the number of buffers successfully allocated.
343  */
344 static int __vb2_queue_alloc(struct vb2_queue *q, enum v4l2_memory memory,
345 			     unsigned int num_buffers, unsigned int num_planes)
346 {
347 	unsigned int buffer;
348 	struct vb2_buffer *vb;
349 	int ret;
350 
351 	for (buffer = 0; buffer < num_buffers; ++buffer) {
352 		/* Allocate videobuf buffer structures */
353 		vb = kzalloc(q->buf_struct_size, GFP_KERNEL);
354 		if (!vb) {
355 			dprintk(1, "memory alloc for buffer struct failed\n");
356 			break;
357 		}
358 
359 		/* Length stores number of planes for multiplanar buffers */
360 		if (V4L2_TYPE_IS_MULTIPLANAR(q->type))
361 			vb->v4l2_buf.length = num_planes;
362 
363 		vb->state = VB2_BUF_STATE_DEQUEUED;
364 		vb->vb2_queue = q;
365 		vb->num_planes = num_planes;
366 		vb->v4l2_buf.index = q->num_buffers + buffer;
367 		vb->v4l2_buf.type = q->type;
368 		vb->v4l2_buf.memory = memory;
369 
370 		/* Allocate video buffer memory for the MMAP type */
371 		if (memory == V4L2_MEMORY_MMAP) {
372 			ret = __vb2_buf_mem_alloc(vb);
373 			if (ret) {
374 				dprintk(1, "failed allocating memory for "
375 						"buffer %d\n", buffer);
376 				kfree(vb);
377 				break;
378 			}
379 			/*
380 			 * Call the driver-provided buffer initialization
381 			 * callback, if given. An error in initialization
382 			 * results in queue setup failure.
383 			 */
384 			ret = call_vb_qop(vb, buf_init, vb);
385 			if (ret) {
386 				dprintk(1, "buffer %d %p initialization"
387 					" failed\n", buffer, vb);
388 				__vb2_buf_mem_free(vb);
389 				kfree(vb);
390 				break;
391 			}
392 		}
393 
394 		q->bufs[q->num_buffers + buffer] = vb;
395 	}
396 
397 	__setup_lengths(q, buffer);
398 	if (memory == V4L2_MEMORY_MMAP)
399 		__setup_offsets(q, buffer);
400 
401 	dprintk(1, "allocated %d buffers, %d plane(s) each\n",
402 			buffer, num_planes);
403 
404 	return buffer;
405 }
406 
407 /**
408  * __vb2_free_mem() - release all video buffer memory for a given queue
409  */
410 static void __vb2_free_mem(struct vb2_queue *q, unsigned int buffers)
411 {
412 	unsigned int buffer;
413 	struct vb2_buffer *vb;
414 
415 	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
416 	     ++buffer) {
417 		vb = q->bufs[buffer];
418 		if (!vb)
419 			continue;
420 
421 		/* Free MMAP buffers or release USERPTR buffers */
422 		if (q->memory == V4L2_MEMORY_MMAP)
423 			__vb2_buf_mem_free(vb);
424 		else if (q->memory == V4L2_MEMORY_DMABUF)
425 			__vb2_buf_dmabuf_put(vb);
426 		else
427 			__vb2_buf_userptr_put(vb);
428 	}
429 }
430 
431 /**
432  * __vb2_queue_free() - free buffers at the end of the queue - video memory and
433  * related information. If no buffers are left, return the queue to an
434  * uninitialized state. Might be called even if the queue has already been freed.
435  */
436 static int __vb2_queue_free(struct vb2_queue *q, unsigned int buffers)
437 {
438 	unsigned int buffer;
439 
440 	/*
441 	 * Sanity check: when preparing a buffer the queue lock is released for
442 	 * a short while (see __buf_prepare for the details), which would allow
443 	 * a race with a reqbufs which can call this function. Removing the
444 	 * buffers from underneath __buf_prepare is obviously a bad idea, so we
445 	 * check if any of the buffers is in the state PREPARING, and if so we
446 	 * just return -EAGAIN.
447 	 */
448 	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
449 	     ++buffer) {
450 		if (q->bufs[buffer] == NULL)
451 			continue;
452 		if (q->bufs[buffer]->state == VB2_BUF_STATE_PREPARING) {
453 			dprintk(1, "preparing buffers, cannot free\n");
454 			return -EAGAIN;
455 		}
456 	}
457 
458 	/* Call driver-provided cleanup function for each buffer, if provided */
459 	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
460 	     ++buffer) {
461 		struct vb2_buffer *vb = q->bufs[buffer];
462 
463 		if (vb && vb->planes[0].mem_priv)
464 			call_void_vb_qop(vb, buf_cleanup, vb);
465 	}
466 
467 	/* Release video buffer memory */
468 	__vb2_free_mem(q, buffers);
469 
470 #ifdef CONFIG_VIDEO_ADV_DEBUG
471 	/*
472 	 * Check that all the calls were balanced during the lifetime of this
473 	 * queue. If not (or if the debug level is 1 or up), then dump the
474 	 * counters to the kernel log.
475 	 */
476 	if (q->num_buffers) {
477 		bool unbalanced = q->cnt_start_streaming != q->cnt_stop_streaming ||
478 				  q->cnt_wait_prepare != q->cnt_wait_finish;
479 
480 		if (unbalanced || debug) {
481 			pr_info("vb2: counters for queue %p:%s\n", q,
482 				unbalanced ? " UNBALANCED!" : "");
483 			pr_info("vb2:     setup: %u start_streaming: %u stop_streaming: %u\n",
484 				q->cnt_queue_setup, q->cnt_start_streaming,
485 				q->cnt_stop_streaming);
486 			pr_info("vb2:     wait_prepare: %u wait_finish: %u\n",
487 				q->cnt_wait_prepare, q->cnt_wait_finish);
488 		}
489 		q->cnt_queue_setup = 0;
490 		q->cnt_wait_prepare = 0;
491 		q->cnt_wait_finish = 0;
492 		q->cnt_start_streaming = 0;
493 		q->cnt_stop_streaming = 0;
494 	}
495 	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
496 		struct vb2_buffer *vb = q->bufs[buffer];
497 		bool unbalanced = vb->cnt_mem_alloc != vb->cnt_mem_put ||
498 				  vb->cnt_mem_prepare != vb->cnt_mem_finish ||
499 				  vb->cnt_mem_get_userptr != vb->cnt_mem_put_userptr ||
500 				  vb->cnt_mem_attach_dmabuf != vb->cnt_mem_detach_dmabuf ||
501 				  vb->cnt_mem_map_dmabuf != vb->cnt_mem_unmap_dmabuf ||
502 				  vb->cnt_buf_queue != vb->cnt_buf_done ||
503 				  vb->cnt_buf_prepare != vb->cnt_buf_finish ||
504 				  vb->cnt_buf_init != vb->cnt_buf_cleanup;
505 
506 		if (unbalanced || debug) {
507 			pr_info("vb2:   counters for queue %p, buffer %d:%s\n",
508 				q, buffer, unbalanced ? " UNBALANCED!" : "");
509 			pr_info("vb2:     buf_init: %u buf_cleanup: %u buf_prepare: %u buf_finish: %u\n",
510 				vb->cnt_buf_init, vb->cnt_buf_cleanup,
511 				vb->cnt_buf_prepare, vb->cnt_buf_finish);
512 			pr_info("vb2:     buf_queue: %u buf_done: %u\n",
513 				vb->cnt_buf_queue, vb->cnt_buf_done);
514 			pr_info("vb2:     alloc: %u put: %u prepare: %u finish: %u mmap: %u\n",
515 				vb->cnt_mem_alloc, vb->cnt_mem_put,
516 				vb->cnt_mem_prepare, vb->cnt_mem_finish,
517 				vb->cnt_mem_mmap);
518 			pr_info("vb2:     get_userptr: %u put_userptr: %u\n",
519 				vb->cnt_mem_get_userptr, vb->cnt_mem_put_userptr);
520 			pr_info("vb2:     attach_dmabuf: %u detach_dmabuf: %u map_dmabuf: %u unmap_dmabuf: %u\n",
521 				vb->cnt_mem_attach_dmabuf, vb->cnt_mem_detach_dmabuf,
522 				vb->cnt_mem_map_dmabuf, vb->cnt_mem_unmap_dmabuf);
523 			pr_info("vb2:     get_dmabuf: %u num_users: %u vaddr: %u cookie: %u\n",
524 				vb->cnt_mem_get_dmabuf,
525 				vb->cnt_mem_num_users,
526 				vb->cnt_mem_vaddr,
527 				vb->cnt_mem_cookie);
528 		}
529 	}
530 #endif
531 
532 	/* Free videobuf buffers */
533 	for (buffer = q->num_buffers - buffers; buffer < q->num_buffers;
534 	     ++buffer) {
535 		kfree(q->bufs[buffer]);
536 		q->bufs[buffer] = NULL;
537 	}
538 
539 	q->num_buffers -= buffers;
540 	if (!q->num_buffers) {
541 		q->memory = 0;
542 		INIT_LIST_HEAD(&q->queued_list);
543 	}
544 	return 0;
545 }
546 
547 /**
548  * __verify_planes_array() - verify that the planes array passed in struct
549  * v4l2_buffer from userspace can be safely used
550  */
551 static int __verify_planes_array(struct vb2_buffer *vb, const struct v4l2_buffer *b)
552 {
553 	if (!V4L2_TYPE_IS_MULTIPLANAR(b->type))
554 		return 0;
555 
556 	/* Is memory for copying plane information present? */
557 	if (NULL == b->m.planes) {
558 		dprintk(1, "multi-planar buffer passed but "
559 			   "planes array not provided\n");
560 		return -EINVAL;
561 	}
562 
563 	if (b->length < vb->num_planes || b->length > VIDEO_MAX_PLANES) {
564 		dprintk(1, "incorrect planes array length, "
565 			   "expected %d, got %d\n", vb->num_planes, b->length);
566 		return -EINVAL;
567 	}
568 
569 	return 0;
570 }
571 
572 /**
573  * __verify_length() - Verify that the bytesused value for each plane fits in
574  * the plane length and that the data offset doesn't exceed the bytesused value.
575  */
576 static int __verify_length(struct vb2_buffer *vb, const struct v4l2_buffer *b)
577 {
578 	unsigned int length;
579 	unsigned int bytesused;
580 	unsigned int plane;
581 
582 	if (!V4L2_TYPE_IS_OUTPUT(b->type))
583 		return 0;
584 
585 	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
586 		for (plane = 0; plane < vb->num_planes; ++plane) {
587 			length = (b->memory == V4L2_MEMORY_USERPTR ||
588 				  b->memory == V4L2_MEMORY_DMABUF)
589 			       ? b->m.planes[plane].length
590 			       : vb->v4l2_planes[plane].length;
591 			bytesused = b->m.planes[plane].bytesused
592 				  ? b->m.planes[plane].bytesused : length;
593 
594 			if (b->m.planes[plane].bytesused > length)
595 				return -EINVAL;
596 
597 			if (b->m.planes[plane].data_offset > 0 &&
598 			    b->m.planes[plane].data_offset >= bytesused)
599 				return -EINVAL;
600 		}
601 	} else {
602 		length = (b->memory == V4L2_MEMORY_USERPTR)
603 		       ? b->length : vb->v4l2_planes[0].length;
604 		bytesused = b->bytesused ? b->bytesused : length;
605 
606 		if (b->bytesused > length)
607 			return -EINVAL;
608 	}
609 
610 	return 0;
611 }
612 
613 /**
614  * __buffer_in_use() - return true if the buffer is in use and
615  * the queue cannot be freed (by means of a REQBUFS(0) call)
616  */
617 static bool __buffer_in_use(struct vb2_queue *q, struct vb2_buffer *vb)
618 {
619 	unsigned int plane;
620 	for (plane = 0; plane < vb->num_planes; ++plane) {
621 		void *mem_priv = vb->planes[plane].mem_priv;
622 		/*
623 		 * If num_users() has not been provided, call_memop
624 		 * will return 0, apparently nobody cares about this
625 		 * case anyway. If num_users() returns more than 1,
626 		 * we are not the only user of the plane's memory.
627 		 */
628 		if (mem_priv && call_memop(vb, num_users, mem_priv) > 1)
629 			return true;
630 	}
631 	return false;
632 }
633 
634 /**
635  * __buffers_in_use() - return true if any buffers on the queue are in use and
636  * the queue cannot be freed (by means of a REQBUFS(0) call)
637  */
638 static bool __buffers_in_use(struct vb2_queue *q)
639 {
640 	unsigned int buffer;
641 	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
642 		if (__buffer_in_use(q, q->bufs[buffer]))
643 			return true;
644 	}
645 	return false;
646 }
647 
648 /**
649  * __fill_v4l2_buffer() - fill in a struct v4l2_buffer with information to be
650  * returned to userspace
651  */
652 static void __fill_v4l2_buffer(struct vb2_buffer *vb, struct v4l2_buffer *b)
653 {
654 	struct vb2_queue *q = vb->vb2_queue;
655 
656 	/* Copy back data such as timestamp, flags, etc. */
657 	memcpy(b, &vb->v4l2_buf, offsetof(struct v4l2_buffer, m));
658 	b->reserved2 = vb->v4l2_buf.reserved2;
659 	b->reserved = vb->v4l2_buf.reserved;
660 
661 	if (V4L2_TYPE_IS_MULTIPLANAR(q->type)) {
662 		/*
663 		 * Fill in plane-related data if userspace provided an array
664 		 * for it. The caller has already verified memory and size.
665 		 */
666 		b->length = vb->num_planes;
667 		memcpy(b->m.planes, vb->v4l2_planes,
668 			b->length * sizeof(struct v4l2_plane));
669 	} else {
670 		/*
671 		 * We use length and offset in v4l2_planes array even for
672 		 * single-planar buffers, but userspace does not.
673 		 */
674 		b->length = vb->v4l2_planes[0].length;
675 		b->bytesused = vb->v4l2_planes[0].bytesused;
676 		if (q->memory == V4L2_MEMORY_MMAP)
677 			b->m.offset = vb->v4l2_planes[0].m.mem_offset;
678 		else if (q->memory == V4L2_MEMORY_USERPTR)
679 			b->m.userptr = vb->v4l2_planes[0].m.userptr;
680 		else if (q->memory == V4L2_MEMORY_DMABUF)
681 			b->m.fd = vb->v4l2_planes[0].m.fd;
682 	}
683 
684 	/*
685 	 * Clear any buffer state related flags.
686 	 */
687 	b->flags &= ~V4L2_BUFFER_MASK_FLAGS;
688 	b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK;
689 	if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
690 	    V4L2_BUF_FLAG_TIMESTAMP_COPY) {
691 		/*
692 		 * For non-COPY timestamps, drop timestamp source bits
693 		 * and obtain the timestamp source from the queue.
694 		 */
695 		b->flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
696 		b->flags |= q->timestamp_flags & V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
697 	}
698 
699 	switch (vb->state) {
700 	case VB2_BUF_STATE_QUEUED:
701 	case VB2_BUF_STATE_ACTIVE:
702 		b->flags |= V4L2_BUF_FLAG_QUEUED;
703 		break;
704 	case VB2_BUF_STATE_ERROR:
705 		b->flags |= V4L2_BUF_FLAG_ERROR;
706 		/* fall through */
707 	case VB2_BUF_STATE_DONE:
708 		b->flags |= V4L2_BUF_FLAG_DONE;
709 		break;
710 	case VB2_BUF_STATE_PREPARED:
711 		b->flags |= V4L2_BUF_FLAG_PREPARED;
712 		break;
713 	case VB2_BUF_STATE_PREPARING:
714 	case VB2_BUF_STATE_DEQUEUED:
715 		/* nothing */
716 		break;
717 	}
718 
719 	if (__buffer_in_use(q, vb))
720 		b->flags |= V4L2_BUF_FLAG_MAPPED;
721 }
722 
723 /**
724  * vb2_querybuf() - query video buffer information
725  * @q:		videobuf queue
726  * @b:		buffer struct passed from userspace to vidioc_querybuf handler
727  *		in driver
728  *
729  * Should be called from vidioc_querybuf ioctl handler in driver.
730  * This function will verify the passed v4l2_buffer structure and fill the
731  * relevant information for the userspace.
732  *
733  * The return values from this function are intended to be directly returned
734  * from vidioc_querybuf handler in driver.
735  */
736 int vb2_querybuf(struct vb2_queue *q, struct v4l2_buffer *b)
737 {
738 	struct vb2_buffer *vb;
739 	int ret;
740 
741 	if (b->type != q->type) {
742 		dprintk(1, "wrong buffer type\n");
743 		return -EINVAL;
744 	}
745 
746 	if (b->index >= q->num_buffers) {
747 		dprintk(1, "buffer index out of range\n");
748 		return -EINVAL;
749 	}
750 	vb = q->bufs[b->index];
751 	ret = __verify_planes_array(vb, b);
752 	if (!ret)
753 		__fill_v4l2_buffer(vb, b);
754 	return ret;
755 }
756 EXPORT_SYMBOL(vb2_querybuf);
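/*
 * Usage sketch (illustrative only; 'struct my_dev' and its 'queue' member
 * are hypothetical): a driver's vidioc_querybuf handler typically just
 * forwards to vb2:
 *
 *	static int my_vidioc_querybuf(struct file *file, void *priv,
 *				      struct v4l2_buffer *b)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_querybuf(&dev->queue, b);
 *	}
 */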
757 
758 /**
759  * __verify_userptr_ops() - verify that all memory operations required for
760  * USERPTR queue type have been provided
761  */
762 static int __verify_userptr_ops(struct vb2_queue *q)
763 {
764 	if (!(q->io_modes & VB2_USERPTR) || !q->mem_ops->get_userptr ||
765 	    !q->mem_ops->put_userptr)
766 		return -EINVAL;
767 
768 	return 0;
769 }
770 
771 /**
772  * __verify_mmap_ops() - verify that all memory operations required for
773  * MMAP queue type have been provided
774  */
775 static int __verify_mmap_ops(struct vb2_queue *q)
776 {
777 	if (!(q->io_modes & VB2_MMAP) || !q->mem_ops->alloc ||
778 	    !q->mem_ops->put || !q->mem_ops->mmap)
779 		return -EINVAL;
780 
781 	return 0;
782 }
783 
784 /**
785  * __verify_dmabuf_ops() - verify that all memory operations required for
786  * DMABUF queue type have been provided
787  */
788 static int __verify_dmabuf_ops(struct vb2_queue *q)
789 {
790 	if (!(q->io_modes & VB2_DMABUF) || !q->mem_ops->attach_dmabuf ||
791 	    !q->mem_ops->detach_dmabuf  || !q->mem_ops->map_dmabuf ||
792 	    !q->mem_ops->unmap_dmabuf)
793 		return -EINVAL;
794 
795 	return 0;
796 }
797 
798 /**
799  * __verify_memory_type() - Check whether the memory type and buffer type
800  * passed to a buffer operation are compatible with the queue.
801  */
802 static int __verify_memory_type(struct vb2_queue *q,
803 		enum v4l2_memory memory, enum v4l2_buf_type type)
804 {
805 	if (memory != V4L2_MEMORY_MMAP && memory != V4L2_MEMORY_USERPTR &&
806 	    memory != V4L2_MEMORY_DMABUF) {
807 		dprintk(1, "unsupported memory type\n");
808 		return -EINVAL;
809 	}
810 
811 	if (type != q->type) {
812 		dprintk(1, "requested type is incorrect\n");
813 		return -EINVAL;
814 	}
815 
816 	/*
817 	 * Make sure all the required memory ops for given memory type
818 	 * are available.
819 	 */
820 	if (memory == V4L2_MEMORY_MMAP && __verify_mmap_ops(q)) {
821 		dprintk(1, "MMAP for current setup unsupported\n");
822 		return -EINVAL;
823 	}
824 
825 	if (memory == V4L2_MEMORY_USERPTR && __verify_userptr_ops(q)) {
826 		dprintk(1, "USERPTR for current setup unsupported\n");
827 		return -EINVAL;
828 	}
829 
830 	if (memory == V4L2_MEMORY_DMABUF && __verify_dmabuf_ops(q)) {
831 		dprintk(1, "DMABUF for current setup unsupported\n");
832 		return -EINVAL;
833 	}
834 
835 	/*
836 	 * Place the busy tests at the end: -EBUSY can be ignored when
837 	 * create_bufs is called with count == 0, but count == 0 should still
838 	 * do the memory and type validation.
839 	 */
840 	if (vb2_fileio_is_active(q)) {
841 		dprintk(1, "file io in progress\n");
842 		return -EBUSY;
843 	}
844 	return 0;
845 }
846 
847 /**
848  * __reqbufs() - Initiate streaming
849  * @q:		videobuf2 queue
850  * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
851  *
852  * Should be called from vidioc_reqbufs ioctl handler of a driver.
853  * This function:
854  * 1) verifies streaming parameters passed from the userspace,
855  * 2) sets up the queue,
856  * 3) negotiates number of buffers and planes per buffer with the driver
857  *    to be used during streaming,
858  * 4) allocates internal buffer structures (struct vb2_buffer), according to
859  *    the agreed parameters,
860  * 5) for MMAP memory type, allocates actual video memory, using the
861  *    memory handling/allocation routines provided during queue initialization
862  *
863  * If req->count is 0, all the memory will be freed instead.
864  * If the queue has been allocated previously (by a previous vb2_reqbufs call)
865  * and the queue is not busy, memory will be reallocated.
866  *
867  * The return values from this function are intended to be directly returned
868  * from vidioc_reqbufs handler in driver.
869  */
870 static int __reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
871 {
872 	unsigned int num_buffers, allocated_buffers, num_planes = 0;
873 	int ret;
874 
875 	if (q->streaming) {
876 		dprintk(1, "streaming active\n");
877 		return -EBUSY;
878 	}
879 
880 	if (req->count == 0 || q->num_buffers != 0 || q->memory != req->memory) {
881 		/*
882 		 * We already have buffers allocated, so first check if they
883 		 * are not in use and can be freed.
884 		 */
885 		mutex_lock(&q->mmap_lock);
886 		if (q->memory == V4L2_MEMORY_MMAP && __buffers_in_use(q)) {
887 			mutex_unlock(&q->mmap_lock);
888 			dprintk(1, "memory in use, cannot free\n");
889 			return -EBUSY;
890 		}
891 
892 		/*
893 		 * Call queue_cancel to clean up any buffers in the PREPARED or
894 		 * QUEUED state which is possible if buffers were prepared or
895 		 * queued without ever calling STREAMON.
896 		 */
897 		__vb2_queue_cancel(q);
898 		ret = __vb2_queue_free(q, q->num_buffers);
899 		mutex_unlock(&q->mmap_lock);
900 		if (ret)
901 			return ret;
902 
903 		/*
904 		 * In case of REQBUFS(0) return immediately without calling
905 		 * driver's queue_setup() callback and allocating resources.
906 		 */
907 		if (req->count == 0)
908 			return 0;
909 	}
910 
911 	/*
912 	 * Make sure the requested values and current defaults are sane.
913 	 */
914 	num_buffers = min_t(unsigned int, req->count, VIDEO_MAX_FRAME);
915 	num_buffers = max_t(unsigned int, num_buffers, q->min_buffers_needed);
916 	memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
917 	memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
918 	q->memory = req->memory;
919 
920 	/*
921 	 * Ask the driver how many buffers and planes per buffer it requires.
922 	 * Driver also sets the size and allocator context for each plane.
923 	 */
924 	ret = call_qop(q, queue_setup, q, NULL, &num_buffers, &num_planes,
925 		       q->plane_sizes, q->alloc_ctx);
926 	if (ret)
927 		return ret;
928 
929 	/* Finally, allocate buffers and video memory */
930 	allocated_buffers = __vb2_queue_alloc(q, req->memory, num_buffers, num_planes);
931 	if (allocated_buffers == 0) {
932 		dprintk(1, "memory allocation failed\n");
933 		return -ENOMEM;
934 	}
935 
936 	/*
937 	 * There is no point in continuing if we can't allocate the minimum
938 	 * number of buffers needed by this vb2_queue.
939 	 */
940 	if (allocated_buffers < q->min_buffers_needed)
941 		ret = -ENOMEM;
942 
943 	/*
944 	 * Check if driver can handle the allocated number of buffers.
945 	 */
946 	if (!ret && allocated_buffers < num_buffers) {
947 		num_buffers = allocated_buffers;
948 
949 		ret = call_qop(q, queue_setup, q, NULL, &num_buffers,
950 			       &num_planes, q->plane_sizes, q->alloc_ctx);
951 
952 		if (!ret && allocated_buffers < num_buffers)
953 			ret = -ENOMEM;
954 
955 		/*
956 		 * Either the driver has accepted a smaller number of buffers,
957 		 * or .queue_setup() returned an error
958 		 */
959 	}
960 
961 	mutex_lock(&q->mmap_lock);
962 	q->num_buffers = allocated_buffers;
963 
964 	if (ret < 0) {
965 		/*
966 		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
967 		 * from q->num_buffers.
968 		 */
969 		__vb2_queue_free(q, allocated_buffers);
970 		mutex_unlock(&q->mmap_lock);
971 		return ret;
972 	}
973 	mutex_unlock(&q->mmap_lock);
974 
975 	/*
976 	 * Return the number of successfully allocated buffers
977 	 * to the userspace.
978 	 */
979 	req->count = allocated_buffers;
980 	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
981 
982 	return 0;
983 }
984 
985 /**
986  * vb2_reqbufs() - Wrapper for __reqbufs() that also verifies the memory and
987  * type values.
988  * @q:		videobuf2 queue
989  * @req:	struct passed from userspace to vidioc_reqbufs handler in driver
990  */
991 int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req)
992 {
993 	int ret = __verify_memory_type(q, req->memory, req->type);
994 
995 	return ret ? ret : __reqbufs(q, req);
996 }
997 EXPORT_SYMBOL_GPL(vb2_reqbufs);
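/*
 * Usage sketch (illustrative only; 'struct my_dev' and its 'queue' member
 * are hypothetical): the reqbufs ioctl handler forwards the request to vb2
 * and returns its result directly:
 *
 *	static int my_vidioc_reqbufs(struct file *file, void *priv,
 *				     struct v4l2_requestbuffers *req)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_reqbufs(&dev->queue, req);
 *	}
 *
 * Drivers built on the vb2 helpers can usually point this ioctl at
 * vb2_ioctl_reqbufs() instead.
 */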
998 
999 /**
1000  * __create_bufs() - Allocate buffers and any required auxiliary structs
1001  * @q:		videobuf2 queue
1002  * @create:	creation parameters, passed from userspace to vidioc_create_bufs
1003  *		handler in driver
1004  *
1005  * Should be called from vidioc_create_bufs ioctl handler of a driver.
1006  * This function:
1007  * 1) verifies parameter sanity
1008  * 2) calls the .queue_setup() queue operation
1009  * 3) performs any necessary memory allocations
1010  *
1011  * The return values from this function are intended to be directly returned
1012  * from vidioc_create_bufs handler in driver.
1013  */
1014 static int __create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
1015 {
1016 	unsigned int num_planes = 0, num_buffers, allocated_buffers;
1017 	int ret;
1018 
1019 	if (q->num_buffers == VIDEO_MAX_FRAME) {
1020 		dprintk(1, "maximum number of buffers already allocated\n");
1021 		return -ENOBUFS;
1022 	}
1023 
1024 	if (!q->num_buffers) {
1025 		memset(q->plane_sizes, 0, sizeof(q->plane_sizes));
1026 		memset(q->alloc_ctx, 0, sizeof(q->alloc_ctx));
1027 		q->memory = create->memory;
1028 		q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
1029 	}
1030 
1031 	num_buffers = min(create->count, VIDEO_MAX_FRAME - q->num_buffers);
1032 
1033 	/*
1034 	 * Ask the driver whether the requested number of buffers, planes per
1035 	 * buffer and their sizes are acceptable
1036 	 */
1037 	ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
1038 		       &num_planes, q->plane_sizes, q->alloc_ctx);
1039 	if (ret)
1040 		return ret;
1041 
1042 	/* Finally, allocate buffers and video memory */
1043 	allocated_buffers = __vb2_queue_alloc(q, create->memory, num_buffers,
1044 				num_planes);
1045 	if (allocated_buffers == 0) {
1046 		dprintk(1, "memory allocation failed\n");
1047 		return -ENOMEM;
1048 	}
1049 
1050 	/*
1051 	 * Check if driver can handle the so far allocated number of buffers.
1052 	 */
1053 	if (allocated_buffers < num_buffers) {
1054 		num_buffers = allocated_buffers;
1055 
1056 		/*
1057 		 * q->num_buffers contains the total number of buffers that the
1058 		 * queue driver has set up
1059 		 */
1060 		ret = call_qop(q, queue_setup, q, &create->format, &num_buffers,
1061 			       &num_planes, q->plane_sizes, q->alloc_ctx);
1062 
1063 		if (!ret && allocated_buffers < num_buffers)
1064 			ret = -ENOMEM;
1065 
1066 		/*
1067 		 * Either the driver has accepted a smaller number of buffers,
1068 		 * or .queue_setup() returned an error
1069 		 */
1070 	}
1071 
1072 	mutex_lock(&q->mmap_lock);
1073 	q->num_buffers += allocated_buffers;
1074 
1075 	if (ret < 0) {
1076 		/*
1077 		 * Note: __vb2_queue_free() will subtract 'allocated_buffers'
1078 		 * from q->num_buffers.
1079 		 */
1080 		__vb2_queue_free(q, allocated_buffers);
1081 		mutex_unlock(&q->mmap_lock);
1082 		return -ENOMEM;
1083 	}
1084 	mutex_unlock(&q->mmap_lock);
1085 
1086 	/*
1087 	 * Return the number of successfully allocated buffers
1088 	 * to the userspace.
1089 	 */
1090 	create->count = allocated_buffers;
1091 
1092 	return 0;
1093 }
1094 
1095 /**
1096  * vb2_create_bufs() - Wrapper for __create_bufs() that also verifies the
1097  * memory and type values.
1098  * @q:		videobuf2 queue
1099  * @create:	creation parameters, passed from userspace to vidioc_create_bufs
1100  *		handler in driver
1101  */
1102 int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create)
1103 {
1104 	int ret = __verify_memory_type(q, create->memory, create->format.type);
1105 
1106 	create->index = q->num_buffers;
1107 	if (create->count == 0)
1108 		return ret != -EBUSY ? ret : 0;
1109 	return ret ? ret : __create_bufs(q, create);
1110 }
1111 EXPORT_SYMBOL_GPL(vb2_create_bufs);
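/*
 * Usage sketch (illustrative only; 'struct my_dev' is hypothetical):
 *
 *	static int my_vidioc_create_bufs(struct file *file, void *priv,
 *					 struct v4l2_create_buffers *create)
 *	{
 *		struct my_dev *dev = video_drvdata(file);
 *
 *		return vb2_create_bufs(&dev->queue, create);
 *	}
 */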
1112 
1113 /**
1114  * vb2_plane_vaddr() - Return a kernel virtual address of a given plane
1115  * @vb:		vb2_buffer to which the plane in question belongs
1116  * @plane_no:	plane number for which the address is to be returned
1117  *
1118  * This function returns a kernel virtual address of a given plane if
1119  * such a mapping exists, NULL otherwise.
1120  */
1121 void *vb2_plane_vaddr(struct vb2_buffer *vb, unsigned int plane_no)
1122 {
1123 	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
1124 		return NULL;
1125 
1126 	return call_ptr_memop(vb, vaddr, vb->planes[plane_no].mem_priv);
1127 
1128 }
1129 EXPORT_SYMBOL_GPL(vb2_plane_vaddr);
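/*
 * Usage sketch (illustrative only): a driver processing buffers in software
 * might clear plane 0 in its buf_prepare callback, assuming the allocator in
 * use (e.g. vmalloc or dma-contig) provides a kernel mapping:
 *
 *	void *vaddr = vb2_plane_vaddr(vb, 0);
 *
 *	if (vaddr)
 *		memset(vaddr, 0, vb2_plane_size(vb, 0));
 */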
1130 
1131 /**
1132  * vb2_plane_cookie() - Return allocator specific cookie for the given plane
1133  * @vb:		vb2_buffer to which the plane in question belongs
1134  * @plane_no:	plane number for which the cookie is to be returned
1135  *
1136  * This function returns an allocator specific cookie for a given plane if
1137  * available, NULL otherwise. The allocator should provide some simple static
1138  * inline function, which would convert this cookie to the allocator specific
1139  * type that can be used directly by the driver to access the buffer. This can
1140  * be, for example, a physical address, a pointer to a scatter list or an IOMMU mapping.
1141  */
1142 void *vb2_plane_cookie(struct vb2_buffer *vb, unsigned int plane_no)
1143 {
1144 	if (plane_no >= vb->num_planes || !vb->planes[plane_no].mem_priv)
1145 		return NULL;
1146 
1147 	return call_ptr_memop(vb, cookie, vb->planes[plane_no].mem_priv);
1148 }
1149 EXPORT_SYMBOL_GPL(vb2_plane_cookie);
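/*
 * Usage sketch (illustrative only): the dma-contig allocator, for example,
 * wraps this cookie in a helper (media/videobuf2-dma-contig.h) that yields
 * the bus address a driver programs into its DMA engine:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 */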
1150 
1151 /**
1152  * vb2_buffer_done() - inform videobuf that an operation on a buffer is finished
1153  * @vb:		vb2_buffer returned from the driver
1154  * @state:	either VB2_BUF_STATE_DONE if the operation finished successfully
1155  *		or VB2_BUF_STATE_ERROR if the operation finished with an error.
1156  *		If start_streaming fails then it should return buffers with state
1157  *		VB2_BUF_STATE_QUEUED to put them back into the queue.
1158  *
1159  * This function should be called by the driver after a hardware operation on
1160  * a buffer is finished and the buffer may be returned to userspace. The driver
1161  * cannot use this buffer anymore until it is queued back to it by videobuf
1162  * by the means of buf_queue callback. Only buffers previously queued to the
1163  * by means of the buf_queue callback. Only buffers previously queued to the
1164  *
1165  * While streaming a buffer can only be returned in state DONE or ERROR.
1166  * The start_streaming op can also return them in case the DMA engine cannot
1167  * be started for some reason. In that case the buffers should be returned with
1168  * state QUEUED.
1169  */
1170 void vb2_buffer_done(struct vb2_buffer *vb, enum vb2_buffer_state state)
1171 {
1172 	struct vb2_queue *q = vb->vb2_queue;
1173 	unsigned long flags;
1174 	unsigned int plane;
1175 
1176 	if (WARN_ON(vb->state != VB2_BUF_STATE_ACTIVE))
1177 		return;
1178 
1179 	if (WARN_ON(state != VB2_BUF_STATE_DONE &&
1180 		    state != VB2_BUF_STATE_ERROR &&
1181 		    state != VB2_BUF_STATE_QUEUED))
1182 		state = VB2_BUF_STATE_ERROR;
1183 
1184 #ifdef CONFIG_VIDEO_ADV_DEBUG
1185 	/*
1186 	 * Although this is not a callback, it still does have to balance
1187 	 * with the buf_queue op. So update this counter manually.
1188 	 */
1189 	vb->cnt_buf_done++;
1190 #endif
1191 	dprintk(4, "done processing on buffer %d, state: %d\n",
1192 			vb->v4l2_buf.index, state);
1193 
1194 	/* sync buffers */
1195 	for (plane = 0; plane < vb->num_planes; ++plane)
1196 		call_void_memop(vb, finish, vb->planes[plane].mem_priv);
1197 
1198 	/* Add the buffer to the done buffers list */
1199 	spin_lock_irqsave(&q->done_lock, flags);
1200 	vb->state = state;
1201 	if (state != VB2_BUF_STATE_QUEUED)
1202 		list_add_tail(&vb->done_entry, &q->done_list);
1203 	atomic_dec(&q->owned_by_drv_count);
1204 	spin_unlock_irqrestore(&q->done_lock, flags);
1205 
1206 	if (state == VB2_BUF_STATE_QUEUED)
1207 		return;
1208 
1209 	/* Inform any processes that may be waiting for buffers */
1210 	wake_up(&q->done_wq);
1211 }
1212 EXPORT_SYMBOL_GPL(vb2_buffer_done);
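/*
 * Usage sketch (illustrative only; 'dev', 'cur_buf' and 'sequence' are
 * hypothetical driver state): on a completion interrupt the buffer received
 * earlier through the buf_queue op is handed back to vb2:
 *
 *	struct vb2_buffer *vb = dev->cur_buf;
 *
 *	dev->cur_buf = NULL;
 *	vb->v4l2_buf.sequence = dev->sequence++;
 *	v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
 *	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
 */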
1213 
1214 /**
1215  * vb2_discard_done() - discard all buffers marked as DONE
1216  * @q:		videobuf2 queue
1217  *
1218  * This function is intended to be used with suspend/resume operations. It
1219  * discards all 'done' buffers as they would be too old to be requested after
1220  * resume.
1221  *
1222  * Drivers must stop the hardware and synchronize with interrupt handlers and/or
1223  * delayed works before calling this function to make sure no buffer will be
1224  * touched by the driver and/or hardware.
1225  */
1226 void vb2_discard_done(struct vb2_queue *q)
1227 {
1228 	struct vb2_buffer *vb;
1229 	unsigned long flags;
1230 
1231 	spin_lock_irqsave(&q->done_lock, flags);
1232 	list_for_each_entry(vb, &q->done_list, done_entry)
1233 		vb->state = VB2_BUF_STATE_ERROR;
1234 	spin_unlock_irqrestore(&q->done_lock, flags);
1235 }
1236 EXPORT_SYMBOL_GPL(vb2_discard_done);
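/*
 * Usage sketch (illustrative only; my_stop_hw()/my_start_hw() are
 * hypothetical helpers): a resume path might stop the hardware so no
 * further completions arrive, drop the stale buffers, then restart:
 *
 *	my_stop_hw(dev);
 *	vb2_discard_done(&dev->queue);
 *	my_start_hw(dev);
 */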
1237 
1238 /**
1239  * __fill_vb2_buffer() - fill a vb2_buffer with information provided in a
1240  * v4l2_buffer by the userspace. The caller has already verified that struct
1241  * v4l2_buffer has a valid number of planes.
1242  */
1243 static void __fill_vb2_buffer(struct vb2_buffer *vb, const struct v4l2_buffer *b,
1244 				struct v4l2_plane *v4l2_planes)
1245 {
1246 	unsigned int plane;
1247 
1248 	if (V4L2_TYPE_IS_MULTIPLANAR(b->type)) {
1249 		if (b->memory == V4L2_MEMORY_USERPTR) {
1250 			for (plane = 0; plane < vb->num_planes; ++plane) {
1251 				v4l2_planes[plane].m.userptr =
1252 					b->m.planes[plane].m.userptr;
1253 				v4l2_planes[plane].length =
1254 					b->m.planes[plane].length;
1255 			}
1256 		}
1257 		if (b->memory == V4L2_MEMORY_DMABUF) {
1258 			for (plane = 0; plane < vb->num_planes; ++plane) {
1259 				v4l2_planes[plane].m.fd =
1260 					b->m.planes[plane].m.fd;
1261 				v4l2_planes[plane].length =
1262 					b->m.planes[plane].length;
1263 			}
1264 		}
1265 
1266 		/* Fill in driver-provided information for OUTPUT types */
1267 		if (V4L2_TYPE_IS_OUTPUT(b->type)) {
1268 			/*
1269 			 * Will have to go up to b->length when API starts
1270 			 * accepting variable number of planes.
1271 			 *
1272 			 * If bytesused == 0 for the output buffer, then fall
1273 			 * back to the full buffer size. In that case
1274 			 * userspace clearly never bothered to set it and
1275 			 * it's a safe assumption that they really meant to
1276 			 * use the full plane sizes.
1277 			 */
1278 			for (plane = 0; plane < vb->num_planes; ++plane) {
1279 				struct v4l2_plane *pdst = &v4l2_planes[plane];
1280 				struct v4l2_plane *psrc = &b->m.planes[plane];
1281 
1282 				pdst->bytesused = psrc->bytesused ?
1283 					psrc->bytesused : pdst->length;
1284 				pdst->data_offset = psrc->data_offset;
1285 			}
1286 		}
1287 	} else {
1288 		/*
1289 		 * Single-planar buffers do not use planes array,
1290 		 * so fill in relevant v4l2_buffer struct fields instead.
1291 		 * In videobuf we use our internal v4l2_planes array for
1292 		 * single-planar buffers as well, for simplicity.
1293 		 *
1294 		 * If bytesused == 0 for the output buffer, then fall back
1295 		 * to the full buffer size as that's a sensible default.
1296 		 */
1297 		if (b->memory == V4L2_MEMORY_USERPTR) {
1298 			v4l2_planes[0].m.userptr = b->m.userptr;
1299 			v4l2_planes[0].length = b->length;
1300 		}
1301 
1302 		if (b->memory == V4L2_MEMORY_DMABUF) {
1303 			v4l2_planes[0].m.fd = b->m.fd;
1304 			v4l2_planes[0].length = b->length;
1305 		}
1306 
1307 		if (V4L2_TYPE_IS_OUTPUT(b->type))
1308 			v4l2_planes[0].bytesused = b->bytesused ?
1309 				b->bytesused : v4l2_planes[0].length;
1310 		else
1311 			v4l2_planes[0].bytesused = 0;
1312 
1313 	}
1314 
1315 	/* Zero flags that the vb2 core handles */
1316 	vb->v4l2_buf.flags = b->flags & ~V4L2_BUFFER_MASK_FLAGS;
1317 	if ((vb->vb2_queue->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) !=
1318 	    V4L2_BUF_FLAG_TIMESTAMP_COPY || !V4L2_TYPE_IS_OUTPUT(b->type)) {
1319 		/*
1320 		 * Non-COPY timestamps and non-OUTPUT queues will get
1321 		 * their timestamp and timestamp source flags from the
1322 		 * queue.
1323 		 */
1324 		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TSTAMP_SRC_MASK;
1325 	}
1326 
1327 	if (V4L2_TYPE_IS_OUTPUT(b->type)) {
1328 		/*
1329 		 * For output buffers mask out the timecode flag:
1330 		 * this will be handled later in vb2_internal_qbuf().
1331 		 * The 'field' is valid metadata for this output buffer
1332 		 * and so it needs to be copied here.
1333 		 */
1334 		vb->v4l2_buf.flags &= ~V4L2_BUF_FLAG_TIMECODE;
1335 		vb->v4l2_buf.field = b->field;
1336 	} else {
1337 		/* Zero any output buffer flags as this is a capture buffer */
1338 		vb->v4l2_buf.flags &= ~V4L2_BUFFER_OUT_FLAGS;
1339 	}
1340 }
1341 
1342 /**
1343  * __qbuf_mmap() - handle qbuf of an MMAP buffer
1344  */
1345 static int __qbuf_mmap(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1346 {
1347 	__fill_vb2_buffer(vb, b, vb->v4l2_planes);
1348 	return call_vb_qop(vb, buf_prepare, vb);
1349 }
1350 
1351 /**
1352  * __qbuf_userptr() - handle qbuf of a USERPTR buffer
1353  */
1354 static int __qbuf_userptr(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1355 {
1356 	struct v4l2_plane planes[VIDEO_MAX_PLANES];
1357 	struct vb2_queue *q = vb->vb2_queue;
1358 	void *mem_priv;
1359 	unsigned int plane;
1360 	int ret;
1361 	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1362 	bool reacquired = vb->planes[0].mem_priv == NULL;
1363 
1364 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
1365 	/* Copy relevant information provided by the userspace */
1366 	__fill_vb2_buffer(vb, b, planes);
1367 
1368 	for (plane = 0; plane < vb->num_planes; ++plane) {
1369 		/* Skip the plane if already verified */
1370 		if (vb->v4l2_planes[plane].m.userptr &&
1371 		    vb->v4l2_planes[plane].m.userptr == planes[plane].m.userptr
1372 		    && vb->v4l2_planes[plane].length == planes[plane].length)
1373 			continue;
1374 
1375 		dprintk(3, "userspace address for plane %d changed, "
1376 				"reacquiring memory\n", plane);
1377 
1378 		/* Check if the provided plane buffer is large enough */
1379 		if (planes[plane].length < q->plane_sizes[plane]) {
1380 			dprintk(1, "provided buffer size %u is less than "
1381 						"setup size %u for plane %d\n",
1382 						planes[plane].length,
1383 						q->plane_sizes[plane], plane);
1384 			ret = -EINVAL;
1385 			goto err;
1386 		}
1387 
1388 		/* Release previously acquired memory if present */
1389 		if (vb->planes[plane].mem_priv) {
1390 			if (!reacquired) {
1391 				reacquired = true;
1392 				call_void_vb_qop(vb, buf_cleanup, vb);
1393 			}
1394 			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1395 		}
1396 
1397 		vb->planes[plane].mem_priv = NULL;
1398 		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1399 
1400 		/* Acquire each plane's memory */
1401 		mem_priv = call_ptr_memop(vb, get_userptr, q->alloc_ctx[plane],
1402 				      planes[plane].m.userptr,
1403 				      planes[plane].length, write);
1404 		if (IS_ERR_OR_NULL(mem_priv)) {
1405 			dprintk(1, "failed acquiring userspace "
1406 						"memory for plane %d\n", plane);
1407 			ret = mem_priv ? PTR_ERR(mem_priv) : -EINVAL;
1408 			goto err;
1409 		}
1410 		vb->planes[plane].mem_priv = mem_priv;
1411 	}
1412 
1413 	/*
1414 	 * Now that everything is in order, copy relevant information
1415 	 * provided by userspace.
1416 	 */
1417 	for (plane = 0; plane < vb->num_planes; ++plane)
1418 		vb->v4l2_planes[plane] = planes[plane];
1419 
1420 	if (reacquired) {
1421 		/*
1422 		 * One or more planes changed, so we must call buf_init to do
1423 		 * the driver-specific initialization on the newly acquired
1424 		 * buffer, if provided.
1425 		 */
1426 		ret = call_vb_qop(vb, buf_init, vb);
1427 		if (ret) {
1428 			dprintk(1, "buffer initialization failed\n");
1429 			goto err;
1430 		}
1431 	}
1432 
1433 	ret = call_vb_qop(vb, buf_prepare, vb);
1434 	if (ret) {
1435 		dprintk(1, "buffer preparation failed\n");
1436 		call_void_vb_qop(vb, buf_cleanup, vb);
1437 		goto err;
1438 	}
1439 
1440 	return 0;
1441 err:
1442 	/* In case of errors, release planes that were already acquired */
1443 	for (plane = 0; plane < vb->num_planes; ++plane) {
1444 		if (vb->planes[plane].mem_priv)
1445 			call_void_memop(vb, put_userptr, vb->planes[plane].mem_priv);
1446 		vb->planes[plane].mem_priv = NULL;
1447 		vb->v4l2_planes[plane].m.userptr = 0;
1448 		vb->v4l2_planes[plane].length = 0;
1449 	}
1450 
1451 	return ret;
1452 }
1453 
1454 /**
1455  * __qbuf_dmabuf() - handle qbuf of a DMABUF buffer
1456  */
1457 static int __qbuf_dmabuf(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1458 {
1459 	struct v4l2_plane planes[VIDEO_MAX_PLANES];
1460 	struct vb2_queue *q = vb->vb2_queue;
1461 	void *mem_priv;
1462 	unsigned int plane;
1463 	int ret;
1464 	int write = !V4L2_TYPE_IS_OUTPUT(q->type);
1465 	bool reacquired = vb->planes[0].mem_priv == NULL;
1466 
1467 	memset(planes, 0, sizeof(planes[0]) * vb->num_planes);
1468 	/* Copy relevant information provided by the userspace */
1469 	__fill_vb2_buffer(vb, b, planes);
1470 
1471 	for (plane = 0; plane < vb->num_planes; ++plane) {
1472 		struct dma_buf *dbuf = dma_buf_get(planes[plane].m.fd);
1473 
1474 		if (IS_ERR_OR_NULL(dbuf)) {
1475 			dprintk(1, "invalid dmabuf fd for plane %d\n",
1476 				plane);
1477 			ret = -EINVAL;
1478 			goto err;
1479 		}
1480 
1481 		/* use DMABUF size if length is not provided */
1482 		if (planes[plane].length == 0)
1483 			planes[plane].length = dbuf->size;
1484 
1485 		if (planes[plane].length < q->plane_sizes[plane]) {
1486 			dprintk(1, "invalid dmabuf length for plane %d\n",
1487 				plane);
1488 			ret = -EINVAL;
1489 			goto err;
1490 		}
1491 
1492 		/* Skip the plane if already verified */
1493 		if (dbuf == vb->planes[plane].dbuf &&
1494 		    vb->v4l2_planes[plane].length == planes[plane].length) {
1495 			dma_buf_put(dbuf);
1496 			continue;
1497 		}
1498 
1499 		dprintk(1, "buffer for plane %d changed\n", plane);
1500 
1501 		if (!reacquired) {
1502 			reacquired = true;
1503 			call_void_vb_qop(vb, buf_cleanup, vb);
1504 		}
1505 
1506 		/* Release previously acquired memory if present */
1507 		__vb2_plane_dmabuf_put(vb, &vb->planes[plane]);
1508 		memset(&vb->v4l2_planes[plane], 0, sizeof(struct v4l2_plane));
1509 
1510 		/* Acquire each plane's memory */
1511 		mem_priv = call_ptr_memop(vb, attach_dmabuf, q->alloc_ctx[plane],
1512 			dbuf, planes[plane].length, write);
1513 		if (IS_ERR(mem_priv)) {
1514 			dprintk(1, "failed to attach dmabuf\n");
1515 			ret = PTR_ERR(mem_priv);
1516 			dma_buf_put(dbuf);
1517 			goto err;
1518 		}
1519 
1520 		vb->planes[plane].dbuf = dbuf;
1521 		vb->planes[plane].mem_priv = mem_priv;
1522 	}
1523 
1524 	/* TODO: This pins the buffer(s) with dma_buf_map_attachment(), but
1525 	 * really we want to do this just before the DMA, not while queueing
1526 	 * the buffer(s).
1527 	 */
1528 	for (plane = 0; plane < vb->num_planes; ++plane) {
1529 		ret = call_memop(vb, map_dmabuf, vb->planes[plane].mem_priv);
1530 		if (ret) {
1531 			dprintk(1, "failed to map dmabuf for plane %d\n",
1532 				plane);
1533 			goto err;
1534 		}
1535 		vb->planes[plane].dbuf_mapped = 1;
1536 	}
1537 
1538 	/*
1539 	 * Now that everything is in order, copy relevant information
1540 	 * provided by userspace.
1541 	 */
1542 	for (plane = 0; plane < vb->num_planes; ++plane)
1543 		vb->v4l2_planes[plane] = planes[plane];
1544 
1545 	if (reacquired) {
1546 		/*
1547 		 * Call driver-specific initialization on the newly acquired buffer,
1548 		 * if provided.
1549 		 */
1550 		ret = call_vb_qop(vb, buf_init, vb);
1551 		if (ret) {
1552 			dprintk(1, "buffer initialization failed\n");
1553 			goto err;
1554 		}
1555 	}
1556 
1557 	ret = call_vb_qop(vb, buf_prepare, vb);
1558 	if (ret) {
1559 		dprintk(1, "buffer preparation failed\n");
1560 		call_void_vb_qop(vb, buf_cleanup, vb);
1561 		goto err;
1562 	}
1563 
1564 	return 0;
1565 err:
1566 	/* In case of errors, release planes that were already acquired */
1567 	__vb2_buf_dmabuf_put(vb);
1568 
1569 	return ret;
1570 }
1571 
1572 /**
1573  * __enqueue_in_driver() - enqueue a vb2_buffer in driver for processing
1574  */
1575 static void __enqueue_in_driver(struct vb2_buffer *vb)
1576 {
1577 	struct vb2_queue *q = vb->vb2_queue;
1578 	unsigned int plane;
1579 
1580 	vb->state = VB2_BUF_STATE_ACTIVE;
1581 	atomic_inc(&q->owned_by_drv_count);
1582 
1583 	/* sync buffers */
1584 	for (plane = 0; plane < vb->num_planes; ++plane)
1585 		call_void_memop(vb, prepare, vb->planes[plane].mem_priv);
1586 
1587 	call_void_vb_qop(vb, buf_queue, vb);
1588 }
1589 
1590 static int __buf_prepare(struct vb2_buffer *vb, const struct v4l2_buffer *b)
1591 {
1592 	struct vb2_queue *q = vb->vb2_queue;
1593 	int ret;
1594 
1595 	ret = __verify_length(vb, b);
1596 	if (ret < 0) {
1597 		dprintk(1, "plane parameters verification failed: %d\n", ret);
1598 		return ret;
1599 	}
1600 	if (b->field == V4L2_FIELD_ALTERNATE && V4L2_TYPE_IS_OUTPUT(q->type)) {
1601 		/*
1602 		 * If the format's field is ALTERNATE, then the buffer's field
1603 		 * should be either TOP or BOTTOM, not ALTERNATE since that
1604 		 * makes no sense. The driver has to know whether the
1605 		 * buffer represents a top or a bottom field in order to
1606 		 * program any DMA correctly. Using ALTERNATE is wrong, since
1607 		 * that just says that it is either a top or a bottom field,
1608 		 * but not which of the two it is.
1609 		 */
1610 		dprintk(1, "the field is incorrectly set to ALTERNATE for an output buffer\n");
1611 		return -EINVAL;
1612 	}
1613 
1614 	if (q->error) {
1615 		dprintk(1, "fatal error occurred on queue\n");
1616 		return -EIO;
1617 	}
1618 
1619 	vb->state = VB2_BUF_STATE_PREPARING;
1620 	vb->v4l2_buf.timestamp.tv_sec = 0;
1621 	vb->v4l2_buf.timestamp.tv_usec = 0;
1622 	vb->v4l2_buf.sequence = 0;
1623 
1624 	switch (q->memory) {
1625 	case V4L2_MEMORY_MMAP:
1626 		ret = __qbuf_mmap(vb, b);
1627 		break;
1628 	case V4L2_MEMORY_USERPTR:
1629 		down_read(&current->mm->mmap_sem);
1630 		ret = __qbuf_userptr(vb, b);
1631 		up_read(&current->mm->mmap_sem);
1632 		break;
1633 	case V4L2_MEMORY_DMABUF:
1634 		ret = __qbuf_dmabuf(vb, b);
1635 		break;
1636 	default:
1637 		WARN(1, "Invalid queue type\n");
1638 		ret = -EINVAL;
1639 	}
1640 
1641 	if (ret)
1642 		dprintk(1, "buffer preparation failed: %d\n", ret);
1643 	vb->state = ret ? VB2_BUF_STATE_DEQUEUED : VB2_BUF_STATE_PREPARED;
1644 
1645 	return ret;
1646 }
1647 
1648 static int vb2_queue_or_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b,
1649 				    const char *opname)
1650 {
1651 	if (b->type != q->type) {
1652 		dprintk(1, "%s: invalid buffer type\n", opname);
1653 		return -EINVAL;
1654 	}
1655 
1656 	if (b->index >= q->num_buffers) {
1657 		dprintk(1, "%s: buffer index out of range\n", opname);
1658 		return -EINVAL;
1659 	}
1660 
1661 	if (q->bufs[b->index] == NULL) {
1662 		/* Should never happen */
1663 		dprintk(1, "%s: buffer is NULL\n", opname);
1664 		return -EINVAL;
1665 	}
1666 
1667 	if (b->memory != q->memory) {
1668 		dprintk(1, "%s: invalid memory type\n", opname);
1669 		return -EINVAL;
1670 	}
1671 
1672 	return __verify_planes_array(q->bufs[b->index], b);
1673 }
1674 
1675 /**
1676  * vb2_prepare_buf() - Pass ownership of a buffer from userspace to the kernel
1677  * @q:		videobuf2 queue
1678  * @b:		buffer structure passed from userspace to vidioc_prepare_buf
1679  *		handler in driver
1680  *
1681  * Should be called from vidioc_prepare_buf ioctl handler of a driver.
1682  * This function:
1683  * 1) verifies the passed buffer,
1684  * 2) calls buf_prepare callback in the driver (if provided), in which
1685  *    driver-specific buffer initialization can be performed.
1686  *
1687  * The return values from this function are intended to be directly returned
1688  * from vidioc_prepare_buf handler in driver.
1689  */
1690 int vb2_prepare_buf(struct vb2_queue *q, struct v4l2_buffer *b)
1691 {
1692 	struct vb2_buffer *vb;
1693 	int ret;
1694 
1695 	if (vb2_fileio_is_active(q)) {
1696 		dprintk(1, "file io in progress\n");
1697 		return -EBUSY;
1698 	}
1699 
1700 	ret = vb2_queue_or_prepare_buf(q, b, "prepare_buf");
1701 	if (ret)
1702 		return ret;
1703 
1704 	vb = q->bufs[b->index];
1705 	if (vb->state != VB2_BUF_STATE_DEQUEUED) {
1706 		dprintk(1, "invalid buffer state %d\n",
1707 			vb->state);
1708 		return -EINVAL;
1709 	}
1710 
1711 	ret = __buf_prepare(vb, b);
1712 	if (!ret) {
1713 		/* Fill buffer information for the userspace */
1714 		__fill_v4l2_buffer(vb, b);
1715 
1716 		dprintk(1, "prepare of buffer %d succeeded\n", vb->v4l2_buf.index);
1717 	}
1718 	return ret;
1719 }
1720 EXPORT_SYMBOL_GPL(vb2_prepare_buf);
1721 
1722 /**
1723  * vb2_start_streaming() - Attempt to start streaming.
1724  * @q:		videobuf2 queue
1725  *
1726  * Attempt to start streaming. When this function is called there must be
1727  * at least q->min_buffers_needed buffers queued up (i.e. the minimum
1728  * number of buffers required for the DMA engine to function). If the
1729  * @start_streaming op fails it is supposed to return all the driver-owned
1730  * buffers back to vb2 in state QUEUED. Check if that happened and if
1731  * not warn and reclaim them forcefully.
1732  */
1733 static int vb2_start_streaming(struct vb2_queue *q)
1734 {
1735 	struct vb2_buffer *vb;
1736 	int ret;
1737 
1738 	/*
1739 	 * If any buffers were queued before streamon,
1740 	 * we can now pass them to driver for processing.
1741 	 */
1742 	list_for_each_entry(vb, &q->queued_list, queued_entry)
1743 		__enqueue_in_driver(vb);
1744 
1745 	/* Tell the driver to start streaming */
1746 	q->start_streaming_called = 1;
1747 	ret = call_qop(q, start_streaming, q,
1748 		       atomic_read(&q->owned_by_drv_count));
1749 	if (!ret)
1750 		return 0;
1751 
1752 	q->start_streaming_called = 0;
1753 
1754 	dprintk(1, "driver refused to start streaming\n");
1755 	/*
1756 	 * If you see this warning, then the driver isn't cleaning up properly
1757 	 * after a failed start_streaming(). See the start_streaming()
1758  * documentation in videobuf2-core.h for more information on how buffers
1759 	 * should be returned to vb2 in start_streaming().
1760 	 */
1761 	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
1762 		unsigned i;
1763 
1764 		/*
1765 		 * Forcefully reclaim buffers if the driver did not
1766 		 * correctly return them to vb2.
1767 		 */
1768 		for (i = 0; i < q->num_buffers; ++i) {
1769 			vb = q->bufs[i];
1770 			if (vb->state == VB2_BUF_STATE_ACTIVE)
1771 				vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED);
1772 		}
1773 		/* Must be zero now */
1774 		WARN_ON(atomic_read(&q->owned_by_drv_count));
1775 	}
1776 	/*
1777 	 * If done_list is not empty, then start_streaming() didn't call
1778 	 * vb2_buffer_done(vb, VB2_BUF_STATE_QUEUED) but STATE_ERROR or
1779 	 * STATE_DONE.
1780 	 */
1781 	WARN_ON(!list_empty(&q->done_list));
1782 	return ret;
1783 }
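
/*
 * Illustrative sketch (not part of the original source): a driver's
 * start_streaming() error path is expected to hand every buffer it received
 * via buf_queue back to vb2 in the QUEUED state, roughly:
 *
 *	static int my_start_streaming(struct vb2_queue *q, unsigned int count)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *		struct my_buffer *buf, *tmp;
 *		int ret = my_start_hardware(dev);
 *
 *		if (ret) {
 *			list_for_each_entry_safe(buf, tmp, &dev->buf_list, list) {
 *				list_del(&buf->list);
 *				vb2_buffer_done(&buf->vb, VB2_BUF_STATE_QUEUED);
 *			}
 *		}
 *		return ret;
 *	}
 *
 * my_dev, my_buffer, my_start_hardware and dev->buf_list are hypothetical
 * driver-side names; vb2_buffer_done() and vb2_get_drv_priv() are the real
 * vb2 calls. Doing this cleanup in the driver avoids the forced reclaim and
 * WARN_ON above.
 */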
1784 
1785 static int vb2_internal_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1786 {
1787 	int ret = vb2_queue_or_prepare_buf(q, b, "qbuf");
1788 	struct vb2_buffer *vb;
1789 
1790 	if (ret)
1791 		return ret;
1792 
1793 	vb = q->bufs[b->index];
1794 
1795 	switch (vb->state) {
1796 	case VB2_BUF_STATE_DEQUEUED:
1797 		ret = __buf_prepare(vb, b);
1798 		if (ret)
1799 			return ret;
1800 		break;
1801 	case VB2_BUF_STATE_PREPARED:
1802 		break;
1803 	case VB2_BUF_STATE_PREPARING:
1804 		dprintk(1, "buffer still being prepared\n");
1805 		return -EINVAL;
1806 	default:
1807 		dprintk(1, "invalid buffer state %d\n", vb->state);
1808 		return -EINVAL;
1809 	}
1810 
1811 	/*
1812 	 * Add to the queued buffers list, a buffer will stay on it until
1813 	 * dequeued in dqbuf.
1814 	 */
1815 	list_add_tail(&vb->queued_entry, &q->queued_list);
1816 	q->queued_count++;
1817 	q->waiting_for_buffers = false;
1818 	vb->state = VB2_BUF_STATE_QUEUED;
1819 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
1820 		/*
1821 		 * For output buffers copy the timestamp if needed,
1822 		 * and the timecode field and flag if needed.
1823 		 */
1824 		if ((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
1825 		    V4L2_BUF_FLAG_TIMESTAMP_COPY)
1826 			vb->v4l2_buf.timestamp = b->timestamp;
1827 		vb->v4l2_buf.flags |= b->flags & V4L2_BUF_FLAG_TIMECODE;
1828 		if (b->flags & V4L2_BUF_FLAG_TIMECODE)
1829 			vb->v4l2_buf.timecode = b->timecode;
1830 	}
1831 
1832 	/*
1833 	 * If already streaming, give the buffer to driver for processing.
1834 	 * If not, the buffer will be given to driver on next streamon.
1835 	 */
1836 	if (q->start_streaming_called)
1837 		__enqueue_in_driver(vb);
1838 
1839 	/* Fill buffer information for the userspace */
1840 	__fill_v4l2_buffer(vb, b);
1841 
1842 	/*
1843 	 * If streamon has been called, and we haven't yet called
1844 	 * start_streaming() since not enough buffers were queued, and
1845 	 * we now have reached the minimum number of queued buffers,
1846 	 * then we can finally call start_streaming().
1847 	 */
1848 	if (q->streaming && !q->start_streaming_called &&
1849 	    q->queued_count >= q->min_buffers_needed) {
1850 		ret = vb2_start_streaming(q);
1851 		if (ret)
1852 			return ret;
1853 	}
1854 
1855 	dprintk(1, "qbuf of buffer %d succeeded\n", vb->v4l2_buf.index);
1856 	return 0;
1857 }
1858 
1859 /**
1860  * vb2_qbuf() - Queue a buffer from userspace
1861  * @q:		videobuf2 queue
1862  * @b:		buffer structure passed from userspace to vidioc_qbuf handler
1863  *		in driver
1864  *
1865  * Should be called from vidioc_qbuf ioctl handler of a driver.
1866  * This function:
1867  * 1) verifies the passed buffer,
1868  * 2) if necessary, calls buf_prepare callback in the driver (if provided), in
1869  *    which driver-specific buffer initialization can be performed,
1870  * 3) if streaming is on, queues the buffer in driver by the means of buf_queue
1871  *    callback for processing.
1872  *
1873  * The return values from this function are intended to be directly returned
1874  * from vidioc_qbuf handler in driver.
1875  */
1876 int vb2_qbuf(struct vb2_queue *q, struct v4l2_buffer *b)
1877 {
1878 	if (vb2_fileio_is_active(q)) {
1879 		dprintk(1, "file io in progress\n");
1880 		return -EBUSY;
1881 	}
1882 
1883 	return vb2_internal_qbuf(q, b);
1884 }
1885 EXPORT_SYMBOL_GPL(vb2_qbuf);
1886 
1887 /**
1888  * __vb2_wait_for_done_vb() - wait for a buffer to become available
1889  * for dequeuing
1890  *
1891  * Will sleep if required for nonblocking == false.
1892  */
1893 static int __vb2_wait_for_done_vb(struct vb2_queue *q, int nonblocking)
1894 {
1895 	/*
1896 	 * All operations on vb_done_list are performed under done_lock
1897 	 * spinlock protection. However, buffers may be removed from
1898 	 * it and returned to userspace only while holding both driver's
1899 	 * lock and the done_lock spinlock. Thus we can be sure that as
1900 	 * long as we hold the driver's lock, the list will remain not
1901 	 * empty if list_empty() check succeeds.
1902 	 */
1903 
1904 	for (;;) {
1905 		int ret;
1906 
1907 		if (!q->streaming) {
1908 			dprintk(1, "streaming off, will not wait for buffers\n");
1909 			return -EINVAL;
1910 		}
1911 
1912 		if (q->error) {
1913 			dprintk(1, "Queue in error state, will not wait for buffers\n");
1914 			return -EIO;
1915 		}
1916 
1917 		if (!list_empty(&q->done_list)) {
1918 			/*
1919 			 * Found a buffer that we were waiting for.
1920 			 */
1921 			break;
1922 		}
1923 
1924 		if (nonblocking) {
1925 			dprintk(1, "nonblocking and no buffers to dequeue, "
1926 								"will not wait\n");
1927 			return -EAGAIN;
1928 		}
1929 
1930 		/*
1931 		 * We are streaming and blocking, wait for another buffer to
1932 		 * become ready or for streamoff. Driver's lock is released to
1933 		 * allow streamoff or qbuf to be called while waiting.
1934 		 */
1935 		call_void_qop(q, wait_prepare, q);
1936 
1937 		/*
1938 		 * All locks have been released, it is safe to sleep now.
1939 		 */
1940 		dprintk(3, "will sleep waiting for buffers\n");
1941 		ret = wait_event_interruptible(q->done_wq,
1942 				!list_empty(&q->done_list) || !q->streaming ||
1943 				q->error);
1944 
1945 		/*
1946 		 * We need to reevaluate both conditions again after reacquiring
1947 		 * the locks or return an error if one occurred.
1948 		 */
1949 		call_void_qop(q, wait_finish, q);
1950 		if (ret) {
1951 			dprintk(1, "sleep was interrupted\n");
1952 			return ret;
1953 		}
1954 	}
1955 	return 0;
1956 }
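
/*
 * Illustrative sketch (not part of the original source): wait_prepare and
 * wait_finish are normally just an unlock/lock of whatever lock serializes
 * the driver's ioctls, so that qbuf or streamoff can run while a blocking
 * dqbuf sleeps here:
 *
 *	static void my_wait_prepare(struct vb2_queue *q)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		mutex_unlock(&dev->lock);
 *	}
 *
 *	static void my_wait_finish(struct vb2_queue *q)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		mutex_lock(&dev->lock);
 *	}
 *
 * my_dev and dev->lock are hypothetical names for the driver's own state.
 */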
1957 
1958 /**
1959  * __vb2_get_done_vb() - get a buffer ready for dequeuing
1960  *
1961  * Will sleep if required for nonblocking == false.
1962  */
1963 static int __vb2_get_done_vb(struct vb2_queue *q, struct vb2_buffer **vb,
1964 				struct v4l2_buffer *b, int nonblocking)
1965 {
1966 	unsigned long flags;
1967 	int ret;
1968 
1969 	/*
1970 	 * Wait for at least one buffer to become available on the done_list.
1971 	 */
1972 	ret = __vb2_wait_for_done_vb(q, nonblocking);
1973 	if (ret)
1974 		return ret;
1975 
1976 	/*
1977 	 * Driver's lock has been held since we last verified that done_list
1978 	 * is not empty, so no need for another list_empty(done_list) check.
1979 	 */
1980 	spin_lock_irqsave(&q->done_lock, flags);
1981 	*vb = list_first_entry(&q->done_list, struct vb2_buffer, done_entry);
1982 	/*
1983 	 * Only remove the buffer from done_list if v4l2_buffer can handle all
1984 	 * the planes.
1985 	 */
1986 	ret = __verify_planes_array(*vb, b);
1987 	if (!ret)
1988 		list_del(&(*vb)->done_entry);
1989 	spin_unlock_irqrestore(&q->done_lock, flags);
1990 
1991 	return ret;
1992 }
1993 
1994 /**
1995  * vb2_wait_for_all_buffers() - wait until all buffers are given back to vb2
1996  * @q:		videobuf2 queue
1997  *
1998  * This function will wait until all buffers that have been given to the driver
1999  * by buf_queue() are given back to vb2 with vb2_buffer_done(). It doesn't call
2000  * the wait_prepare/wait_finish pair. It is intended to be called with all locks
2001  * taken, for example from stop_streaming() callback.
2002  */
2003 int vb2_wait_for_all_buffers(struct vb2_queue *q)
2004 {
2005 	if (!q->streaming) {
2006 		dprintk(1, "streaming off, will not wait for buffers\n");
2007 		return -EINVAL;
2008 	}
2009 
2010 	if (q->start_streaming_called)
2011 		wait_event(q->done_wq, !atomic_read(&q->owned_by_drv_count));
2012 	return 0;
2013 }
2014 EXPORT_SYMBOL_GPL(vb2_wait_for_all_buffers);
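
/*
 * Illustrative sketch (not part of the original source): a driver whose
 * hardware finishes buffers asynchronously can use this from its
 * stop_streaming() callback, with all locks already held:
 *
 *	static void my_stop_streaming(struct vb2_queue *q)
 *	{
 *		struct my_dev *dev = vb2_get_drv_priv(q);
 *
 *		my_request_hardware_stop(dev);
 *		vb2_wait_for_all_buffers(q);
 *	}
 *
 * Here the hardware keeps calling vb2_buffer_done() from its interrupt
 * handler until it has drained; my_dev and my_request_hardware_stop are
 * hypothetical driver-side names.
 */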
2015 
2016 /**
2017  * __vb2_dqbuf() - bring back the buffer to the DEQUEUED state
2018  */
2019 static void __vb2_dqbuf(struct vb2_buffer *vb)
2020 {
2021 	struct vb2_queue *q = vb->vb2_queue;
2022 	unsigned int i;
2023 
2024 	/* nothing to do if the buffer is already dequeued */
2025 	if (vb->state == VB2_BUF_STATE_DEQUEUED)
2026 		return;
2027 
2028 	vb->state = VB2_BUF_STATE_DEQUEUED;
2029 
2030 	/* unmap DMABUF buffer */
2031 	if (q->memory == V4L2_MEMORY_DMABUF)
2032 		for (i = 0; i < vb->num_planes; ++i) {
2033 			if (!vb->planes[i].dbuf_mapped)
2034 				continue;
2035 			call_void_memop(vb, unmap_dmabuf, vb->planes[i].mem_priv);
2036 			vb->planes[i].dbuf_mapped = 0;
2037 		}
2038 }
2039 
2040 static int vb2_internal_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2041 {
2042 	struct vb2_buffer *vb = NULL;
2043 	int ret;
2044 
2045 	if (b->type != q->type) {
2046 		dprintk(1, "invalid buffer type\n");
2047 		return -EINVAL;
2048 	}
2049 	ret = __vb2_get_done_vb(q, &vb, b, nonblocking);
2050 	if (ret < 0)
2051 		return ret;
2052 
2053 	switch (vb->state) {
2054 	case VB2_BUF_STATE_DONE:
2055 		dprintk(3, "returning done buffer\n");
2056 		break;
2057 	case VB2_BUF_STATE_ERROR:
2058 		dprintk(3, "returning done buffer with errors\n");
2059 		break;
2060 	default:
2061 		dprintk(1, "invalid buffer state\n");
2062 		return -EINVAL;
2063 	}
2064 
2065 	call_void_vb_qop(vb, buf_finish, vb);
2066 
2067 	/* Fill buffer information for the userspace */
2068 	__fill_v4l2_buffer(vb, b);
2069 	/* Remove from videobuf queue */
2070 	list_del(&vb->queued_entry);
2071 	q->queued_count--;
2072 	/* go back to dequeued state */
2073 	__vb2_dqbuf(vb);
2074 
2075 	dprintk(1, "dqbuf of buffer %d, with state %d\n",
2076 			vb->v4l2_buf.index, vb->state);
2077 
2078 	return 0;
2079 }
2080 
2081 /**
2082  * vb2_dqbuf() - Dequeue a buffer to the userspace
2083  * @q:		videobuf2 queue
2084  * @b:		buffer structure passed from userspace to vidioc_dqbuf handler
2085  *		in driver
2086  * @nonblocking: if true, this call will not sleep waiting for a buffer if no
2087  *		 buffers ready for dequeuing are present. Normally the driver
2088  *		 would be passing (file->f_flags & O_NONBLOCK) here
2089  *
2090  * Should be called from vidioc_dqbuf ioctl handler of a driver.
2091  * This function:
2092  * 1) verifies the passed buffer,
2093  * 2) calls buf_finish callback in the driver (if provided), in which
2094  *    driver can perform any additional operations that may be required before
2095  *    returning the buffer to userspace, such as cache sync,
2096  * 3) the buffer struct members are filled with relevant information for
2097  *    the userspace.
2098  *
2099  * The return values from this function are intended to be directly returned
2100  * from vidioc_dqbuf handler in driver.
2101  */
2102 int vb2_dqbuf(struct vb2_queue *q, struct v4l2_buffer *b, bool nonblocking)
2103 {
2104 	if (vb2_fileio_is_active(q)) {
2105 		dprintk(1, "file io in progress\n");
2106 		return -EBUSY;
2107 	}
2108 	return vb2_internal_dqbuf(q, b, nonblocking);
2109 }
2110 EXPORT_SYMBOL_GPL(vb2_dqbuf);
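
/*
 * Illustrative sketch (not part of the original source): the qbuf/dqbuf
 * handlers above serve the usual userspace streaming cycle, roughly:
 *
 *	struct v4l2_buffer b;
 *
 *	memset(&b, 0, sizeof(b));
 *	b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	b.memory = V4L2_MEMORY_MMAP;
 *	ioctl(fd, VIDIOC_DQBUF, &b);
 *	process(buffers[b.index], b.bytesused);
 *	ioctl(fd, VIDIOC_QBUF, &b);
 *
 * VIDIOC_DQBUF sleeps in __vb2_wait_for_done_vb() unless fd was opened with
 * O_NONBLOCK; fd, buffers[] and process() are hypothetical userspace names,
 * and the ioctls reach vb2_qbuf()/vb2_dqbuf() through the driver's vidioc
 * handlers.
 */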
2111 
2112 /**
2113  * __vb2_queue_cancel() - cancel and stop (pause) streaming
2114  *
2115  * Removes all queued buffers from driver's queue and all buffers queued by
2116  * userspace from videobuf's queue. Returns to state after reqbufs.
2117  */
2118 static void __vb2_queue_cancel(struct vb2_queue *q)
2119 {
2120 	unsigned int i;
2121 
2122 	/*
2123 	 * Tell driver to stop all transactions and release all queued
2124 	 * buffers.
2125 	 */
2126 	if (q->start_streaming_called)
2127 		call_void_qop(q, stop_streaming, q);
2128 
2129 	/*
2130 	 * If you see this warning, then the driver isn't cleaning up properly
2131 	 * in stop_streaming(). See the stop_streaming() documentation in
2132  * videobuf2-core.h for more information on how buffers should be returned
2133 	 * to vb2 in stop_streaming().
2134 	 */
2135 	if (WARN_ON(atomic_read(&q->owned_by_drv_count))) {
2136 		for (i = 0; i < q->num_buffers; ++i)
2137 			if (q->bufs[i]->state == VB2_BUF_STATE_ACTIVE)
2138 				vb2_buffer_done(q->bufs[i], VB2_BUF_STATE_ERROR);
2139 		/* Must be zero now */
2140 		WARN_ON(atomic_read(&q->owned_by_drv_count));
2141 	}
2142 
2143 	q->streaming = 0;
2144 	q->start_streaming_called = 0;
2145 	q->queued_count = 0;
2146 	q->error = 0;
2147 
2148 	/*
2149 	 * Remove all buffers from videobuf's list...
2150 	 */
2151 	INIT_LIST_HEAD(&q->queued_list);
2152 	/*
2153 	 * ...and done list; userspace will not receive any buffers it
2154 	 * has not already dequeued before initiating cancel.
2155 	 */
2156 	INIT_LIST_HEAD(&q->done_list);
2157 	atomic_set(&q->owned_by_drv_count, 0);
2158 	wake_up_all(&q->done_wq);
2159 
2160 	/*
2161 	 * Reinitialize all buffers for next use.
2162 	 * Make sure to call buf_finish for any queued buffers. Normally
2163 	 * that's done in dqbuf, but that's not going to happen when we
2164 	 * cancel the whole queue. Note: this code belongs here, not in
2165 	 * __vb2_dqbuf() since in vb2_internal_dqbuf() there is a critical
2166 	 * call to __fill_v4l2_buffer() after buf_finish(). That order can't
2167 	 * be changed, so we can't move the buf_finish() to __vb2_dqbuf().
2168 	 */
2169 	for (i = 0; i < q->num_buffers; ++i) {
2170 		struct vb2_buffer *vb = q->bufs[i];
2171 
2172 		if (vb->state != VB2_BUF_STATE_DEQUEUED) {
2173 			vb->state = VB2_BUF_STATE_PREPARED;
2174 			call_void_vb_qop(vb, buf_finish, vb);
2175 		}
2176 		__vb2_dqbuf(vb);
2177 	}
2178 }
2179 
2180 static int vb2_internal_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
2181 {
2182 	int ret;
2183 
2184 	if (type != q->type) {
2185 		dprintk(1, "invalid stream type\n");
2186 		return -EINVAL;
2187 	}
2188 
2189 	if (q->streaming) {
2190 		dprintk(3, "already streaming\n");
2191 		return 0;
2192 	}
2193 
2194 	if (!q->num_buffers) {
2195 		dprintk(1, "no buffers have been allocated\n");
2196 		return -EINVAL;
2197 	}
2198 
2199 	if (q->num_buffers < q->min_buffers_needed) {
2200 		dprintk(1, "need at least %u allocated buffers\n",
2201 				q->min_buffers_needed);
2202 		return -EINVAL;
2203 	}
2204 
2205 	/*
2206 	 * Tell driver to start streaming provided sufficient buffers
2207 	 * are available.
2208 	 */
2209 	if (q->queued_count >= q->min_buffers_needed) {
2210 		ret = vb2_start_streaming(q);
2211 		if (ret) {
2212 			__vb2_queue_cancel(q);
2213 			return ret;
2214 		}
2215 	}
2216 
2217 	q->streaming = 1;
2218 
2219 	dprintk(3, "successful\n");
2220 	return 0;
2221 }
2222 
2223 /**
2224  * vb2_queue_error() - signal a fatal error on the queue
2225  * @q:		videobuf2 queue
2226  *
2227  * Flag that a fatal unrecoverable error has occurred and wake up all processes
2228  * waiting on the queue. Polling will now set POLLERR and queuing and dequeuing
2229  * buffers will return -EIO.
2230  *
2231  * The error flag will be cleared when cancelling the queue, either from
2232  * vb2_streamoff or vb2_queue_release. Drivers should thus not call this
2233  * function before starting the stream, otherwise the error flag will remain set
2234  * until the queue is released when closing the device node.
2235  */
2236 void vb2_queue_error(struct vb2_queue *q)
2237 {
2238 	q->error = 1;
2239 
2240 	wake_up_all(&q->done_wq);
2241 }
2242 EXPORT_SYMBOL_GPL(vb2_queue_error);
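
/*
 * Illustrative sketch (not part of the original source): a typical caller is
 * a driver that detects an unrecoverable fault while streaming, e.g. in its
 * interrupt or error-completion path:
 *
 *	if (my_hw_fault(dev)) {
 *		dev_err(dev->dev, "fatal DMA error, marking queue as errored\n");
 *		vb2_queue_error(&dev->queue);
 *	}
 *
 * my_hw_fault, dev->dev and dev->queue are hypothetical driver-side names;
 * after this call poll() reports POLLERR and qbuf/dqbuf return -EIO until
 * the queue is cancelled, as described above.
 */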
2243 
2244 /**
2245  * vb2_streamon - start streaming
2246  * @q:		videobuf2 queue
2247  * @type:	type argument passed from userspace to vidioc_streamon handler
2248  *
2249  * Should be called from vidioc_streamon handler of a driver.
2250  * This function:
2251  * 1) verifies current state
2252  * 2) passes any previously queued buffers to the driver and starts streaming
2253  *
2254  * The return values from this function are intended to be directly returned
2255  * from vidioc_streamon handler in the driver.
2256  */
2257 int vb2_streamon(struct vb2_queue *q, enum v4l2_buf_type type)
2258 {
2259 	if (vb2_fileio_is_active(q)) {
2260 		dprintk(1, "file io in progress\n");
2261 		return -EBUSY;
2262 	}
2263 	return vb2_internal_streamon(q, type);
2264 }
2265 EXPORT_SYMBOL_GPL(vb2_streamon);
2266 
2267 static int vb2_internal_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2268 {
2269 	if (type != q->type) {
2270 		dprintk(1, "invalid stream type\n");
2271 		return -EINVAL;
2272 	}
2273 
2274 	/*
2275 	 * Cancel will pause streaming and remove all buffers from the driver
2276 	 * and videobuf, effectively returning control over them to userspace.
2277 	 *
2278 	 * Note that we do this even if q->streaming == 0: if you prepare or
2279 	 * queue buffers, and then call streamoff without ever having called
2280 	 * streamon, you would still expect those buffers to be returned to
2281 	 * their normal dequeued state.
2282 	 */
2283 	__vb2_queue_cancel(q);
2284 	q->waiting_for_buffers = !V4L2_TYPE_IS_OUTPUT(q->type);
2285 
2286 	dprintk(3, "successful\n");
2287 	return 0;
2288 }
2289 
2290 /**
2291  * vb2_streamoff - stop streaming
2292  * @q:		videobuf2 queue
2293  * @type:	type argument passed from userspace to vidioc_streamoff handler
2294  *
2295  * Should be called from vidioc_streamoff handler of a driver.
2296  * This function:
2297  * 1) verifies current state,
2298  * 2) stops streaming and dequeues any queued buffers, including those previously
2299  *    passed to the driver (after waiting for the driver to finish).
2300  *
2301  * This call can be used for pausing playback.
2302  * The return values from this function are intended to be directly returned
2303  * from vidioc_streamoff handler in the driver.
2304  */
2305 int vb2_streamoff(struct vb2_queue *q, enum v4l2_buf_type type)
2306 {
2307 	if (vb2_fileio_is_active(q)) {
2308 		dprintk(1, "file io in progress\n");
2309 		return -EBUSY;
2310 	}
2311 	return vb2_internal_streamoff(q, type);
2312 }
2313 EXPORT_SYMBOL_GPL(vb2_streamoff);
2314 
2315 /**
2316  * __find_plane_by_offset() - find plane associated with the given offset off
2317  */
2318 static int __find_plane_by_offset(struct vb2_queue *q, unsigned long off,
2319 			unsigned int *_buffer, unsigned int *_plane)
2320 {
2321 	struct vb2_buffer *vb;
2322 	unsigned int buffer, plane;
2323 
2324 	/*
2325 	 * Go over all buffers and their planes, comparing the given offset
2326 	 * with an offset assigned to each plane. If a match is found,
2327 	 * return its buffer and plane numbers.
2328 	 */
2329 	for (buffer = 0; buffer < q->num_buffers; ++buffer) {
2330 		vb = q->bufs[buffer];
2331 
2332 		for (plane = 0; plane < vb->num_planes; ++plane) {
2333 			if (vb->v4l2_planes[plane].m.mem_offset == off) {
2334 				*_buffer = buffer;
2335 				*_plane = plane;
2336 				return 0;
2337 			}
2338 		}
2339 	}
2340 
2341 	return -EINVAL;
2342 }
2343 
2344 /**
2345  * vb2_expbuf() - Export a buffer as a file descriptor
2346  * @q:		videobuf2 queue
2347  * @eb:		export buffer structure passed from userspace to vidioc_expbuf
2348  *		handler in driver
2349  *
2350  * The return values from this function are intended to be directly returned
2351  * from vidioc_expbuf handler in driver.
2352  */
2353 int vb2_expbuf(struct vb2_queue *q, struct v4l2_exportbuffer *eb)
2354 {
2355 	struct vb2_buffer *vb = NULL;
2356 	struct vb2_plane *vb_plane;
2357 	int ret;
2358 	struct dma_buf *dbuf;
2359 
2360 	if (q->memory != V4L2_MEMORY_MMAP) {
2361 		dprintk(1, "queue is not currently set up for mmap\n");
2362 		return -EINVAL;
2363 	}
2364 
2365 	if (!q->mem_ops->get_dmabuf) {
2366 		dprintk(1, "queue does not support DMA buffer exporting\n");
2367 		return -EINVAL;
2368 	}
2369 
2370 	if (eb->flags & ~(O_CLOEXEC | O_ACCMODE)) {
2371 		dprintk(1, "queue supports only O_CLOEXEC and access mode flags\n");
2372 		return -EINVAL;
2373 	}
2374 
2375 	if (eb->type != q->type) {
2376 		dprintk(1, "invalid buffer type\n");
2377 		return -EINVAL;
2378 	}
2379 
2380 	if (eb->index >= q->num_buffers) {
2381 		dprintk(1, "buffer index out of range\n");
2382 		return -EINVAL;
2383 	}
2384 
2385 	vb = q->bufs[eb->index];
2386 
2387 	if (eb->plane >= vb->num_planes) {
2388 		dprintk(1, "buffer plane out of range\n");
2389 		return -EINVAL;
2390 	}
2391 
2392 	if (vb2_fileio_is_active(q)) {
2393 		dprintk(1, "expbuf: file io in progress\n");
2394 		return -EBUSY;
2395 	}
2396 
2397 	vb_plane = &vb->planes[eb->plane];
2398 
2399 	dbuf = call_ptr_memop(vb, get_dmabuf, vb_plane->mem_priv, eb->flags & O_ACCMODE);
2400 	if (IS_ERR_OR_NULL(dbuf)) {
2401 		dprintk(1, "failed to export buffer %d, plane %d\n",
2402 			eb->index, eb->plane);
2403 		return -EINVAL;
2404 	}
2405 
2406 	ret = dma_buf_fd(dbuf, eb->flags & ~O_ACCMODE);
2407 	if (ret < 0) {
2408 		dprintk(3, "buffer %d, plane %d failed to export (%d)\n",
2409 			eb->index, eb->plane, ret);
2410 		dma_buf_put(dbuf);
2411 		return ret;
2412 	}
2413 
2414 	dprintk(3, "buffer %d, plane %d exported as %d descriptor\n",
2415 		eb->index, eb->plane, ret);
2416 	eb->fd = ret;
2417 
2418 	return 0;
2419 }
2420 EXPORT_SYMBOL_GPL(vb2_expbuf);
2421 
2422 /**
2423  * vb2_mmap() - map video buffers into application address space
2424  * @q:		videobuf2 queue
2425  * @vma:	vma passed to the mmap file operation handler in the driver
2426  *
2427  * Should be called from mmap file operation handler of a driver.
2428  * This function maps one plane of one of the available video buffers to
2429  * userspace. To map the whole video memory allocated on reqbufs, this function
2430  * has to be called once for each plane of each previously allocated buffer.
2431  *
2432  * When the userspace application calls mmap, it passes an offset returned
2433  * to it earlier by the vidioc_querybuf handler. That offset acts as
2434  * a "cookie", which is then used to identify the plane to be mapped.
2435  * This function finds a plane with a matching offset, and the mapping is
2436  * performed by means of the provided memory operation.
2437  *
2438  * The return values from this function are intended to be directly returned
2439  * from the mmap handler in driver.
2440  */
2441 int vb2_mmap(struct vb2_queue *q, struct vm_area_struct *vma)
2442 {
2443 	unsigned long off = vma->vm_pgoff << PAGE_SHIFT;
2444 	struct vb2_buffer *vb;
2445 	unsigned int buffer = 0, plane = 0;
2446 	int ret;
2447 	unsigned long length;
2448 
2449 	if (q->memory != V4L2_MEMORY_MMAP) {
2450 		dprintk(1, "queue is not currently set up for mmap\n");
2451 		return -EINVAL;
2452 	}
2453 
2454 	/*
2455 	 * Check memory area access mode.
2456 	 */
2457 	if (!(vma->vm_flags & VM_SHARED)) {
2458 		dprintk(1, "invalid vma flags, VM_SHARED needed\n");
2459 		return -EINVAL;
2460 	}
2461 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
2462 		if (!(vma->vm_flags & VM_WRITE)) {
2463 			dprintk(1, "invalid vma flags, VM_WRITE needed\n");
2464 			return -EINVAL;
2465 		}
2466 	} else {
2467 		if (!(vma->vm_flags & VM_READ)) {
2468 			dprintk(1, "invalid vma flags, VM_READ needed\n");
2469 			return -EINVAL;
2470 		}
2471 	}
2472 	if (vb2_fileio_is_active(q)) {
2473 		dprintk(1, "mmap: file io in progress\n");
2474 		return -EBUSY;
2475 	}
2476 
2477 	/*
2478 	 * Find the plane corresponding to the offset passed by userspace.
2479 	 */
2480 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
2481 	if (ret)
2482 		return ret;
2483 
2484 	vb = q->bufs[buffer];
2485 
2486 	/*
2487 	 * MMAP requires page_aligned buffers.
2488 	 * The buffer length was page_aligned at __vb2_buf_mem_alloc(),
2489 	 * so, we need to do the same here.
2490 	 */
2491 	length = PAGE_ALIGN(vb->v4l2_planes[plane].length);
2492 	if (length < (vma->vm_end - vma->vm_start)) {
2493 		dprintk(1,
2494 			"MMAP invalid, as it would overflow buffer length\n");
2495 		return -EINVAL;
2496 	}
2497 
2498 	mutex_lock(&q->mmap_lock);
2499 	ret = call_memop(vb, mmap, vb->planes[plane].mem_priv, vma);
2500 	mutex_unlock(&q->mmap_lock);
2501 	if (ret)
2502 		return ret;
2503 
2504 	dprintk(3, "buffer %d, plane %d successfully mapped\n", buffer, plane);
2505 	return 0;
2506 }
2507 EXPORT_SYMBOL_GPL(vb2_mmap);
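
/*
 * Illustrative sketch (not part of the original source): the "cookie" offset
 * described above comes from VIDIOC_QUERYBUF, and userspace typically maps
 * each plane like this (single-planar case, hypothetical names):
 *
 *	struct v4l2_buffer b;
 *
 *	memset(&b, 0, sizeof(b));
 *	b.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	b.memory = V4L2_MEMORY_MMAP;
 *	b.index = i;
 *	ioctl(fd, VIDIOC_QUERYBUF, &b);
 *	buffers[i] = mmap(NULL, b.length, PROT_READ | PROT_WRITE,
 *			  MAP_SHARED, fd, b.m.offset);
 *
 * The mmap() call ends up here, and __find_plane_by_offset() matches
 * b.m.offset against the mem_offset cookie assigned to each plane.
 */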
2508 
2509 #ifndef CONFIG_MMU
2510 unsigned long vb2_get_unmapped_area(struct vb2_queue *q,
2511 				    unsigned long addr,
2512 				    unsigned long len,
2513 				    unsigned long pgoff,
2514 				    unsigned long flags)
2515 {
2516 	unsigned long off = pgoff << PAGE_SHIFT;
2517 	struct vb2_buffer *vb;
2518 	unsigned int buffer, plane;
2519 	void *vaddr;
2520 	int ret;
2521 
2522 	if (q->memory != V4L2_MEMORY_MMAP) {
2523 		dprintk(1, "queue is not currently set up for mmap\n");
2524 		return -EINVAL;
2525 	}
2526 
2527 	/*
2528 	 * Find the plane corresponding to the offset passed by userspace.
2529 	 */
2530 	ret = __find_plane_by_offset(q, off, &buffer, &plane);
2531 	if (ret)
2532 		return ret;
2533 
2534 	vb = q->bufs[buffer];
2535 
2536 	vaddr = vb2_plane_vaddr(vb, plane);
2537 	return vaddr ? (unsigned long)vaddr : -EINVAL;
2538 }
2539 EXPORT_SYMBOL_GPL(vb2_get_unmapped_area);
2540 #endif
2541 
2542 static int __vb2_init_fileio(struct vb2_queue *q, int read);
2543 static int __vb2_cleanup_fileio(struct vb2_queue *q);
2544 
2545 /**
2546  * vb2_poll() - implements poll userspace operation
2547  * @q:		videobuf2 queue
2548  * @file:	file argument passed to the poll file operation handler
2549  * @wait:	wait argument passed to the poll file operation handler
2550  *
2551  * This function implements poll file operation handler for a driver.
2552  * For CAPTURE queues, if a buffer is ready to be dequeued, the userspace will
2553  * be informed that the file descriptor of a video device is available for
2554  * reading.
2555  * For OUTPUT queues, if a buffer is ready to be dequeued, the file descriptor
2556  * will be reported as available for writing.
2557  *
2558  * If the driver uses struct v4l2_fh, then vb2_poll() will also check for any
2559  * pending events.
2560  *
2561  * The return values from this function are intended to be directly returned
2562  * from poll handler in driver.
2563  */
2564 unsigned int vb2_poll(struct vb2_queue *q, struct file *file, poll_table *wait)
2565 {
2566 	struct video_device *vfd = video_devdata(file);
2567 	unsigned long req_events = poll_requested_events(wait);
2568 	struct vb2_buffer *vb = NULL;
2569 	unsigned int res = 0;
2570 	unsigned long flags;
2571 
2572 	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
2573 		struct v4l2_fh *fh = file->private_data;
2574 
2575 		if (v4l2_event_pending(fh))
2576 			res = POLLPRI;
2577 		else if (req_events & POLLPRI)
2578 			poll_wait(file, &fh->wait, wait);
2579 	}
2580 
2581 	if (!V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLIN | POLLRDNORM)))
2582 		return res;
2583 	if (V4L2_TYPE_IS_OUTPUT(q->type) && !(req_events & (POLLOUT | POLLWRNORM)))
2584 		return res;
2585 
2586 	/*
2587 	 * Start file I/O emulator only if streaming API has not been used yet.
2588 	 */
2589 	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
2590 		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
2591 				(req_events & (POLLIN | POLLRDNORM))) {
2592 			if (__vb2_init_fileio(q, 1))
2593 				return res | POLLERR;
2594 		}
2595 		if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
2596 				(req_events & (POLLOUT | POLLWRNORM))) {
2597 			if (__vb2_init_fileio(q, 0))
2598 				return res | POLLERR;
2599 			/*
2600 			 * Write to OUTPUT queue can be done immediately.
2601 			 */
2602 			return res | POLLOUT | POLLWRNORM;
2603 		}
2604 	}
2605 
2606 	/*
2607 	 * There is nothing to wait for if the queue isn't streaming, or if the
2608 	 * error flag is set.
2609 	 */
2610 	if (!vb2_is_streaming(q) || q->error)
2611 		return res | POLLERR;
2612 	/*
2613 	 * For compatibility with vb1: if QBUF hasn't been called yet, then
2614 	 * return POLLERR as well. This only affects capture queues, output
2615 	 * queues will always initialize waiting_for_buffers to false.
2616 	 */
2617 	if (q->waiting_for_buffers)
2618 		return res | POLLERR;
2619 
2620 	/*
2621 	 * For output streams you can call write() as long as there are fewer
2622 	 * buffers queued than there are buffers available.
2623 	 */
2624 	if (V4L2_TYPE_IS_OUTPUT(q->type) && q->fileio && q->queued_count < q->num_buffers)
2625 		return res | POLLOUT | POLLWRNORM;
2626 
2627 	if (list_empty(&q->done_list))
2628 		poll_wait(file, &q->done_wq, wait);
2629 
2630 	/*
2631 	 * Take first buffer available for dequeuing.
2632 	 */
2633 	spin_lock_irqsave(&q->done_lock, flags);
2634 	if (!list_empty(&q->done_list))
2635 		vb = list_first_entry(&q->done_list, struct vb2_buffer,
2636 					done_entry);
2637 	spin_unlock_irqrestore(&q->done_lock, flags);
2638 
2639 	if (vb && (vb->state == VB2_BUF_STATE_DONE
2640 			|| vb->state == VB2_BUF_STATE_ERROR)) {
2641 		return (V4L2_TYPE_IS_OUTPUT(q->type)) ?
2642 				res | POLLOUT | POLLWRNORM :
2643 				res | POLLIN | POLLRDNORM;
2644 	}
2645 	return res;
2646 }
2647 EXPORT_SYMBOL_GPL(vb2_poll);
2648 
2649 /**
2650  * vb2_queue_init() - initialize a videobuf2 queue
2651  * @q:		videobuf2 queue; this structure should be allocated in driver
2652  *
2653  * The vb2_queue structure should be allocated by the driver. The driver is
2654  * responsible for clearing its content and setting initial values for some
2655  * required entries before calling this function.
2656  * q->ops, q->mem_ops, q->type and q->io_modes are mandatory. Please refer
2657  * to the struct vb2_queue description in include/media/videobuf2-core.h
2658  * for more information.
2659  */
2660 int vb2_queue_init(struct vb2_queue *q)
2661 {
2662 	/*
2663 	 * Sanity check
2664 	 */
2665 	if (WARN_ON(!q)			  ||
2666 	    WARN_ON(!q->ops)		  ||
2667 	    WARN_ON(!q->mem_ops)	  ||
2668 	    WARN_ON(!q->type)		  ||
2669 	    WARN_ON(!q->io_modes)	  ||
2670 	    WARN_ON(!q->ops->queue_setup) ||
2671 	    WARN_ON(!q->ops->buf_queue)   ||
2672 	    WARN_ON(q->timestamp_flags &
2673 		    ~(V4L2_BUF_FLAG_TIMESTAMP_MASK |
2674 		      V4L2_BUF_FLAG_TSTAMP_SRC_MASK)))
2675 		return -EINVAL;
2676 
2677 	/* Warn that the driver should choose an appropriate timestamp type */
2678 	WARN_ON((q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2679 		V4L2_BUF_FLAG_TIMESTAMP_UNKNOWN);
2680 
2681 	INIT_LIST_HEAD(&q->queued_list);
2682 	INIT_LIST_HEAD(&q->done_list);
2683 	spin_lock_init(&q->done_lock);
2684 	mutex_init(&q->mmap_lock);
2685 	init_waitqueue_head(&q->done_wq);
2686 
2687 	if (q->buf_struct_size == 0)
2688 		q->buf_struct_size = sizeof(struct vb2_buffer);
2689 
2690 	return 0;
2691 }
2692 EXPORT_SYMBOL_GPL(vb2_queue_init);
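
/*
 * Illustrative sketch (not part of the original source): a driver typically
 * fills in the mandatory fields and then calls vb2_queue_init(), e.g. from
 * its probe or open path:
 *
 *	struct vb2_queue *q = &dev->queue;
 *
 *	memset(q, 0, sizeof(*q));
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_READ;
 *	q->drv_priv = dev;
 *	q->ops = &my_vb2_ops;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	q->buf_struct_size = sizeof(struct my_buffer);
 *	q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
 *	return vb2_queue_init(q);
 *
 * dev, my_vb2_ops and my_buffer are hypothetical driver-side names, and
 * my_vb2_ops must provide at least queue_setup and buf_queue, as checked
 * above; vb2_vmalloc_memops is one of the real allocator implementations
 * (videobuf2-vmalloc) that can be plugged into mem_ops.
 */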
2693 
2694 /**
2695  * vb2_queue_release() - stop streaming, release the queue and free memory
2696  * @q:		videobuf2 queue
2697  *
2698  * This function stops streaming and performs necessary clean ups, including
2699  * freeing video buffer memory. The driver is responsible for freeing
2700  * the vb2_queue structure itself.
2701  */
2702 void vb2_queue_release(struct vb2_queue *q)
2703 {
2704 	__vb2_cleanup_fileio(q);
2705 	__vb2_queue_cancel(q);
2706 	mutex_lock(&q->mmap_lock);
2707 	__vb2_queue_free(q, q->num_buffers);
2708 	mutex_unlock(&q->mmap_lock);
2709 }
2710 EXPORT_SYMBOL_GPL(vb2_queue_release);
2711 
2712 /**
2713  * struct vb2_fileio_buf - buffer context used by file io emulator
2714  *
2715  * vb2 provides a compatibility layer and emulator of file io (read and
2716  * write) calls on top of streaming API. This structure is used for
2717  * tracking context related to the buffers.
2718  */
2719 struct vb2_fileio_buf {
2720 	void *vaddr;
2721 	unsigned int size;
2722 	unsigned int pos;
2723 	unsigned int queued:1;
2724 };
2725 
2726 /**
2727  * struct vb2_fileio_data - queue context used by file io emulator
2728  *
2729  * @cur_index:	the index of the buffer currently being read from or
2730  *		written to. If equal to q->num_buffers then a new buffer
2731  *		must be dequeued.
2732  * @initial_index: in the read() case all buffers are queued up immediately
2733  *		in __vb2_init_fileio() and __vb2_perform_fileio() just cycles
2734  *		buffers. However, in the write() case no buffers are initially
2735  *		queued; instead, whenever a buffer is full it is queued up by
2736  *		__vb2_perform_fileio(). Only once all available buffers have
2737  *		been queued up will __vb2_perform_fileio() start to dequeue
2738  *		buffers. This means that initially __vb2_perform_fileio()
2739  *		needs to know what buffer index to use when it is queuing up
2740  *		the buffers for the first time. That initial index is stored
2741  *		in this field. Once it is equal to q->num_buffers all
2742  *		available buffers have been queued and __vb2_perform_fileio()
2743  *		should start the normal dequeue/queue cycle.
2744  *
2745  * vb2 provides a compatibility layer and emulator of file io (read and
2746  * write) calls on top of streaming API. For proper operation it requires
2747  * this structure to save the driver state between each call of the read
2748  * or write function.
2749  */
2750 struct vb2_fileio_data {
2751 	struct v4l2_requestbuffers req;
2752 	struct v4l2_plane p;
2753 	struct v4l2_buffer b;
2754 	struct vb2_fileio_buf bufs[VIDEO_MAX_FRAME];
2755 	unsigned int cur_index;
2756 	unsigned int initial_index;
2757 	unsigned int q_count;
2758 	unsigned int dq_count;
2759 	unsigned int flags;
2760 };
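
/*
 * Worked example (illustrative, not part of the original source): with
 * three buffers in write() mode both cur_index and initial_index start at
 * 0. Filling and queuing buffer 0 bumps initial_index to 1 and sets
 * cur_index = initial_index = 1, and likewise for buffers 1 and 2. Once
 * buffer 2 has been queued both fields equal q->num_buffers (3), so every
 * subsequent iteration of __vb2_perform_fileio() starts by dequeuing a
 * buffer and reusing its index.
 */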
2761 
2762 /**
2763  * __vb2_init_fileio() - initialize file io emulator
2764  * @q:		videobuf2 queue
2765  * @read:	mode selector (1 means read, 0 means write)
2766  */
2767 static int __vb2_init_fileio(struct vb2_queue *q, int read)
2768 {
2769 	struct vb2_fileio_data *fileio;
2770 	int i, ret;
2771 	unsigned int count = 0;
2772 
2773 	/*
2774 	 * Sanity check
2775 	 */
2776 	if (WARN_ON((read && !(q->io_modes & VB2_READ)) ||
2777 		    (!read && !(q->io_modes & VB2_WRITE))))
2778 		return -EINVAL;
2779 
2780 	/*
2781 	 * Check if device supports mapping buffers to kernel virtual space.
2782 	 */
2783 	if (!q->mem_ops->vaddr)
2784 		return -EBUSY;
2785 
2786 	/*
2787 	 * Check if the streaming API has not already been activated.
2788 	 */
2789 	if (q->streaming || q->num_buffers > 0)
2790 		return -EBUSY;
2791 
2792 	/*
2793 	 * Start with count 1, driver can increase it in queue_setup()
2794 	 */
2795 	count = 1;
2796 
2797 	dprintk(3, "setting up file io: mode %s, count %d, flags %08x\n",
2798 		(read) ? "read" : "write", count, q->io_flags);
2799 
2800 	fileio = kzalloc(sizeof(struct vb2_fileio_data), GFP_KERNEL);
2801 	if (fileio == NULL)
2802 		return -ENOMEM;
2803 
2804 	fileio->flags = q->io_flags;
2805 
2806 	/*
2807 	 * Request buffers and use MMAP type to force driver
2808 	 * to allocate buffers by itself.
2809 	 */
2810 	fileio->req.count = count;
2811 	fileio->req.memory = V4L2_MEMORY_MMAP;
2812 	fileio->req.type = q->type;
2813 	q->fileio = fileio;
2814 	ret = __reqbufs(q, &fileio->req);
2815 	if (ret)
2816 		goto err_kfree;
2817 
2818 	/*
2819 	 * Check if plane_count is correct
2820 	 * (multiplane buffers are not supported).
2821 	 */
2822 	if (q->bufs[0]->num_planes != 1) {
2823 		ret = -EBUSY;
2824 		goto err_reqbufs;
2825 	}
2826 
2827 	/*
2828 	 * Get kernel address of each buffer.
2829 	 */
2830 	for (i = 0; i < q->num_buffers; i++) {
2831 		fileio->bufs[i].vaddr = vb2_plane_vaddr(q->bufs[i], 0);
2832 		if (fileio->bufs[i].vaddr == NULL) {
2833 			ret = -EINVAL;
2834 			goto err_reqbufs;
2835 		}
2836 		fileio->bufs[i].size = vb2_plane_size(q->bufs[i], 0);
2837 	}
2838 
2839 	/*
2840 	 * Read mode requires pre queuing of all buffers.
2841 	 */
2842 	if (read) {
2843 		bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
2844 
2845 		/*
2846 		 * Queue all buffers.
2847 		 */
2848 		for (i = 0; i < q->num_buffers; i++) {
2849 			struct v4l2_buffer *b = &fileio->b;
2850 
2851 			memset(b, 0, sizeof(*b));
2852 			b->type = q->type;
2853 			if (is_multiplanar) {
2854 				memset(&fileio->p, 0, sizeof(fileio->p));
2855 				b->m.planes = &fileio->p;
2856 				b->length = 1;
2857 			}
2858 			b->memory = q->memory;
2859 			b->index = i;
2860 			ret = vb2_internal_qbuf(q, b);
2861 			if (ret)
2862 				goto err_reqbufs;
2863 			fileio->bufs[i].queued = 1;
2864 		}
2865 		/*
2866 		 * All buffers have been queued, so mark that by setting
2867 		 * initial_index to q->num_buffers
2868 		 */
2869 		fileio->initial_index = q->num_buffers;
2870 		fileio->cur_index = q->num_buffers;
2871 	}
2872 
2873 	/*
2874 	 * Start streaming.
2875 	 */
2876 	ret = vb2_internal_streamon(q, q->type);
2877 	if (ret)
2878 		goto err_reqbufs;
2879 
2880 	return ret;
2881 
2882 err_reqbufs:
2883 	fileio->req.count = 0;
2884 	__reqbufs(q, &fileio->req);
2885 
2886 err_kfree:
2887 	q->fileio = NULL;
2888 	kfree(fileio);
2889 	return ret;
2890 }
2891 
2892 /**
2893  * __vb2_cleanup_fileio() - free resources used by file io emulator
2894  * @q:		videobuf2 queue
2895  */
2896 static int __vb2_cleanup_fileio(struct vb2_queue *q)
2897 {
2898 	struct vb2_fileio_data *fileio = q->fileio;
2899 
2900 	if (fileio) {
2901 		vb2_internal_streamoff(q, q->type);
2902 		q->fileio = NULL;
2903 		fileio->req.count = 0;
2904 		vb2_reqbufs(q, &fileio->req);
2905 		kfree(fileio);
2906 		dprintk(3, "file io emulator closed\n");
2907 	}
2908 	return 0;
2909 }
2910 
2911 /**
2912  * __vb2_perform_fileio() - perform a single file io (read or write) operation
2913  * @q:		videobuf2 queue
2914  * @data:	pointer to the target userspace buffer
2915  * @count:	number of bytes to read or write
2916  * @ppos:	file handle position tracking pointer
2917  * @nonblock:	mode selector (1 means nonblocking calls, 0 means blocking)
2918  * @read:	access mode selector (1 means read, 0 means write)
2919  */
2920 static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
2921 		loff_t *ppos, int nonblock, int read)
2922 {
2923 	struct vb2_fileio_data *fileio;
2924 	struct vb2_fileio_buf *buf;
2925 	bool is_multiplanar = V4L2_TYPE_IS_MULTIPLANAR(q->type);
2926 	/*
2927 	 * When using write() to write data to an output video node the vb2 core
2928 	 * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
2929 	 * else is able to provide this information with the write() operation.
2930 	 */
2931 	bool set_timestamp = !read &&
2932 		(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
2933 		V4L2_BUF_FLAG_TIMESTAMP_COPY;
2934 	int ret, index;
2935 
2936 	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
2937 		read ? "read" : "write", (long)*ppos, count,
2938 		nonblock ? "non" : "");
2939 
2940 	if (!data)
2941 		return -EINVAL;
2942 
2943 	/*
2944 	 * Initialize emulator on first call.
2945 	 */
2946 	if (!vb2_fileio_is_active(q)) {
2947 		ret = __vb2_init_fileio(q, read);
2948 		dprintk(3, "vb2_init_fileio result: %d\n", ret);
2949 		if (ret)
2950 			return ret;
2951 	}
2952 	fileio = q->fileio;
2953 
2954 	/*
2955 	 * Check if we need to dequeue the buffer.
2956 	 */
2957 	index = fileio->cur_index;
2958 	if (index >= q->num_buffers) {
2959 		/*
2960 		 * Call vb2_dqbuf to get buffer back.
2961 		 */
2962 		memset(&fileio->b, 0, sizeof(fileio->b));
2963 		fileio->b.type = q->type;
2964 		fileio->b.memory = q->memory;
2965 		if (is_multiplanar) {
2966 			memset(&fileio->p, 0, sizeof(fileio->p));
2967 			fileio->b.m.planes = &fileio->p;
2968 			fileio->b.length = 1;
2969 		}
2970 		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
2971 		dprintk(5, "vb2_dqbuf result: %d\n", ret);
2972 		if (ret)
2973 			return ret;
2974 		fileio->dq_count += 1;
2975 
2976 		fileio->cur_index = index = fileio->b.index;
2977 		buf = &fileio->bufs[index];
2978 
2979 		/*
2980 		 * Get number of bytes filled by the driver
2981 		 */
2982 		buf->pos = 0;
2983 		buf->queued = 0;
2984 		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
2985 				 : vb2_plane_size(q->bufs[index], 0);
2986 		/* Compensate for data_offset on read in the multiplanar case. */
2987 		if (is_multiplanar && read &&
2988 		    fileio->b.m.planes[0].data_offset < buf->size) {
2989 			buf->pos = fileio->b.m.planes[0].data_offset;
2990 			buf->size -= buf->pos;
2991 		}
2992 	} else {
2993 		buf = &fileio->bufs[index];
2994 	}
2995 
2996 	/*
2997 	 * Limit count on last few bytes of the buffer.
2998 	 */
2999 	if (buf->pos + count > buf->size) {
3000 		count = buf->size - buf->pos;
3001 		dprintk(5, "reducing read count: %zd\n", count);
3002 	}
3003 
3004 	/*
3005 	 * Transfer data to userspace.
3006 	 */
3007 	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
3008 		count, index, buf->pos);
3009 	if (read)
3010 		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
3011 	else
3012 		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
3013 	if (ret) {
3014 		dprintk(3, "error copying data\n");
3015 		return -EFAULT;
3016 	}
3017 
3018 	/*
3019 	 * Update counters.
3020 	 */
3021 	buf->pos += count;
3022 	*ppos += count;
3023 
3024 	/*
3025 	 * Queue next buffer if required.
3026 	 */
3027 	if (buf->pos == buf->size ||
3028 	   (!read && (fileio->flags & VB2_FILEIO_WRITE_IMMEDIATELY))) {
3029 		/*
3030 		 * Check if this is the last buffer to read.
3031 		 */
3032 		if (read && (fileio->flags & VB2_FILEIO_READ_ONCE) &&
3033 		    fileio->dq_count == 1) {
3034 			dprintk(3, "read limit reached\n");
3035 			return __vb2_cleanup_fileio(q);
3036 		}
3037 
3038 		/*
3039 		 * Call vb2_qbuf and give buffer to the driver.
3040 		 */
3041 		memset(&fileio->b, 0, sizeof(fileio->b));
3042 		fileio->b.type = q->type;
3043 		fileio->b.memory = q->memory;
3044 		fileio->b.index = index;
3045 		fileio->b.bytesused = buf->pos;
3046 		if (is_multiplanar) {
3047 			memset(&fileio->p, 0, sizeof(fileio->p));
3048 			fileio->p.bytesused = buf->pos;
3049 			fileio->b.m.planes = &fileio->p;
3050 			fileio->b.length = 1;
3051 		}
3052 		if (set_timestamp)
3053 			v4l2_get_timestamp(&fileio->b.timestamp);
3054 		ret = vb2_internal_qbuf(q, &fileio->b);
3055 		dprintk(5, "vb2_qbuf result: %d\n", ret);
3056 		if (ret)
3057 			return ret;
3058 
3059 		/*
3060 		 * Buffer has been queued, update the status
3061 		 */
3062 		buf->pos = 0;
3063 		buf->queued = 1;
3064 		buf->size = vb2_plane_size(q->bufs[index], 0);
3065 		fileio->q_count += 1;
3066 		/*
3067 		 * If we are queuing up buffers for the first time, then
3068 		 * increase initial_index by one.
3069 		 */
3070 		if (fileio->initial_index < q->num_buffers)
3071 			fileio->initial_index++;
3072 		/*
3073 		 * The next buffer to use is either a buffer that's going to be
3074 		 * queued for the first time (initial_index < q->num_buffers)
3075 		 * or it is equal to q->num_buffers, meaning that the next
3076 		 * call will have to dequeue a buffer since we've now queued up
3077 		 * all the 'first time' buffers.
3078 		 */
3079 		fileio->cur_index = fileio->initial_index;
3080 	}
3081 
3082 	/*
3083 	 * Return proper number of bytes processed.
3084 	 */
3085 	if (ret == 0)
3086 		ret = count;
3087 	return ret;
3088 }
3089 
3090 size_t vb2_read(struct vb2_queue *q, char __user *data, size_t count,
3091 		loff_t *ppos, int nonblocking)
3092 {
3093 	return __vb2_perform_fileio(q, data, count, ppos, nonblocking, 1);
3094 }
3095 EXPORT_SYMBOL_GPL(vb2_read);
3096 
3097 size_t vb2_write(struct vb2_queue *q, const char __user *data, size_t count,
3098 		loff_t *ppos, int nonblocking)
3099 {
3100 	return __vb2_perform_fileio(q, (char __user *) data, count,
3101 							ppos, nonblocking, 0);
3102 }
3103 EXPORT_SYMBOL_GPL(vb2_write);
3104 
3105 struct vb2_threadio_data {
3106 	struct task_struct *thread;
3107 	vb2_thread_fnc fnc;
3108 	void *priv;
3109 	bool stop;
3110 };
3111 
3112 static int vb2_thread(void *data)
3113 {
3114 	struct vb2_queue *q = data;
3115 	struct vb2_threadio_data *threadio = q->threadio;
3116 	struct vb2_fileio_data *fileio = q->fileio;
3117 	bool set_timestamp = false;
3118 	int prequeue = 0;
3119 	int index = 0;
3120 	int ret = 0;
3121 
3122 	if (V4L2_TYPE_IS_OUTPUT(q->type)) {
3123 		prequeue = q->num_buffers;
3124 		set_timestamp =
3125 			(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
3126 			V4L2_BUF_FLAG_TIMESTAMP_COPY;
3127 	}
3128 
3129 	set_freezable();
3130 
3131 	for (;;) {
3132 		struct vb2_buffer *vb;
3133 
3134 		/*
3135 		 * Call vb2_dqbuf to get buffer back.
3136 		 */
3137 		memset(&fileio->b, 0, sizeof(fileio->b));
3138 		fileio->b.type = q->type;
3139 		fileio->b.memory = q->memory;
3140 		if (prequeue) {
3141 			fileio->b.index = index++;
3142 			prequeue--;
3143 		} else {
3144 			call_void_qop(q, wait_finish, q);
3145 			if (!threadio->stop)
3146 				ret = vb2_internal_dqbuf(q, &fileio->b, 0);
3147 			call_void_qop(q, wait_prepare, q);
3148 			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
3149 		}
3150 		if (ret || threadio->stop)
3151 			break;
3152 		try_to_freeze();
3153 
3154 		vb = q->bufs[fileio->b.index];
3155 		if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
3156 			if (threadio->fnc(vb, threadio->priv))
3157 				break;
3158 		call_void_qop(q, wait_finish, q);
3159 		if (set_timestamp)
3160 			v4l2_get_timestamp(&fileio->b.timestamp);
3161 		if (!threadio->stop)
3162 			ret = vb2_internal_qbuf(q, &fileio->b);
3163 		call_void_qop(q, wait_prepare, q);
3164 		if (ret || threadio->stop)
3165 			break;
3166 	}
3167 
3168 	/* Hmm, linux becomes *very* unhappy without this ... */
3169 	while (!kthread_should_stop()) {
3170 		set_current_state(TASK_INTERRUPTIBLE);
3171 		schedule();
3172 	}
3173 	return 0;
3174 }
3175 
3176 /*
3177  * This function should not be used for anything else but the videobuf2-dvb
3178  * support. If you think you have another good use-case for this, then please
3179  * contact the linux-media mailinglist first.
3180  */
3181 int vb2_thread_start(struct vb2_queue *q, vb2_thread_fnc fnc, void *priv,
3182 		     const char *thread_name)
3183 {
3184 	struct vb2_threadio_data *threadio;
3185 	int ret = 0;
3186 
3187 	if (q->threadio)
3188 		return -EBUSY;
3189 	if (vb2_is_busy(q))
3190 		return -EBUSY;
3191 	if (WARN_ON(q->fileio))
3192 		return -EBUSY;
3193 
3194 	threadio = kzalloc(sizeof(*threadio), GFP_KERNEL);
3195 	if (threadio == NULL)
3196 		return -ENOMEM;
3197 	threadio->fnc = fnc;
3198 	threadio->priv = priv;
3199 
3200 	ret = __vb2_init_fileio(q, !V4L2_TYPE_IS_OUTPUT(q->type));
3201 	dprintk(3, "file io: vb2_init_fileio result: %d\n", ret);
3202 	if (ret)
3203 		goto nomem;
3204 	q->threadio = threadio;
3205 	threadio->thread = kthread_run(vb2_thread, q, "vb2-%s", thread_name);
3206 	if (IS_ERR(threadio->thread)) {
3207 		ret = PTR_ERR(threadio->thread);
3208 		threadio->thread = NULL;
3209 		goto nothread;
3210 	}
3211 	return 0;
3212 
3213 nothread:
3214 	__vb2_cleanup_fileio(q);
3215 nomem:
3216 	kfree(threadio);
3217 	return ret;
3218 }
3219 EXPORT_SYMBOL_GPL(vb2_thread_start);
3220 
3221 int vb2_thread_stop(struct vb2_queue *q)
3222 {
3223 	struct vb2_threadio_data *threadio = q->threadio;
3224 	int err;
3225 
3226 	if (threadio == NULL)
3227 		return 0;
3228 	threadio->stop = true;
3229 	/* Wake up all pending sleeps in the thread */
3230 	vb2_queue_error(q);
3231 	err = kthread_stop(threadio->thread);
3232 	__vb2_cleanup_fileio(q);
3233 	threadio->thread = NULL;
3234 	kfree(threadio);
3235 	q->threadio = NULL;
3236 	return err;
3237 }
3238 EXPORT_SYMBOL_GPL(vb2_thread_stop);
3239 
3240 /*
3241  * The following functions are not part of the vb2 core API, but are helper
3242  * functions that plug into struct v4l2_ioctl_ops, struct v4l2_file_operations
3243  * and struct vb2_ops.
3244  * They contain boilerplate code that most if not all drivers have to do
3245  * and so they simplify the driver code.
3246  */
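
/*
 * Illustrative sketch (not part of the original source): a driver using
 * these helpers typically wires them straight into its ops tables, e.g.:
 *
 *	static const struct v4l2_ioctl_ops my_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 *
 *	static const struct v4l2_file_operations my_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 *
 * my_ioctl_ops and my_fops are hypothetical driver-side names; the field
 * names are the standard ones from v4l2_ioctl_ops and v4l2_file_operations.
 */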
3247 
3248 /* The queue is busy if there is an owner and you are not that owner. */
3249 static inline bool vb2_queue_is_busy(struct video_device *vdev, struct file *file)
3250 {
3251 	return vdev->queue->owner && vdev->queue->owner != file->private_data;
3252 }
3253 
3254 /* vb2 ioctl helpers */
3255 
3256 int vb2_ioctl_reqbufs(struct file *file, void *priv,
3257 			  struct v4l2_requestbuffers *p)
3258 {
3259 	struct video_device *vdev = video_devdata(file);
3260 	int res = __verify_memory_type(vdev->queue, p->memory, p->type);
3261 
3262 	if (res)
3263 		return res;
3264 	if (vb2_queue_is_busy(vdev, file))
3265 		return -EBUSY;
3266 	res = __reqbufs(vdev->queue, p);
3267 	/* If count == 0, then the owner has released all buffers and is
3268 	   no longer the owner of the queue. Otherwise we have a new owner. */
3269 	if (res == 0)
3270 		vdev->queue->owner = p->count ? file->private_data : NULL;
3271 	return res;
3272 }
3273 EXPORT_SYMBOL_GPL(vb2_ioctl_reqbufs);
3274 
3275 int vb2_ioctl_create_bufs(struct file *file, void *priv,
3276 			  struct v4l2_create_buffers *p)
3277 {
3278 	struct video_device *vdev = video_devdata(file);
3279 	int res = __verify_memory_type(vdev->queue, p->memory, p->format.type);
3280 
3281 	p->index = vdev->queue->num_buffers;
3282 	/* If count == 0, then just check if memory and type are valid.
3283 	   Any -EBUSY result from __verify_memory_type can be mapped to 0. */
3284 	if (p->count == 0)
3285 		return res != -EBUSY ? res : 0;
3286 	if (res)
3287 		return res;
3288 	if (vb2_queue_is_busy(vdev, file))
3289 		return -EBUSY;
3290 	res = __create_bufs(vdev->queue, p);
3291 	if (res == 0)
3292 		vdev->queue->owner = file->private_data;
3293 	return res;
3294 }
3295 EXPORT_SYMBOL_GPL(vb2_ioctl_create_bufs);
3296 
3297 int vb2_ioctl_prepare_buf(struct file *file, void *priv,
3298 			  struct v4l2_buffer *p)
3299 {
3300 	struct video_device *vdev = video_devdata(file);
3301 
3302 	if (vb2_queue_is_busy(vdev, file))
3303 		return -EBUSY;
3304 	return vb2_prepare_buf(vdev->queue, p);
3305 }
3306 EXPORT_SYMBOL_GPL(vb2_ioctl_prepare_buf);
3307 
3308 int vb2_ioctl_querybuf(struct file *file, void *priv, struct v4l2_buffer *p)
3309 {
3310 	struct video_device *vdev = video_devdata(file);
3311 
3312 	/* No need to call vb2_queue_is_busy(), anyone can query buffers. */
3313 	return vb2_querybuf(vdev->queue, p);
3314 }
3315 EXPORT_SYMBOL_GPL(vb2_ioctl_querybuf);
3316 
3317 int vb2_ioctl_qbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3318 {
3319 	struct video_device *vdev = video_devdata(file);
3320 
3321 	if (vb2_queue_is_busy(vdev, file))
3322 		return -EBUSY;
3323 	return vb2_qbuf(vdev->queue, p);
3324 }
3325 EXPORT_SYMBOL_GPL(vb2_ioctl_qbuf);
3326 
3327 int vb2_ioctl_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p)
3328 {
3329 	struct video_device *vdev = video_devdata(file);
3330 
3331 	if (vb2_queue_is_busy(vdev, file))
3332 		return -EBUSY;
3333 	return vb2_dqbuf(vdev->queue, p, file->f_flags & O_NONBLOCK);
3334 }
3335 EXPORT_SYMBOL_GPL(vb2_ioctl_dqbuf);
3336 
3337 int vb2_ioctl_streamon(struct file *file, void *priv, enum v4l2_buf_type i)
3338 {
3339 	struct video_device *vdev = video_devdata(file);
3340 
3341 	if (vb2_queue_is_busy(vdev, file))
3342 		return -EBUSY;
3343 	return vb2_streamon(vdev->queue, i);
3344 }
3345 EXPORT_SYMBOL_GPL(vb2_ioctl_streamon);
3346 
3347 int vb2_ioctl_streamoff(struct file *file, void *priv, enum v4l2_buf_type i)
3348 {
3349 	struct video_device *vdev = video_devdata(file);
3350 
3351 	if (vb2_queue_is_busy(vdev, file))
3352 		return -EBUSY;
3353 	return vb2_streamoff(vdev->queue, i);
3354 }
3355 EXPORT_SYMBOL_GPL(vb2_ioctl_streamoff);
3356 
3357 int vb2_ioctl_expbuf(struct file *file, void *priv, struct v4l2_exportbuffer *p)
3358 {
3359 	struct video_device *vdev = video_devdata(file);
3360 
3361 	if (vb2_queue_is_busy(vdev, file))
3362 		return -EBUSY;
3363 	return vb2_expbuf(vdev->queue, p);
3364 }
3365 EXPORT_SYMBOL_GPL(vb2_ioctl_expbuf);
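
/*
 * Usage sketch (not part of the original file): drivers typically plug the
 * vb2_ioctl_* helpers above straight into their struct v4l2_ioctl_ops.
 * All names prefixed with mydrv_ below are hypothetical.
 *
 *	static const struct v4l2_ioctl_ops mydrv_ioctl_ops = {
 *		.vidioc_reqbufs		= vb2_ioctl_reqbufs,
 *		.vidioc_create_bufs	= vb2_ioctl_create_bufs,
 *		.vidioc_prepare_buf	= vb2_ioctl_prepare_buf,
 *		.vidioc_querybuf	= vb2_ioctl_querybuf,
 *		.vidioc_qbuf		= vb2_ioctl_qbuf,
 *		.vidioc_dqbuf		= vb2_ioctl_dqbuf,
 *		.vidioc_expbuf		= vb2_ioctl_expbuf,
 *		.vidioc_streamon	= vb2_ioctl_streamon,
 *		.vidioc_streamoff	= vb2_ioctl_streamoff,
 *	};
 *
 * (plus the driver's own querycap/format ioctls). This assumes vdev->queue
 * points at the driver's vb2_queue; the helpers then perform the ownership
 * checks (vb2_queue_is_busy) internally.
 */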
3366 
3367 /* v4l2_file_operations helpers */
3368 
3369 int vb2_fop_mmap(struct file *file, struct vm_area_struct *vma)
3370 {
3371 	struct video_device *vdev = video_devdata(file);
3372 
3373 	return vb2_mmap(vdev->queue, vma);
3374 }
3375 EXPORT_SYMBOL_GPL(vb2_fop_mmap);
3376 
3377 int _vb2_fop_release(struct file *file, struct mutex *lock)
3378 {
3379 	struct video_device *vdev = video_devdata(file);
3380 
3381 	if (file->private_data == vdev->queue->owner) {
3382 		if (lock)
3383 			mutex_lock(lock);
3384 		vb2_queue_release(vdev->queue);
3385 		vdev->queue->owner = NULL;
3386 		if (lock)
3387 			mutex_unlock(lock);
3388 	}
3389 	return v4l2_fh_release(file);
3390 }
3391 EXPORT_SYMBOL_GPL(_vb2_fop_release);
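
/*
 * Usage sketch (not part of the original file): a driver that needs extra
 * teardown on the last close can reuse the owner/queue-release logic above
 * from its own release hook. struct mydrv, mydrv_release and the work/lock
 * members are hypothetical.
 *
 *	static int mydrv_release(struct file *file)
 *	{
 *		struct mydrv *drv = video_drvdata(file);
 *
 *		cancel_work_sync(&drv->work);
 *		return _vb2_fop_release(file, &drv->lock);
 *	}
 */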
3392 
3393 int vb2_fop_release(struct file *file)
3394 {
3395 	struct video_device *vdev = video_devdata(file);
3396 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3397 
3398 	return _vb2_fop_release(file, lock);
3399 }
3400 EXPORT_SYMBOL_GPL(vb2_fop_release);
3401 
3402 ssize_t vb2_fop_write(struct file *file, const char __user *buf,
3403 		size_t count, loff_t *ppos)
3404 {
3405 	struct video_device *vdev = video_devdata(file);
3406 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3407 	int err = -EBUSY;
3408 
3409 	if (lock && mutex_lock_interruptible(lock))
3410 		return -ERESTARTSYS;
3411 	if (vb2_queue_is_busy(vdev, file))
3412 		goto exit;
3413 	err = vb2_write(vdev->queue, buf, count, ppos,
3414 		       file->f_flags & O_NONBLOCK);
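	/* Once file I/O emulation is active, this filehandle owns the queue. */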
3415 	if (vdev->queue->fileio)
3416 		vdev->queue->owner = file->private_data;
3417 exit:
3418 	if (lock)
3419 		mutex_unlock(lock);
3420 	return err;
3421 }
3422 EXPORT_SYMBOL_GPL(vb2_fop_write);
3423 
3424 ssize_t vb2_fop_read(struct file *file, char __user *buf,
3425 		size_t count, loff_t *ppos)
3426 {
3427 	struct video_device *vdev = video_devdata(file);
3428 	struct mutex *lock = vdev->queue->lock ? vdev->queue->lock : vdev->lock;
3429 	int err = -EBUSY;
3430 
3431 	if (lock && mutex_lock_interruptible(lock))
3432 		return -ERESTARTSYS;
3433 	if (vb2_queue_is_busy(vdev, file))
3434 		goto exit;
3435 	err = vb2_read(vdev->queue, buf, count, ppos,
3436 		       file->f_flags & O_NONBLOCK);
3437 	if (vdev->queue->fileio)
3438 		vdev->queue->owner = file->private_data;
3439 exit:
3440 	if (lock)
3441 		mutex_unlock(lock);
3442 	return err;
3443 }
3444 EXPORT_SYMBOL_GPL(vb2_fop_read);
3445 
3446 unsigned int vb2_fop_poll(struct file *file, poll_table *wait)
3447 {
3448 	struct video_device *vdev = video_devdata(file);
3449 	struct vb2_queue *q = vdev->queue;
3450 	struct mutex *lock = q->lock ? q->lock : vdev->lock;
3451 	unsigned long req_events = poll_requested_events(wait);
3452 	unsigned res;
3453 	void *fileio;
3454 	bool must_lock = false;
3455 
3456 	/* Try to be smart: only lock if polling might start file I/O
3457 	   emulation (fileio), otherwise locking only introduces unwanted delays. */
3458 	if (q->num_buffers == 0 && !vb2_fileio_is_active(q)) {
3459 		if (!V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_READ) &&
3460 				(req_events & (POLLIN | POLLRDNORM)))
3461 			must_lock = true;
3462 		else if (V4L2_TYPE_IS_OUTPUT(q->type) && (q->io_modes & VB2_WRITE) &&
3463 				(req_events & (POLLOUT | POLLWRNORM)))
3464 			must_lock = true;
3465 	}
3466 
3467 	/* If locking is needed but this helper doesn't know how, then you
3468 	   shouldn't be using this helper; write your own instead. */
3469 	WARN_ON(must_lock && !lock);
3470 
3471 	if (must_lock && lock && mutex_lock_interruptible(lock))
3472 		return POLLERR;
3473 
3474 	fileio = q->fileio;
3475 
3476 	res = vb2_poll(vdev->queue, file, wait);
3477 
3478 	/* If fileio was started, then we have a new queue owner. */
3479 	if (must_lock && !fileio && q->fileio)
3480 		q->owner = file->private_data;
3481 	if (must_lock && lock)
3482 		mutex_unlock(lock);
3483 	return res;
3484 }
3485 EXPORT_SYMBOL_GPL(vb2_fop_poll);
3486 
3487 #ifndef CONFIG_MMU
3488 unsigned long vb2_fop_get_unmapped_area(struct file *file, unsigned long addr,
3489 		unsigned long len, unsigned long pgoff, unsigned long flags)
3490 {
3491 	struct video_device *vdev = video_devdata(file);
3492 
3493 	return vb2_get_unmapped_area(vdev->queue, addr, len, pgoff, flags);
3494 }
3495 EXPORT_SYMBOL_GPL(vb2_fop_get_unmapped_area);
3496 #endif
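
/*
 * Usage sketch (not part of the original file): the vb2_fop_* helpers above
 * are meant to be plugged into struct v4l2_file_operations. mydrv_fops is a
 * hypothetical name; a capture driver might use:
 *
 *	static const struct v4l2_file_operations mydrv_fops = {
 *		.owner		= THIS_MODULE,
 *		.open		= v4l2_fh_open,
 *		.release	= vb2_fop_release,
 *		.read		= vb2_fop_read,
 *		.poll		= vb2_fop_poll,
 *		.mmap		= vb2_fop_mmap,
 *		.unlocked_ioctl	= video_ioctl2,
 *	};
 *
 * The helpers rely on vdev->queue being set and use vdev->queue->lock (or
 * vdev->lock as a fallback) for serialization.
 */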
3497 
3498 /* vb2_ops helpers. Only use if vq->lock is non-NULL. */
3499 
3500 void vb2_ops_wait_prepare(struct vb2_queue *vq)
3501 {
3502 	mutex_unlock(vq->lock);
3503 }
3504 EXPORT_SYMBOL_GPL(vb2_ops_wait_prepare);
3505 
3506 void vb2_ops_wait_finish(struct vb2_queue *vq)
3507 {
3508 	mutex_lock(vq->lock);
3509 }
3510 EXPORT_SYMBOL_GPL(vb2_ops_wait_finish);
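
/*
 * Usage sketch (not part of the original file): a driver that sets vq->lock
 * can point wait_prepare/wait_finish at the helpers above so vb2 drops and
 * retakes that mutex while it sleeps waiting for a buffer. The mydrv_*
 * callbacks are hypothetical.
 *
 *	static const struct vb2_ops mydrv_qops = {
 *		.queue_setup	= mydrv_queue_setup,
 *		.buf_queue	= mydrv_buf_queue,
 *		.wait_prepare	= vb2_ops_wait_prepare,
 *		.wait_finish	= vb2_ops_wait_finish,
 *	};
 */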
3511 
3512 MODULE_DESCRIPTION("Driver helper framework for Video for Linux 2");
3513 MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>, Marek Szyprowski");
3514 MODULE_LICENSE("GPL");
3515