// SPDX-License-Identifier: GPL-2.0-only
/*
 * generic helper functions for handling video4linux capture buffers
 *
 * (c) 2007 Mauro Carvalho Chehab, <mchehab@kernel.org>
 *
 * Highly based on video-buf written originally by:
 * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org>
 * (c) 2006 Mauro Carvalho Chehab, <mchehab@kernel.org>
 * (c) 2006 Ted Walther and John Sokol
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/interrupt.h>

#include <media/videobuf-core.h>
#include <media/v4l2-common.h>

#define MAGIC_BUFFER 0x20070728
#define MAGIC_CHECK(is, should)						\
	do {								\
		if (unlikely((is) != (should))) {			\
			printk(KERN_ERR					\
				"magic mismatch: %x (expected %x)\n",	\
					is, should);			\
			BUG();						\
		}							\
	} while (0)

static int debug;
module_param(debug, int, 0644);

MODULE_DESCRIPTION("helper module to manage video4linux buffers");
MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@kernel.org>");
MODULE_LICENSE("GPL");

#define dprintk(level, fmt, arg...)					\
	do {								\
		if (debug >= level)					\
			printk(KERN_DEBUG "vbuf: " fmt, ## arg);	\
	} while (0)

/* --------------------------------------------------------------------- */

#define CALL(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : 0)
#define CALLPTR(q, f, arg...)						\
	((q->int_ops->f) ? q->int_ops->f(arg) : NULL)

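/*
 * Allocate a buffer structure of q->msize bytes through the queue's
 * memory-type specific alloc_vb() hook and initialize its wait queue
 * and magic field.
 */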
struct videobuf_buffer *videobuf_alloc_vb(struct videobuf_queue *q)
{
	struct videobuf_buffer *vb;

	BUG_ON(q->msize < sizeof(*vb));

	if (!q->int_ops || !q->int_ops->alloc_vb) {
		printk(KERN_ERR "No specific ops defined!\n");
		BUG();
	}

	vb = q->int_ops->alloc_vb(q->msize);
	if (NULL != vb) {
		init_waitqueue_head(&vb->done);
		vb->magic = MAGIC_BUFFER;
	}

	return vb;
}
EXPORT_SYMBOL_GPL(videobuf_alloc_vb);

static int state_neither_active_nor_queued(struct videobuf_queue *q,
					   struct videobuf_buffer *vb)
{
	unsigned long flags;
	bool rc;

	spin_lock_irqsave(q->irqlock, flags);
	rc = vb->state != VIDEOBUF_ACTIVE && vb->state != VIDEOBUF_QUEUED;
	spin_unlock_irqrestore(q->irqlock, flags);
	return rc;
}

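/*
 * Wait until the buffer is no longer ACTIVE or QUEUED.  With non_blocking
 * set this only checks the state and returns -EAGAIN if the buffer is still
 * in flight; with intr set the wait can be interrupted by a signal.  If the
 * external serialization lock (q->ext_lock) is held, it is dropped for the
 * duration of the wait and re-acquired afterwards.
 */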
int videobuf_waiton(struct videobuf_queue *q, struct videobuf_buffer *vb,
		int non_blocking, int intr)
{
	bool is_ext_locked;
	int ret = 0;

	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);

	if (non_blocking) {
		if (state_neither_active_nor_queued(q, vb))
			return 0;
		return -EAGAIN;
	}

	is_ext_locked = q->ext_lock && mutex_is_locked(q->ext_lock);

	/* Release vdev lock to prevent this wait from blocking outside access to
	   the device. */
	if (is_ext_locked)
		mutex_unlock(q->ext_lock);
	if (intr)
		ret = wait_event_interruptible(vb->done,
					state_neither_active_nor_queued(q, vb));
	else
		wait_event(vb->done, state_neither_active_nor_queued(q, vb));
	/* Relock */
	if (is_ext_locked)
		mutex_lock(q->ext_lock);

	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_waiton);

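/*
 * Lock the buffer memory for I/O by delegating to the queue's memory-type
 * specific int_ops->iolock() implementation.
 */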
int videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb,
		    struct v4l2_framebuffer *fbuf)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	return CALL(q, iolock, q, vb, fbuf);
}
EXPORT_SYMBOL_GPL(videobuf_iolock);

void *videobuf_queue_to_vaddr(struct videobuf_queue *q,
			      struct videobuf_buffer *buf)
{
	if (q->int_ops->vaddr)
		return q->int_ops->vaddr(buf);
	return NULL;
}
EXPORT_SYMBOL_GPL(videobuf_queue_to_vaddr);

/* --------------------------------------------------------------------- */

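/*
 * Core queue initialization: store the queue parameters and verify that
 * all mandatory buf_* operations, the irqlock and the int_ops have been
 * provided by the caller.
 */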
void videobuf_queue_core_init(struct videobuf_queue *q,
			 const struct videobuf_queue_ops *ops,
			 struct device *dev,
			 spinlock_t *irqlock,
			 enum v4l2_buf_type type,
			 enum v4l2_field field,
			 unsigned int msize,
			 void *priv,
			 struct videobuf_qtype_ops *int_ops,
			 struct mutex *ext_lock)
{
	BUG_ON(!q);
	memset(q, 0, sizeof(*q));
	q->irqlock   = irqlock;
	q->ext_lock  = ext_lock;
	q->dev       = dev;
	q->type      = type;
	q->field     = field;
	q->msize     = msize;
	q->ops       = ops;
	q->priv_data = priv;
	q->int_ops   = int_ops;

	/* All buffer operations are mandatory */
	BUG_ON(!q->ops->buf_setup);
	BUG_ON(!q->ops->buf_prepare);
	BUG_ON(!q->ops->buf_queue);
	BUG_ON(!q->ops->buf_release);

	/* Lock is mandatory for queue_cancel to work */
	BUG_ON(!irqlock);
	/* Having implementations for the abstract methods is mandatory */
	BUG_ON(!q->int_ops);

	mutex_init(&q->vb_lock);
	init_waitqueue_head(&q->wait);
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_core_init);

/* Locking: the only user is bttv and that usage is unsafe; find a way to remove this */
int videobuf_queue_is_busy(struct videobuf_queue *q)
{
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (q->streaming) {
		dprintk(1, "busy: streaming active\n");
		return 1;
	}
	if (q->reading) {
		dprintk(1, "busy: pending read #1\n");
		return 1;
	}
	if (q->read_buf) {
		dprintk(1, "busy: pending read #2\n");
		return 1;
	}
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->map) {
			dprintk(1, "busy: buffer #%d mapped\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			dprintk(1, "busy: buffer #%d queued\n", i);
			return 1;
		}
		if (q->bufs[i]->state == VIDEOBUF_ACTIVE) {
			dprintk(1, "busy: buffer #%d active\n", i);
			return 1;
		}
	}
	return 0;
}
EXPORT_SYMBOL_GPL(videobuf_queue_is_busy);

/*
 * __videobuf_free() - free all the buffers and their control structures
 *
 * This function can only be called if streaming/reading is off, i.e. no buffers
 * are under control of the driver.
 */
/* Locking: Caller holds q->vb_lock */
static int __videobuf_free(struct videobuf_queue *q)
{
	int i;

	dprintk(1, "%s\n", __func__);
	if (!q)
		return 0;

	if (q->streaming || q->reading) {
		dprintk(1, "Cannot free buffers when streaming or reading\n");
		return -EBUSY;
	}

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	for (i = 0; i < VIDEO_MAX_FRAME; i++)
		if (q->bufs[i] && q->bufs[i]->map) {
			dprintk(1, "Cannot free mmapped buffers\n");
			return -EBUSY;
		}

	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}

	return 0;
}

/* Locking: Caller holds q->vb_lock */
void videobuf_queue_cancel(struct videobuf_queue *q)
{
	unsigned long flags = 0;
	int i;

	q->streaming = 0;
	q->reading  = 0;
	wake_up_interruptible_sync(&q->wait);

	/* remove queued buffers from list */
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		if (q->bufs[i]->state == VIDEOBUF_QUEUED) {
			list_del(&q->bufs[i]->queue);
			q->bufs[i]->state = VIDEOBUF_ERROR;
			wake_up_all(&q->bufs[i]->done);
		}
	}
	spin_unlock_irqrestore(q->irqlock, flags);

	/* free all buffers + clear queue */
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		q->ops->buf_release(q, q->bufs[i]);
	}
	INIT_LIST_HEAD(&q->stream);
}
EXPORT_SYMBOL_GPL(videobuf_queue_cancel);

/* --------------------------------------------------------------------- */

/* Locking: Caller holds q->vb_lock */
enum v4l2_field videobuf_next_field(struct videobuf_queue *q)
{
	enum v4l2_field field = q->field;

	BUG_ON(V4L2_FIELD_ANY == field);

	if (V4L2_FIELD_ALTERNATE == field) {
		if (V4L2_FIELD_TOP == q->last) {
			field   = V4L2_FIELD_BOTTOM;
			q->last = V4L2_FIELD_BOTTOM;
		} else {
			field   = V4L2_FIELD_TOP;
			q->last = V4L2_FIELD_TOP;
		}
	}
	return field;
}
EXPORT_SYMBOL_GPL(videobuf_next_field);

/* Locking: Caller holds q->vb_lock */
static void videobuf_status(struct videobuf_queue *q, struct v4l2_buffer *b,
			    struct videobuf_buffer *vb, enum v4l2_buf_type type)
{
	MAGIC_CHECK(vb->magic, MAGIC_BUFFER);
	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	b->index    = vb->i;
	b->type     = type;

	b->memory   = vb->memory;
	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		b->m.offset  = vb->boff;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_USERPTR:
		b->m.userptr = vb->baddr;
		b->length    = vb->bsize;
		break;
	case V4L2_MEMORY_OVERLAY:
		b->m.offset  = vb->boff;
		break;
	case V4L2_MEMORY_DMABUF:
		/* DMABUF is not handled in videobuf framework */
		break;
	}

	b->flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
	if (vb->map)
		b->flags |= V4L2_BUF_FLAG_MAPPED;

	switch (vb->state) {
	case VIDEOBUF_PREPARED:
	case VIDEOBUF_QUEUED:
	case VIDEOBUF_ACTIVE:
		b->flags |= V4L2_BUF_FLAG_QUEUED;
		break;
	case VIDEOBUF_ERROR:
		b->flags |= V4L2_BUF_FLAG_ERROR;
		fallthrough;
	case VIDEOBUF_DONE:
		b->flags |= V4L2_BUF_FLAG_DONE;
		break;
	case VIDEOBUF_NEEDS_INIT:
	case VIDEOBUF_IDLE:
		/* nothing */
		break;
	}

	b->field     = vb->field;
	v4l2_buffer_set_timestamp(b, vb->ts);
	b->bytesused = vb->size;
	b->sequence  = vb->field_count >> 1;
}

int videobuf_mmap_free(struct videobuf_queue *q)
{
	int ret;
	videobuf_queue_lock(q);
	ret = __videobuf_free(q);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_free);

/* Locking: Caller holds q->vb_lock */
int __videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	unsigned int i;
	int err;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	err = __videobuf_free(q);
	if (0 != err)
		return err;

	/* Allocate and initialize buffers */
	for (i = 0; i < bcount; i++) {
		q->bufs[i] = videobuf_alloc_vb(q);

		if (NULL == q->bufs[i])
			break;

		q->bufs[i]->i      = i;
		q->bufs[i]->memory = memory;
		q->bufs[i]->bsize  = bsize;
		switch (memory) {
		case V4L2_MEMORY_MMAP:
			q->bufs[i]->boff = PAGE_ALIGN(bsize) * i;
			break;
		case V4L2_MEMORY_USERPTR:
		case V4L2_MEMORY_OVERLAY:
		case V4L2_MEMORY_DMABUF:
			/* nothing */
			break;
		}
	}

	if (!i)
		return -ENOMEM;

	dprintk(1, "mmap setup: %d buffers, %d bytes each\n", i, bsize);

	return i;
}
EXPORT_SYMBOL_GPL(__videobuf_mmap_setup);

int videobuf_mmap_setup(struct videobuf_queue *q,
			unsigned int bcount, unsigned int bsize,
			enum v4l2_memory memory)
{
	int ret;
	videobuf_queue_lock(q);
	ret = __videobuf_mmap_setup(q, bcount, bsize, memory);
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_setup);

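/*
 * VIDIOC_REQBUFS handler: validate the request, ask the driver for its
 * preferred buffer count and size via buf_setup() and allocate the buffer
 * structures.  A count of zero frees any previously allocated buffers.
 */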
int videobuf_reqbufs(struct videobuf_queue *q,
		 struct v4l2_requestbuffers *req)
{
	unsigned int size, count;
	int retval;

	if (req->memory != V4L2_MEMORY_MMAP     &&
	    req->memory != V4L2_MEMORY_USERPTR  &&
	    req->memory != V4L2_MEMORY_OVERLAY) {
		dprintk(1, "reqbufs: memory type invalid\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	if (req->type != q->type) {
		dprintk(1, "reqbufs: queue type invalid\n");
		retval = -EINVAL;
		goto done;
	}

	if (q->streaming) {
		dprintk(1, "reqbufs: streaming already exists\n");
		retval = -EBUSY;
		goto done;
	}
	if (!list_empty(&q->stream)) {
		dprintk(1, "reqbufs: stream running\n");
		retval = -EBUSY;
		goto done;
	}

	if (req->count == 0) {
		dprintk(1, "reqbufs: count invalid (%d)\n", req->count);
		retval = __videobuf_free(q);
		goto done;
	}

	count = req->count;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = 0;
	q->ops->buf_setup(q, &count, &size);
	dprintk(1, "reqbufs: bufs=%d, size=0x%x [%u pages total]\n",
		count, size,
		(unsigned int)((count * PAGE_ALIGN(size)) >> PAGE_SHIFT));

	retval = __videobuf_mmap_setup(q, count, size, req->memory);
	if (retval < 0) {
		dprintk(1, "reqbufs: mmap setup returned %d\n", retval);
		goto done;
	}

	req->count = retval;
	retval = 0;

 done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_reqbufs);

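/*
 * VIDIOC_QUERYBUF handler: fill in the v4l2_buffer state of the requested
 * buffer index.
 */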
int videobuf_querybuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	int ret = -EINVAL;

	videobuf_queue_lock(q);
	if (unlikely(b->type != q->type)) {
		dprintk(1, "querybuf: Wrong type.\n");
		goto done;
	}
	if (unlikely(b->index >= VIDEO_MAX_FRAME)) {
		dprintk(1, "querybuf: index out of range.\n");
		goto done;
	}
	if (unlikely(NULL == q->bufs[b->index])) {
		dprintk(1, "querybuf: buffer is null.\n");
		goto done;
	}

	videobuf_status(q, b, q->bufs[b->index], q->type);

	ret = 0;
done:
	videobuf_queue_unlock(q);
	return ret;
}
EXPORT_SYMBOL_GPL(videobuf_querybuf);

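/*
 * VIDIOC_QBUF handler: validate the buffer, let the driver prepare it and
 * append it to the stream list; if streaming is already on, hand it to the
 * driver's buf_queue() right away.
 */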
int videobuf_qbuf(struct videobuf_queue *q, struct v4l2_buffer *b)
{
	struct videobuf_buffer *buf;
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (b->memory == V4L2_MEMORY_MMAP)
		mmap_read_lock(current->mm);

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading) {
		dprintk(1, "qbuf: Reading running...\n");
		goto done;
	}
	retval = -EINVAL;
	if (b->type != q->type) {
		dprintk(1, "qbuf: Wrong type.\n");
		goto done;
	}
	if (b->index >= VIDEO_MAX_FRAME) {
		dprintk(1, "qbuf: index out of range.\n");
		goto done;
	}
	buf = q->bufs[b->index];
	if (NULL == buf) {
		dprintk(1, "qbuf: buffer is null.\n");
		goto done;
	}
	MAGIC_CHECK(buf->magic, MAGIC_BUFFER);
	if (buf->memory != b->memory) {
		dprintk(1, "qbuf: memory type is wrong.\n");
		goto done;
	}
	if (buf->state != VIDEOBUF_NEEDS_INIT && buf->state != VIDEOBUF_IDLE) {
		dprintk(1, "qbuf: buffer is already queued or active.\n");
		goto done;
	}

	switch (b->memory) {
	case V4L2_MEMORY_MMAP:
		if (0 == buf->baddr) {
			dprintk(1, "qbuf: mmap requested but buffer addr is zero!\n");
			goto done;
		}
		if (q->type == V4L2_BUF_TYPE_VIDEO_OUTPUT
		    || q->type == V4L2_BUF_TYPE_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SLICED_VBI_OUTPUT
		    || q->type == V4L2_BUF_TYPE_SDR_OUTPUT) {
			buf->size = b->bytesused;
			buf->field = b->field;
			buf->ts = v4l2_buffer_get_timestamp(b);
		}
		break;
	case V4L2_MEMORY_USERPTR:
		if (b->length < buf->bsize) {
			dprintk(1, "qbuf: buffer length is not enough\n");
			goto done;
		}
		if (VIDEOBUF_NEEDS_INIT != buf->state &&
		    buf->baddr != b->m.userptr)
			q->ops->buf_release(q, buf);
		buf->baddr = b->m.userptr;
		break;
	case V4L2_MEMORY_OVERLAY:
		buf->boff = b->m.offset;
		break;
	default:
		dprintk(1, "qbuf: wrong memory type\n");
		goto done;
	}

	dprintk(1, "qbuf: requesting next field\n");
	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, buf, field);
	if (0 != retval) {
		dprintk(1, "qbuf: buffer_prepare returned %d\n", retval);
		goto done;
	}

	list_add_tail(&buf->stream, &q->stream);
	if (q->streaming) {
		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, buf);
		spin_unlock_irqrestore(q->irqlock, flags);
	}
	dprintk(1, "qbuf: succeeded\n");
	retval = 0;
	wake_up_interruptible_sync(&q->wait);

done:
	videobuf_queue_unlock(q);

	if (b->memory == V4L2_MEMORY_MMAP)
		mmap_read_unlock(current->mm);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_qbuf);

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer_check_queue(struct videobuf_queue *q, int noblock)
{
	int retval;

checks:
	if (!q->streaming) {
		dprintk(1, "next_buffer: Not streaming\n");
		retval = -EINVAL;
		goto done;
	}

	if (list_empty(&q->stream)) {
		if (noblock) {
			retval = -EAGAIN;
			dprintk(2, "next_buffer: no buffers to dequeue\n");
			goto done;
		} else {
			dprintk(2, "next_buffer: waiting on buffer\n");

			/* Drop lock to avoid deadlock with qbuf */
			videobuf_queue_unlock(q);

			/* Checking list_empty and streaming without the lock
			 * is safe here: we jump back to 'checks' and
			 * re-validate both while holding the lock before
			 * proceeding */
			retval = wait_event_interruptible(q->wait,
				!list_empty(&q->stream) || !q->streaming);
			videobuf_queue_lock(q);

			if (retval)
				goto done;

			goto checks;
		}
	}

	retval = 0;

done:
	return retval;
}

/* Locking: Caller holds q->vb_lock */
static int stream_next_buffer(struct videobuf_queue *q,
			struct videobuf_buffer **vb, int nonblocking)
{
	int retval;
	struct videobuf_buffer *buf = NULL;

	retval = stream_next_buffer_check_queue(q, nonblocking);
	if (retval)
		goto done;

	buf = list_entry(q->stream.next, struct videobuf_buffer, stream);
	retval = videobuf_waiton(q, buf, nonblocking, 1);
	if (retval < 0)
		goto done;

	*vb = buf;
done:
	return retval;
}

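/*
 * VIDIOC_DQBUF handler: wait for the next buffer on the stream list to
 * become DONE (or ERROR), sync it and report its status back to userspace.
 */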
int videobuf_dqbuf(struct videobuf_queue *q,
		   struct v4l2_buffer *b, int nonblocking)
{
	struct videobuf_buffer *buf = NULL;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	memset(b, 0, sizeof(*b));
	videobuf_queue_lock(q);

	retval = stream_next_buffer(q, &buf, nonblocking);
	if (retval < 0) {
		dprintk(1, "dqbuf: next_buffer error: %i\n", retval);
		goto done;
	}

	switch (buf->state) {
	case VIDEOBUF_ERROR:
		dprintk(1, "dqbuf: state is error\n");
		break;
	case VIDEOBUF_DONE:
		dprintk(1, "dqbuf: state is done\n");
		break;
	default:
		dprintk(1, "dqbuf: state invalid\n");
		retval = -EINVAL;
		goto done;
	}
	CALL(q, sync, q, buf);
	videobuf_status(q, b, buf, q->type);
	list_del(&buf->stream);
	buf->state = VIDEOBUF_IDLE;
	b->flags &= ~V4L2_BUF_FLAG_DONE;
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_dqbuf);

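/*
 * VIDIOC_STREAMON handler: fails with -EBUSY while a read() is in
 * progress, otherwise marks the queue as streaming and queues all already
 * prepared buffers to the driver.
 */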
int videobuf_streamon(struct videobuf_queue *q)
{
	struct videobuf_buffer *buf;
	unsigned long flags = 0;
	int retval;

	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->reading)
		goto done;
	retval = 0;
	if (q->streaming)
		goto done;
	q->streaming = 1;
	spin_lock_irqsave(q->irqlock, flags);
	list_for_each_entry(buf, &q->stream, stream)
		if (buf->state == VIDEOBUF_PREPARED)
			q->ops->buf_queue(q, buf);
	spin_unlock_irqrestore(q->irqlock, flags);

	wake_up_interruptible_sync(&q->wait);
done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamon);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_streamoff(struct videobuf_queue *q)
{
	if (!q->streaming)
		return -EINVAL;

	videobuf_queue_cancel(q);

	return 0;
}

int videobuf_streamoff(struct videobuf_queue *q)
{
	int retval;

	videobuf_queue_lock(q);
	retval = __videobuf_streamoff(q);
	videobuf_queue_unlock(q);

	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_streamoff);

/* Locking: Caller holds q->vb_lock */
static ssize_t videobuf_read_zerocopy(struct videobuf_queue *q,
				      char __user *data,
				      size_t count, loff_t *ppos)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	/* setup stuff */
	q->read_buf = videobuf_alloc_vb(q);
	if (NULL == q->read_buf)
		return -ENOMEM;

	q->read_buf->memory = V4L2_MEMORY_USERPTR;
	q->read_buf->baddr  = (unsigned long)data;
	q->read_buf->bsize  = count;

	field = videobuf_next_field(q);
	retval = q->ops->buf_prepare(q, q->read_buf, field);
	if (0 != retval)
		goto done;

	/* start capture & wait */
	spin_lock_irqsave(q->irqlock, flags);
	q->ops->buf_queue(q, q->read_buf);
	spin_unlock_irqrestore(q->irqlock, flags);
	retval = videobuf_waiton(q, q->read_buf, 0, 0);
	if (0 == retval) {
		CALL(q, sync, q, q->read_buf);
		if (VIDEOBUF_ERROR == q->read_buf->state)
			retval = -EIO;
		else
			retval = q->read_buf->size;
	}

done:
	/* cleanup */
	q->ops->buf_release(q, q->read_buf);
	kfree(q->read_buf);
	q->read_buf = NULL;
	return retval;
}

static int __videobuf_copy_to_user(struct videobuf_queue *q,
				   struct videobuf_buffer *buf,
				   char __user *data, size_t count,
				   int nonblocking)
{
	void *vaddr = CALLPTR(q, vaddr, buf);

	/* copy to userspace */
	if (count > buf->size - q->read_off)
		count = buf->size - q->read_off;

	if (copy_to_user(data, vaddr + q->read_off, count))
		return -EFAULT;

	return count;
}

static int __videobuf_copy_stream(struct videobuf_queue *q,
				  struct videobuf_buffer *buf,
				  char __user *data, size_t count, size_t pos,
				  int vbihack, int nonblocking)
{
	unsigned int *fc = CALLPTR(q, vaddr, buf);

	if (vbihack) {
		/* Dirty, undocumented hack -- pass the frame counter
		 * within the last four bytes of each vbi data block.
		 * We need it to maintain backward compatibility with
		 * all the vbi decoding software out there ... */
		fc += (buf->size >> 2) - 1;
		*fc = buf->field_count >> 1;
		dprintk(1, "vbihack: %d\n", *fc);
	}

	/* copy stuff using the common method */
	count = __videobuf_copy_to_user(q, buf, data, count, nonblocking);

	if ((count == -EFAULT) && (pos == 0))
		return -EFAULT;

	return count;
}

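/*
 * read() helper for drivers that deliver one frame per read() call.  Tries
 * a zero-copy capture straight into the user buffer when possible and
 * otherwise falls back to a kernel bounce buffer that is copied out
 * piecewise via q->read_off.
 */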
ssize_t videobuf_read_one(struct videobuf_queue *q,
			  char __user *data, size_t count, loff_t *ppos,
			  int nonblocking)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned size = 0, nbufs = 1;
	int retval;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	videobuf_queue_lock(q);

	q->ops->buf_setup(q, &nbufs, &size);

	if (NULL == q->read_buf  &&
	    count >= size        &&
	    !nonblocking) {
		retval = videobuf_read_zerocopy(q, data, count, ppos);
		if (retval >= 0  ||  retval == -EIO)
			/* ok, all done */
			goto done;
		/* fallback to kernel bounce buffer on failures */
	}

	if (NULL == q->read_buf) {
		/* need to capture a new frame */
		retval = -ENOMEM;
		q->read_buf = videobuf_alloc_vb(q);

		dprintk(1, "video alloc=0x%p\n", q->read_buf);
		if (NULL == q->read_buf)
			goto done;
		q->read_buf->memory = V4L2_MEMORY_USERPTR;
		q->read_buf->bsize = count; /* preferred size */
		field = videobuf_next_field(q);
		retval = q->ops->buf_prepare(q, q->read_buf, field);

		if (0 != retval) {
			kfree(q->read_buf);
			q->read_buf = NULL;
			goto done;
		}

		spin_lock_irqsave(q->irqlock, flags);
		q->ops->buf_queue(q, q->read_buf);
		spin_unlock_irqrestore(q->irqlock, flags);

		q->read_off = 0;
	}

	/* wait until capture is done */
	retval = videobuf_waiton(q, q->read_buf, nonblocking, 1);
	if (0 != retval)
		goto done;

	CALL(q, sync, q, q->read_buf);

	if (VIDEOBUF_ERROR == q->read_buf->state) {
		/* catch I/O errors */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
		retval = -EIO;
		goto done;
	}

	/* Copy to userspace */
	retval = __videobuf_copy_to_user(q, q->read_buf, data, count, nonblocking);
	if (retval < 0)
		goto done;

	q->read_off += retval;
	if (q->read_off == q->read_buf->size) {
		/* all data copied, cleanup */
		q->ops->buf_release(q, q->read_buf);
		kfree(q->read_buf);
		q->read_buf = NULL;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_one);

/* Locking: Caller holds q->vb_lock */
static int __videobuf_read_start(struct videobuf_queue *q)
{
	enum v4l2_field field;
	unsigned long flags = 0;
	unsigned int count = 0, size = 0;
	int err, i;

	q->ops->buf_setup(q, &count, &size);
	if (count < 2)
		count = 2;
	if (count > VIDEO_MAX_FRAME)
		count = VIDEO_MAX_FRAME;
	size = PAGE_ALIGN(size);

	err = __videobuf_mmap_setup(q, count, size, V4L2_MEMORY_USERPTR);
	if (err < 0)
		return err;

	count = err;

	for (i = 0; i < count; i++) {
		field = videobuf_next_field(q);
		err = q->ops->buf_prepare(q, q->bufs[i], field);
		if (err)
			return err;
		list_add_tail(&q->bufs[i]->stream, &q->stream);
	}
	spin_lock_irqsave(q->irqlock, flags);
	for (i = 0; i < count; i++)
		q->ops->buf_queue(q, q->bufs[i]);
	spin_unlock_irqrestore(q->irqlock, flags);
	q->reading = 1;
	return 0;
}

static void __videobuf_read_stop(struct videobuf_queue *q)
{
	int i;

	videobuf_queue_cancel(q);
	__videobuf_free(q);
	INIT_LIST_HEAD(&q->stream);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		if (NULL == q->bufs[i])
			continue;
		kfree(q->bufs[i]);
		q->bufs[i] = NULL;
	}
	q->read_buf = NULL;
}

int videobuf_read_start(struct videobuf_queue *q)
{
	int rc;

	videobuf_queue_lock(q);
	rc = __videobuf_read_start(q);
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_read_start);

void videobuf_read_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);
	__videobuf_read_stop(q);
	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_read_stop);

void videobuf_stop(struct videobuf_queue *q)
{
	videobuf_queue_lock(q);

	if (q->streaming)
		__videobuf_streamoff(q);

	if (q->reading)
		__videobuf_read_stop(q);

	videobuf_queue_unlock(q);
}
EXPORT_SYMBOL_GPL(videobuf_stop);

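/*
 * read() helper for continuous capture: starts read-mode streaming on
 * demand and copies data from completed buffers until the request is
 * satisfied, requeueing each buffer once it has been fully consumed.
 */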
ssize_t videobuf_read_stream(struct videobuf_queue *q,
			     char __user *data, size_t count, loff_t *ppos,
			     int vbihack, int nonblocking)
{
	int rc, retval;
	unsigned long flags = 0;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	dprintk(2, "%s\n", __func__);
	videobuf_queue_lock(q);
	retval = -EBUSY;
	if (q->streaming)
		goto done;
	if (!q->reading) {
		retval = __videobuf_read_start(q);
		if (retval < 0)
			goto done;
	}

	retval = 0;
	while (count > 0) {
		/* get / wait for data */
		if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		rc = videobuf_waiton(q, q->read_buf, nonblocking, 1);
		if (rc < 0) {
			if (0 == retval)
				retval = rc;
			break;
		}

		if (q->read_buf->state == VIDEOBUF_DONE) {
			rc = __videobuf_copy_stream(q, q->read_buf, data + retval, count,
					retval, vbihack, nonblocking);
			if (rc < 0) {
				retval = rc;
				break;
			}
			retval      += rc;
			count       -= rc;
			q->read_off += rc;
		} else {
			/* some error */
			q->read_off = q->read_buf->size;
			if (0 == retval)
				retval = -EIO;
		}

		/* requeue buffer when done with copying */
		if (q->read_off == q->read_buf->size) {
			list_add_tail(&q->read_buf->stream,
				      &q->stream);
			spin_lock_irqsave(q->irqlock, flags);
			q->ops->buf_queue(q, q->read_buf);
			spin_unlock_irqrestore(q->irqlock, flags);
			q->read_buf = NULL;
		}
		if (retval < 0)
			break;
	}

done:
	videobuf_queue_unlock(q);
	return retval;
}
EXPORT_SYMBOL_GPL(videobuf_read_stream);

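/*
 * poll() helper: registers on the wait queue of the next stream buffer
 * (starting read-mode capture if needed) and reports EPOLLIN/EPOLLOUT once
 * that buffer is done or has failed.
 */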
__poll_t videobuf_poll_stream(struct file *file,
			      struct videobuf_queue *q,
			      poll_table *wait)
{
	__poll_t req_events = poll_requested_events(wait);
	struct videobuf_buffer *buf = NULL;
	__poll_t rc = 0;

	videobuf_queue_lock(q);
	if (q->streaming) {
		if (!list_empty(&q->stream))
			buf = list_entry(q->stream.next,
					 struct videobuf_buffer, stream);
	} else if (req_events & (EPOLLIN | EPOLLRDNORM)) {
		if (!q->reading)
			__videobuf_read_start(q);
		if (!q->reading) {
			rc = EPOLLERR;
		} else if (NULL == q->read_buf) {
			q->read_buf = list_entry(q->stream.next,
						 struct videobuf_buffer,
						 stream);
			list_del(&q->read_buf->stream);
			q->read_off = 0;
		}
		buf = q->read_buf;
	}
	if (buf)
		poll_wait(file, &buf->done, wait);
	else
		rc = EPOLLERR;

	if (0 == rc) {
		if (buf->state == VIDEOBUF_DONE ||
		    buf->state == VIDEOBUF_ERROR) {
			switch (q->type) {
			case V4L2_BUF_TYPE_VIDEO_OUTPUT:
			case V4L2_BUF_TYPE_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SLICED_VBI_OUTPUT:
			case V4L2_BUF_TYPE_SDR_OUTPUT:
				rc = EPOLLOUT | EPOLLWRNORM;
				break;
			default:
				rc = EPOLLIN | EPOLLRDNORM;
				break;
			}
		}
	}
	videobuf_queue_unlock(q);
	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_poll_stream);

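/*
 * mmap() helper: looks up the buffer whose offset matches vma->vm_pgoff
 * and delegates the actual mapping to the memory-type specific
 * mmap_mapper() implementation.  PROT_WRITE and MAP_SHARED are required.
 */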
int videobuf_mmap_mapper(struct videobuf_queue *q, struct vm_area_struct *vma)
{
	int rc = -EINVAL;
	int i;

	MAGIC_CHECK(q->int_ops->magic, MAGIC_QTYPE_OPS);

	if (!(vma->vm_flags & VM_WRITE) || !(vma->vm_flags & VM_SHARED)) {
		dprintk(1, "mmap appl bug: PROT_WRITE and MAP_SHARED are required\n");
		return -EINVAL;
	}

	videobuf_queue_lock(q);
	for (i = 0; i < VIDEO_MAX_FRAME; i++) {
		struct videobuf_buffer *buf = q->bufs[i];

		if (buf && buf->memory == V4L2_MEMORY_MMAP &&
				buf->boff == (vma->vm_pgoff << PAGE_SHIFT)) {
			rc = CALL(q, mmap_mapper, q, buf, vma);
			break;
		}
	}
	videobuf_queue_unlock(q);

	return rc;
}
EXPORT_SYMBOL_GPL(videobuf_mmap_mapper);