• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  *      uvc_queue.c  --  USB Video Class driver - Buffers management
3  *
4  *      Copyright (C) 2005-2009
5  *          Laurent Pinchart (laurent.pinchart@skynet.be)
6  *
7  *      This program is free software; you can redistribute it and/or modify
8  *      it under the terms of the GNU General Public License as published by
9  *      the Free Software Foundation; either version 2 of the License, or
10  *      (at your option) any later version.
11  *
12  */
13 
14 #include <linux/kernel.h>
15 #include <linux/mm.h>
16 #include <linux/list.h>
17 #include <linux/module.h>
18 #include <linux/usb.h>
19 #include <linux/videodev2.h>
20 #include <linux/vmalloc.h>
21 #include <linux/wait.h>
22 #include <asm/atomic.h>
23 
24 #include "uvcvideo.h"
25 
26 /* ------------------------------------------------------------------------
27  * Video buffers queue management.
28  *
29  * The video queue is initialized by uvc_queue_init(). The function performs
30  * basic initialization of the uvc_video_queue struct and never fails.
31  *
32  * Video buffer allocation and freeing are performed by uvc_alloc_buffers and
33  * uvc_free_buffers respectively. The former acquires the video queue lock,
34  * while the latter must be called with the lock held (so that allocation can
35  * free previously allocated buffers). Trying to free buffers that are mapped
36  * to user space will return -EBUSY.
37  *
38  * Video buffers are managed using two queues. However, unlike most USB video
39  * drivers that use an in queue and an out queue, we use a main queue to hold
40  * all queued buffers (both 'empty' and 'done' buffers), and an irq queue to
41  * hold empty buffers. This design (copied from video-buf) minimizes locking
42  * in interrupt, as only one queue is shared between interrupt and user
43  * contexts.
44  *
45  * Use cases
46  * ---------
47  *
48  * Unless stated otherwise, all operations that modify the irq buffers queue
49  * are protected by the irq spinlock.
50  *
51  * 1. The user queues the buffers, starts streaming and dequeues a buffer.
52  *
53  *    The buffers are added to the main and irq queues. Both operations are
54  *    protected by the queue lock, and the latter is protected by the irq
55  *    spinlock as well.
56  *
57  *    The completion handler fetches a buffer from the irq queue and fills it
58  *    with video data. If no buffer is available (irq queue empty), the handler
59  *    returns immediately.
60  *
61  *    When the buffer is full, the completion handler removes it from the irq
62  *    queue, marks it as ready (UVC_BUF_STATE_DONE) and wakes its wait queue.
63  *    At that point, any process waiting on the buffer will be woken up. If a
64  *    process tries to dequeue a buffer after it has been marked ready, the
65  *    dequeuing will succeed immediately.
66  *
67  * 2. Buffers are queued, user is waiting on a buffer and the device gets
68  *    disconnected.
69  *
70  *    When the device is disconnected, the kernel calls the completion handler
71  *    with an appropriate status code. The handler marks all buffers in the
72  *    irq queue as being erroneous (UVC_BUF_STATE_ERROR) and wakes them up so
73  *    that any process waiting on a buffer gets woken up.
74  *
75  *    Waking up the first buffer on the irq list is not enough, as the
76  *    process waiting on the buffer might restart the dequeue operation
77  *    immediately.
78  *
79  */
80 
uvc_queue_init(struct uvc_video_queue * queue,enum v4l2_buf_type type)81 void uvc_queue_init(struct uvc_video_queue *queue, enum v4l2_buf_type type)
82 {
83 	mutex_init(&queue->mutex);
84 	spin_lock_init(&queue->irqlock);
85 	INIT_LIST_HEAD(&queue->mainqueue);
86 	INIT_LIST_HEAD(&queue->irqqueue);
87 	queue->type = type;
88 }
89 
90 /*
91  * Allocate the video buffers.
92  *
93  * Pages are reserved to make sure they will not be swapped, as they will be
94  * filled in the URB completion handler.
95  *
96  * Buffers will be individually mapped, so they must all be page aligned.
97  */
uvc_alloc_buffers(struct uvc_video_queue * queue,unsigned int nbuffers,unsigned int buflength)98 int uvc_alloc_buffers(struct uvc_video_queue *queue, unsigned int nbuffers,
99 		unsigned int buflength)
100 {
101 	unsigned int bufsize = PAGE_ALIGN(buflength);
102 	unsigned int i;
103 	void *mem = NULL;
104 	int ret;
105 
106 	if (nbuffers > UVC_MAX_VIDEO_BUFFERS)
107 		nbuffers = UVC_MAX_VIDEO_BUFFERS;
108 
109 	mutex_lock(&queue->mutex);
110 
111 	if ((ret = uvc_free_buffers(queue)) < 0)
112 		goto done;
113 
114 	/* Bail out if no buffers should be allocated. */
115 	if (nbuffers == 0)
116 		goto done;
117 
118 	/* Decrement the number of buffers until allocation succeeds. */
119 	for (; nbuffers > 0; --nbuffers) {
120 		mem = vmalloc_32(nbuffers * bufsize);
121 		if (mem != NULL)
122 			break;
123 	}
124 
125 	if (mem == NULL) {
126 		ret = -ENOMEM;
127 		goto done;
128 	}
129 
130 	for (i = 0; i < nbuffers; ++i) {
131 		memset(&queue->buffer[i], 0, sizeof queue->buffer[i]);
132 		queue->buffer[i].buf.index = i;
133 		queue->buffer[i].buf.m.offset = i * bufsize;
134 		queue->buffer[i].buf.length = buflength;
135 		queue->buffer[i].buf.type = queue->type;
136 		queue->buffer[i].buf.sequence = 0;
137 		queue->buffer[i].buf.field = V4L2_FIELD_NONE;
138 		queue->buffer[i].buf.memory = V4L2_MEMORY_MMAP;
139 		queue->buffer[i].buf.flags = 0;
140 		init_waitqueue_head(&queue->buffer[i].wait);
141 	}
142 
143 	queue->mem = mem;
144 	queue->count = nbuffers;
145 	queue->buf_size = bufsize;
146 	ret = nbuffers;
147 
148 done:
149 	mutex_unlock(&queue->mutex);
150 	return ret;
151 }
152 
153 /*
154  * Free the video buffers.
155  *
156  * This function must be called with the queue lock held.
157  */
uvc_free_buffers(struct uvc_video_queue * queue)158 int uvc_free_buffers(struct uvc_video_queue *queue)
159 {
160 	unsigned int i;
161 
162 	for (i = 0; i < queue->count; ++i) {
163 		if (queue->buffer[i].vma_use_count != 0)
164 			return -EBUSY;
165 	}
166 
167 	if (queue->count) {
168 		vfree(queue->mem);
169 		queue->count = 0;
170 	}
171 
172 	return 0;
173 }
174 
__uvc_query_buffer(struct uvc_buffer * buf,struct v4l2_buffer * v4l2_buf)175 static void __uvc_query_buffer(struct uvc_buffer *buf,
176 		struct v4l2_buffer *v4l2_buf)
177 {
178 	memcpy(v4l2_buf, &buf->buf, sizeof *v4l2_buf);
179 
180 	if (buf->vma_use_count)
181 		v4l2_buf->flags |= V4L2_BUF_FLAG_MAPPED;
182 
183 	switch (buf->state) {
184 	case UVC_BUF_STATE_ERROR:
185 	case UVC_BUF_STATE_DONE:
186 		v4l2_buf->flags |= V4L2_BUF_FLAG_DONE;
187 		break;
188 	case UVC_BUF_STATE_QUEUED:
189 	case UVC_BUF_STATE_ACTIVE:
190 		v4l2_buf->flags |= V4L2_BUF_FLAG_QUEUED;
191 		break;
192 	case UVC_BUF_STATE_IDLE:
193 	default:
194 		break;
195 	}
196 }
197 
uvc_query_buffer(struct uvc_video_queue * queue,struct v4l2_buffer * v4l2_buf)198 int uvc_query_buffer(struct uvc_video_queue *queue,
199 		struct v4l2_buffer *v4l2_buf)
200 {
201 	int ret = 0;
202 
203 	mutex_lock(&queue->mutex);
204 	if (v4l2_buf->index >= queue->count) {
205 		ret = -EINVAL;
206 		goto done;
207 	}
208 
209 	__uvc_query_buffer(&queue->buffer[v4l2_buf->index], v4l2_buf);
210 
211 done:
212 	mutex_unlock(&queue->mutex);
213 	return ret;
214 }
215 
/*
 * Queue a video buffer. Attempting to queue a buffer that has already been
 * queued will return -EINVAL.
 *
 * Returns 0 on success, -EINVAL on validation failure, or -ENODEV if the
 * device has been disconnected.
 */
int uvc_queue_buffer(struct uvc_video_queue *queue,
	struct v4l2_buffer *v4l2_buf)
{
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	uvc_trace(UVC_TRACE_CAPTURE, "Queuing buffer %u.\n", v4l2_buf->index);

	/* Only MMAP buffers of the queue's own type are supported. */
	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (v4l2_buf->index >= queue->count) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Out of range index.\n");
		ret = -EINVAL;
		goto done;
	}

	/* A buffer can only be queued from the idle state; queued, active and
	 * done buffers are rejected. */
	buf = &queue->buffer[v4l2_buf->index];
	if (buf->state != UVC_BUF_STATE_IDLE) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state "
			"(%u).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	/* For output buffers the application supplies the payload size, which
	 * must fit within the buffer. */
	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_OUTPUT &&
	    v4l2_buf->bytesused > buf->buf.length) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n");
		ret = -EINVAL;
		goto done;
	}

	/* The disconnected flag must be tested and the buffer added to the
	 * irq queue atomically under the irq spinlock, so this can't race
	 * with uvc_queue_cancel() setting UVC_QUEUE_DISCONNECTED. */
	spin_lock_irqsave(&queue->irqlock, flags);
	if (queue->flags & UVC_QUEUE_DISCONNECTED) {
		spin_unlock_irqrestore(&queue->irqlock, flags);
		ret = -ENODEV;
		goto done;
	}
	buf->state = UVC_BUF_STATE_QUEUED;
	/* Capture buffers start empty; output buffers carry the payload size
	 * supplied by the application. */
	if (v4l2_buf->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
		buf->buf.bytesused = 0;
	else
		buf->buf.bytesused = v4l2_buf->bytesused;

	list_add_tail(&buf->stream, &queue->mainqueue);
	list_add_tail(&buf->queue, &queue->irqqueue);
	spin_unlock_irqrestore(&queue->irqlock, flags);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}
279 
uvc_queue_waiton(struct uvc_buffer * buf,int nonblocking)280 static int uvc_queue_waiton(struct uvc_buffer *buf, int nonblocking)
281 {
282 	if (nonblocking) {
283 		return (buf->state != UVC_BUF_STATE_QUEUED &&
284 			buf->state != UVC_BUF_STATE_ACTIVE)
285 			? 0 : -EAGAIN;
286 	}
287 
288 	return wait_event_interruptible(buf->wait,
289 		buf->state != UVC_BUF_STATE_QUEUED &&
290 		buf->state != UVC_BUF_STATE_ACTIVE);
291 }
292 
/*
 * Dequeue a video buffer. If nonblocking is false, block until a buffer is
 * available.
 *
 * Returns 0 on success, -EIO if the dequeued buffer is erroneous (it is
 * still removed from the queue), -EAGAIN in non-blocking mode when no buffer
 * is ready, or -EINVAL on validation failure.
 */
int uvc_dequeue_buffer(struct uvc_video_queue *queue,
		struct v4l2_buffer *v4l2_buf, int nonblocking)
{
	struct uvc_buffer *buf;
	int ret = 0;

	if (v4l2_buf->type != queue->type ||
	    v4l2_buf->memory != V4L2_MEMORY_MMAP) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer type (%u) "
			"and/or memory (%u).\n", v4l2_buf->type,
			v4l2_buf->memory);
		return -EINVAL;
	}

	mutex_lock(&queue->mutex);
	if (list_empty(&queue->mainqueue)) {
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Empty buffer queue.\n");
		ret = -EINVAL;
		goto done;
	}

	/* Buffers are dequeued in FIFO order from the main queue. */
	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
	if ((ret = uvc_queue_waiton(buf, nonblocking)) < 0)
		goto done;

	uvc_trace(UVC_TRACE_CAPTURE, "Dequeuing buffer %u (%u, %u bytes).\n",
		buf->buf.index, buf->state, buf->buf.bytesused);

	switch (buf->state) {
	case UVC_BUF_STATE_ERROR:
		uvc_trace(UVC_TRACE_CAPTURE, "[W] Corrupted data "
			"(transmission error).\n");
		ret = -EIO;
		/* Fall through - erroneous buffers are dequeued like done
		 * buffers, but -EIO is reported to the caller. */
	case UVC_BUF_STATE_DONE:
		buf->state = UVC_BUF_STATE_IDLE;
		break;

	case UVC_BUF_STATE_IDLE:
	case UVC_BUF_STATE_QUEUED:
	case UVC_BUF_STATE_ACTIVE:
	default:
		uvc_trace(UVC_TRACE_CAPTURE, "[E] Invalid buffer state %u "
			"(driver bug?).\n", buf->state);
		ret = -EINVAL;
		goto done;
	}

	list_del(&buf->stream);
	__uvc_query_buffer(buf, v4l2_buf);

done:
	mutex_unlock(&queue->mutex);
	return ret;
}
351 
352 /*
353  * Poll the video queue.
354  *
355  * This function implements video queue polling and is intended to be used by
356  * the device poll handler.
357  */
uvc_queue_poll(struct uvc_video_queue * queue,struct file * file,poll_table * wait)358 unsigned int uvc_queue_poll(struct uvc_video_queue *queue, struct file *file,
359 		poll_table *wait)
360 {
361 	struct uvc_buffer *buf;
362 	unsigned int mask = 0;
363 
364 	mutex_lock(&queue->mutex);
365 	if (list_empty(&queue->mainqueue)) {
366 		mask |= POLLERR;
367 		goto done;
368 	}
369 	buf = list_first_entry(&queue->mainqueue, struct uvc_buffer, stream);
370 
371 	poll_wait(file, &buf->wait, wait);
372 	if (buf->state == UVC_BUF_STATE_DONE ||
373 	    buf->state == UVC_BUF_STATE_ERROR)
374 		mask |= POLLIN | POLLRDNORM;
375 
376 done:
377 	mutex_unlock(&queue->mutex);
378 	return mask;
379 }
380 
381 /*
382  * Enable or disable the video buffers queue.
383  *
384  * The queue must be enabled before starting video acquisition and must be
385  * disabled after stopping it. This ensures that the video buffers queue
386  * state can be properly initialized before buffers are accessed from the
387  * interrupt handler.
388  *
389  * Enabling the video queue initializes parameters (such as sequence number,
390  * sync pattern, ...). If the queue is already enabled, return -EBUSY.
391  *
392  * Disabling the video queue cancels the queue and removes all buffers from
393  * the main queue.
394  *
395  * This function can't be called from interrupt context. Use
396  * uvc_queue_cancel() instead.
397  */
uvc_queue_enable(struct uvc_video_queue * queue,int enable)398 int uvc_queue_enable(struct uvc_video_queue *queue, int enable)
399 {
400 	unsigned int i;
401 	int ret = 0;
402 
403 	mutex_lock(&queue->mutex);
404 	if (enable) {
405 		if (uvc_queue_streaming(queue)) {
406 			ret = -EBUSY;
407 			goto done;
408 		}
409 		queue->sequence = 0;
410 		queue->flags |= UVC_QUEUE_STREAMING;
411 		queue->buf_used = 0;
412 	} else {
413 		uvc_queue_cancel(queue, 0);
414 		INIT_LIST_HEAD(&queue->mainqueue);
415 
416 		for (i = 0; i < queue->count; ++i)
417 			queue->buffer[i].state = UVC_BUF_STATE_IDLE;
418 
419 		queue->flags &= ~UVC_QUEUE_STREAMING;
420 	}
421 
422 done:
423 	mutex_unlock(&queue->mutex);
424 	return ret;
425 }
426 
427 /*
428  * Cancel the video buffers queue.
429  *
430  * Cancelling the queue marks all buffers on the irq queue as erroneous,
431  * wakes them up and removes them from the queue.
432  *
433  * If the disconnect parameter is set, further calls to uvc_queue_buffer will
434  * fail with -ENODEV.
435  *
436  * This function acquires the irq spinlock and can be called from interrupt
437  * context.
438  */
uvc_queue_cancel(struct uvc_video_queue * queue,int disconnect)439 void uvc_queue_cancel(struct uvc_video_queue *queue, int disconnect)
440 {
441 	struct uvc_buffer *buf;
442 	unsigned long flags;
443 
444 	spin_lock_irqsave(&queue->irqlock, flags);
445 	while (!list_empty(&queue->irqqueue)) {
446 		buf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
447 				       queue);
448 		list_del(&buf->queue);
449 		buf->state = UVC_BUF_STATE_ERROR;
450 		wake_up(&buf->wait);
451 	}
452 	/* This must be protected by the irqlock spinlock to avoid race
453 	 * conditions between uvc_queue_buffer and the disconnection event that
454 	 * could result in an interruptible wait in uvc_dequeue_buffer. Do not
455 	 * blindly replace this logic by checking for the UVC_DEV_DISCONNECTED
456 	 * state outside the queue code.
457 	 */
458 	if (disconnect)
459 		queue->flags |= UVC_QUEUE_DISCONNECTED;
460 	spin_unlock_irqrestore(&queue->irqlock, flags);
461 }
462 
/*
 * Remove the given buffer from the irq queue and return the next buffer to
 * fill, or NULL if the irq queue is empty.
 *
 * If the UVC_QUEUE_DROP_INCOMPLETE flag is set and the buffer isn't full,
 * the buffer is recycled instead: its payload is discarded, its state reset
 * to QUEUED, and the same buffer is returned so the caller refills it.
 *
 * Called from the URB completion handler; irq queue manipulation is
 * protected by the irq spinlock.
 */
struct uvc_buffer *uvc_queue_next_buffer(struct uvc_video_queue *queue,
		struct uvc_buffer *buf)
{
	struct uvc_buffer *nextbuf;
	unsigned long flags;

	/* Recycle incomplete buffers when configured to drop them. */
	if ((queue->flags & UVC_QUEUE_DROP_INCOMPLETE) &&
	    buf->buf.length != buf->buf.bytesused) {
		buf->state = UVC_BUF_STATE_QUEUED;
		buf->buf.bytesused = 0;
		return buf;
	}

	spin_lock_irqsave(&queue->irqlock, flags);
	list_del(&buf->queue);
	if (!list_empty(&queue->irqqueue))
		nextbuf = list_first_entry(&queue->irqqueue, struct uvc_buffer,
					   queue);
	else
		nextbuf = NULL;
	spin_unlock_irqrestore(&queue->irqlock, flags);

	/* Stamp the sequence number and timestamp before waking up any
	 * process waiting on the buffer. */
	buf->buf.sequence = queue->sequence++;
	do_gettimeofday(&buf->buf.timestamp);

	wake_up(&buf->wait);
	return nextbuf;
}
491 
492