// SPDX-License-Identifier: GPL-2.0+
/*
 *	uvc_video.c  --  USB Video Class Gadget driver
 *
 *	Copyright (C) 2009-2010
 *	    Laurent Pinchart (laurent.pinchart@ideasonboard.com)
 */

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/errno.h>
#include <linux/usb/ch9.h>
#include <linux/usb/gadget.h>
#include <linux/usb/video.h>
#include <linux/unaligned.h>

#include <media/v4l2-dev.h>

#include "uvc.h"
#include "uvc_queue.h"
#include "uvc_video.h"

/* --------------------------------------------------------------------------
 * Video codecs
 */

static int
uvc_video_encode_header(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_device *uvc = container_of(video, struct uvc_device, video);
	struct usb_composite_dev *cdev = uvc->func.config->cdev;
	struct timespec64 ts = ns_to_timespec64(buf->buf.vb2_buf.timestamp);
	int pos = 2;

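	/*
	 * Build the UVC payload header: data[0] carries bHeaderLength and
	 * data[1] the bmHeaderInfo bitmap (FID, EOF, PTS, SCR, ERR and EOH
	 * flags), optionally followed by the PTS and SCR fields added below.
	 */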
	data[1] = UVC_STREAM_EOH | video->fid;

	if (video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE)
		data[1] |= UVC_STREAM_ERR;

	if (video->queue.buf_used == 0 && ts.tv_sec) {
		/* dwClockFrequency is 48 MHz */
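		/*
		 * Convert the timestamp to microseconds and scale by 48 to
		 * express the PTS in ticks of that 48 MHz clock.
		 */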
		u32 pts = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_PTS;
		put_unaligned_le32(pts, &data[pos]);
		pos += 4;
	}

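	/*
	 * The SCR field pairs a 32-bit source time clock, in the same 48 MHz
	 * units as the PTS, with the USB SOF counter read back from the
	 * gadget controller.
	 */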
	if (cdev->gadget->ops->get_frame) {
		u32 sof, stc;

		sof = usb_gadget_frame_number(cdev->gadget);
		ktime_get_ts64(&ts);
		stc = ((u64)ts.tv_sec * USEC_PER_SEC + ts.tv_nsec / NSEC_PER_USEC) * 48;

		data[1] |= UVC_STREAM_SCR;
		put_unaligned_le32(stc, &data[pos]);
		put_unaligned_le16(sof, &data[pos+4]);
		pos += 6;
	}

	data[0] = pos;

	if (buf->bytesused - video->queue.buf_used <= len - pos)
		data[1] |= UVC_STREAM_EOF;

	return pos;
}

static int
uvc_video_encode_data(struct uvc_video *video, struct uvc_buffer *buf,
		u8 *data, int len)
{
	struct uvc_video_queue *queue = &video->queue;
	unsigned int nbytes;
	void *mem;

	/* Copy video data to the USB buffer. */
	mem = buf->mem + queue->buf_used;
	nbytes = min((unsigned int)len, buf->bytesused - queue->buf_used);

	memcpy(data, mem, nbytes);
	queue->buf_used += nbytes;

	return nbytes;
}

static void
uvc_video_encode_bulk(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add a header at the beginning of the payload. */
	if (video->payload_size == 0) {
		ret = uvc_video_encode_header(video, buf, mem, len);
		video->payload_size += ret;
		mem += ret;
		len -= ret;
	}

	/* Process video data. */
	len = min((int)(video->max_payload_size - video->payload_size), len);
	ret = uvc_video_encode_data(video, buf, mem, len);

	video->payload_size += ret;
	len -= ret;

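	/*
	 * When the payload fills max_payload_size exactly, ask for a
	 * zero-length packet so the host can detect the payload boundary
	 * on the bulk pipe.
	 */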
	req->length = video->req_size - len;
	req->zero = video->payload_size == video->max_payload_size;

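	/*
	 * Once the buffer is fully consumed, complete it and toggle the
	 * frame ID bit so the next payload is recognised as a new frame.
	 */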
	if (buf->bytesused == video->queue.buf_used) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;

		video->payload_size = 0;
	}

	if (video->payload_size == video->max_payload_size ||
	    video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE ||
	    buf->bytesused == video->queue.buf_used)
		video->payload_size = 0;
}

static void
uvc_video_encode_isoc_sg(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	unsigned int pending = buf->bytesused - video->queue.buf_used;
	struct uvc_request *ureq = req->context;
	struct scatterlist *sg, *iter;
	unsigned int len = video->req_size;
	unsigned int sg_left, part = 0;
	unsigned int i;
	int header_len;

	sg = ureq->sgt.sgl;
	sg_init_table(sg, ureq->sgt.nents);

	/* Init the header. */
	header_len = uvc_video_encode_header(video, buf, ureq->header,
				      video->req_size);
	sg_set_buf(sg, ureq->header, header_len);
	len -= header_len;

	if (pending <= len)
		len = pending;

	req->length = (len == pending) ?
		len + header_len : video->req_size;

	/* Init the remaining sg entries with the payload data. */
	sg = sg_next(sg);

	for_each_sg(sg, iter, ureq->sgt.nents - 1, i) {
		if (!len || !buf->sg || !buf->sg->length)
			break;

		sg_left = buf->sg->length - buf->offset;
		part = min_t(unsigned int, len, sg_left);

		sg_set_page(iter, sg_page(buf->sg), part, buf->offset);

		if (part == sg_left) {
			buf->offset = 0;
			buf->sg = sg_next(buf->sg);
		} else {
			buf->offset += part;
		}
		len -= part;
	}

	/* Assign the header and video data scatterlist to the request. */
	req->buf = NULL;
	req->sg	= ureq->sgt.sgl;
	req->num_sgs = i + 1;

	req->length -= len;
	video->queue.buf_used += req->length - header_len;

	if (buf->bytesused == video->queue.buf_used || !buf->sg ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		buf->offset = 0;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

static void
uvc_video_encode_isoc(struct usb_request *req, struct uvc_video *video,
		struct uvc_buffer *buf)
{
	void *mem = req->buf;
	struct uvc_request *ureq = req->context;
	int len = video->req_size;
	int ret;

	/* Add the header. */
	ret = uvc_video_encode_header(video, buf, mem, len);
	mem += ret;
	len -= ret;

	/* Process video data. */
	ret = uvc_video_encode_data(video, buf, mem, len);
	len -= ret;

	req->length = video->req_size - len;

	if (buf->bytesused == video->queue.buf_used ||
			video->queue.flags & UVC_QUEUE_DROP_INCOMPLETE) {
		video->queue.buf_used = 0;
		buf->state = UVC_BUF_STATE_DONE;
		list_del(&buf->queue);
		video->fid ^= UVC_STREAM_FID;
		ureq->last_buf = buf;
	}
}

/* --------------------------------------------------------------------------
 * Request handling
 */

/*
 * Callers must hold req_lock whenever this function can be reached from
 * multiple threads, for example while frames are streaming to the host.
 */
static void
uvc_video_free_request(struct uvc_request *ureq, struct usb_ep *ep)
{
	sg_free_table(&ureq->sgt);
	if (ureq->req && ep) {
		usb_ep_free_request(ep, ureq->req);
		ureq->req = NULL;
	}

	kfree(ureq->req_buffer);
	ureq->req_buffer = NULL;

	if (!list_empty(&ureq->list))
		list_del_init(&ureq->list);

	kfree(ureq);
}

static int uvcg_video_ep_queue(struct uvc_video *video, struct usb_request *req)
{
	int ret;

	ret = usb_ep_queue(video->ep, req, GFP_ATOMIC);
	if (ret < 0) {
		uvcg_err(&video->uvc->func, "Failed to queue request (%d).\n",
			 ret);

		/* If the endpoint is disabled the descriptor may be NULL. */
		if (video->ep->desc) {
			/* Isochronous endpoints can't be halted. */
			if (usb_endpoint_xfer_bulk(video->ep->desc))
				usb_ep_set_halt(video->ep);
		}
	}

	return ret;
}

/* This function must be called with video->req_lock held. */
static int uvcg_video_usb_req_queue(struct uvc_video *video,
	struct usb_request *req, bool queue_to_ep)
{
	bool is_bulk = video->max_payload_size;
	struct list_head *list = NULL;

	if (!video->is_enabled)
		return -ENODEV;

	if (queue_to_ep) {
		struct uvc_request *ureq = req->context;
		/*
		 * With USB3 handling more requests at a higher speed, we can't
		 * afford to generate an interrupt for every request. Decide to
		 * interrupt:
		 *
		 * - When no more requests are available in the free queue, as
		 *   this may be our last chance to refill the endpoint's
		 *   request queue.
		 *
		 * - When this request is the last request for the video
		 *   buffer, as we want to start sending the next video buffer
		 *   ASAP in case it doesn't get started already in the next
		 *   iteration of this loop.
		 *
		 * - Four times over the length of the requests queue (as
		 *   indicated by video->uvc_num_requests), as a trade-off
		 *   between latency and interrupt load.
		 */
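		/*
		 * For example, with uvc_num_requests == 64 the divisor below
		 * is 16, so at most every 16th request triggers an interrupt
		 * unless the free list runs dry or a buffer completes first.
		 */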
		if (list_empty(&video->req_free) || ureq->last_buf ||
			!(video->req_int_count %
			DIV_ROUND_UP(video->uvc_num_requests, 4))) {
			video->req_int_count = 0;
			req->no_interrupt = 0;
		} else {
			req->no_interrupt = 1;
		}
		video->req_int_count++;
		return uvcg_video_ep_queue(video, req);
	}
	/*
	 * If we're not queuing to the ep, bulk requests go back to the
	 * req_free list while isoc requests go to the req_ready list.
	 */
	list = is_bulk ? &video->req_free : &video->req_ready;
	list_add_tail(&req->list, list);
	return 0;
}

/*
 * Must only be called from uvcg_video_enable, since after that point requests
 * are only queued to the endpoint from the uvc_video_complete() handler. This
 * function kick-starts the flow of requests from the gadget driver to the USB
 * controller.
 */
static void uvc_video_ep_queue_initial_requests(struct uvc_video *video)
{
	struct usb_request *req = NULL;
	unsigned long flags = 0;
	unsigned int count = 0;
	int ret = 0;

	/*
	 * Queue only half of the free list, so that some usb_requests remain
	 * available for the video_pump async_wq thread to encode uvc buffers
	 * into. Otherwise the free list could run out of requests to encode
	 * into, and we would end up queueing only zero-length requests to the
	 * endpoint.
	 */
	unsigned int half_list_size = video->uvc_num_requests / 2;

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Take these requests off the free list and queue them all to the
	 * endpoint. Since we queue 0 length requests with the req_lock held,
	 * there isn't any 'data' race involved here with the complete handler.
	 */
	while (count < half_list_size) {
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		req->length = 0;
		ret = uvcg_video_ep_queue(video, req);
		if (ret < 0) {
			uvcg_queue_cancel(&video->queue, 0);
			break;
		}
		count++;
	}
	spin_unlock_irqrestore(&video->req_lock, flags);
}

static void
uvc_video_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct uvc_request *ureq = req->context;
	struct uvc_video *video = ureq->video;
	struct uvc_video_queue *queue = &video->queue;
	struct uvc_buffer *last_buf;
	struct usb_request *to_queue = req;
	unsigned long flags;
	bool is_bulk = video->max_payload_size;
	int ret = 0;

	spin_lock_irqsave(&video->req_lock, flags);
	if (!video->is_enabled) {
		/*
		 * When is_enabled is false, uvcg_video_disable() ensures
		 * that in-flight uvc_buffers are returned, so we can
		 * safely call free_request without worrying about
		 * last_buf.
		 */
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		return;
	}

	last_buf = ureq->last_buf;
	ureq->last_buf = NULL;
	spin_unlock_irqrestore(&video->req_lock, flags);

	switch (req->status) {
	case 0:
		break;

	case -EXDEV:
		uvcg_dbg(&video->uvc->func, "VS request missed xfer.\n");
		if (req->length != 0)
			queue->flags |= UVC_QUEUE_DROP_INCOMPLETE;
		break;

	case -ESHUTDOWN:	/* disconnect from host. */
		uvcg_dbg(&video->uvc->func, "VS request cancelled.\n");
		uvcg_queue_cancel(queue, 1);
		break;

	default:
		uvcg_warn(&video->uvc->func,
			  "VS request completed with status %d.\n",
			  req->status);
		uvcg_queue_cancel(queue, 0);
	}

	if (last_buf) {
		spin_lock_irqsave(&queue->irqlock, flags);
		uvcg_complete_buffer(queue, last_buf);
		spin_unlock_irqrestore(&queue->irqlock, flags);
	}

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * The video stream might have been disabled while we were
	 * processing the current usb_request. So make sure we're still
	 * streaming before queueing the usb_request back to req_free.
	 */
	if (!video->is_enabled) {
		uvc_video_free_request(ureq, ep);
		spin_unlock_irqrestore(&video->req_lock, flags);
		uvcg_queue_cancel(queue, 0);

		return;
	}

	/*
	 * Here we check whether any request is available in the ready
	 * list. If it is, queue it to the ep and add the current
	 * usb_request to the req_free list - for video_pump to fill in.
	 * Otherwise, just use the current usb_request to queue a 0
	 * length request to the ep. Since we always add to the req_free
	 * list if we dequeue from the ready list, there will never
	 * be a situation where the req_free list is completely out of
	 * requests and cannot recover.
	 */
	to_queue->length = 0;
	if (!list_empty(&video->req_ready)) {
		to_queue = list_first_entry(&video->req_ready,
			struct usb_request, list);
		list_del(&to_queue->list);
		list_add_tail(&req->list, &video->req_free);
		/*
		 * Queue work to the wq as well, since it is possible that a
		 * buffer may not have been completely encoded with the set of
		 * in-flight usb requests for which the complete callbacks are
		 * firing.
		 * In that case, if we do not queue work to the worker thread,
		 * the buffer will never be marked as complete - and therefore
		 * not be returned to userspace. As a result, the
		 * dequeue -> queue -> dequeue flow of uvc buffers will not
		 * happen.
		 */
		queue_work(video->async_wq, &video->pump);
	}
	/*
	 * Queue to the endpoint. The actual queueing to the ep will only
	 * happen on one thread - the async_wq for bulk endpoints and this
	 * thread for isoc endpoints.
	 */
	ret = uvcg_video_usb_req_queue(video, to_queue, !is_bulk);
	if (ret < 0) {
		/*
		 * Endpoint error, but the stream is still enabled.
		 * Put the request back in req_free for it to be cleaned
		 * up later.
		 */
		list_add_tail(&to_queue->list, &video->req_free);
		/*
		 * There is a new free request - wake up the pump.
		 */
		queue_work(video->async_wq, &video->pump);
	}

	spin_unlock_irqrestore(&video->req_lock, flags);
}

static int
uvc_video_free_requests(struct uvc_video *video)
{
	struct uvc_request *ureq, *temp;

	list_for_each_entry_safe(ureq, temp, &video->ureqs, list)
		uvc_video_free_request(ureq, video->ep);

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	video->req_size = 0;
	return 0;
}

static int
uvc_video_alloc_requests(struct uvc_video *video)
{
	struct uvc_request *ureq;
	unsigned int req_size;
	unsigned int i;
	int ret = -ENOMEM;

	BUG_ON(video->req_size);

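	/*
	 * Size each request for the endpoint's maximum payload per service
	 * interval: wMaxPacketSize scaled by the maxburst and mult factors
	 * negotiated for the endpoint.
	 */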
	req_size = video->ep->maxpacket
		 * max_t(unsigned int, video->ep->maxburst, 1)
		 * (video->ep->mult);

	for (i = 0; i < video->uvc_num_requests; i++) {
		ureq = kzalloc(sizeof(struct uvc_request), GFP_KERNEL);
		if (ureq == NULL)
			goto error;

		INIT_LIST_HEAD(&ureq->list);

		list_add_tail(&ureq->list, &video->ureqs);

		ureq->req_buffer = kmalloc(req_size, GFP_KERNEL);
		if (ureq->req_buffer == NULL)
			goto error;

		ureq->req = usb_ep_alloc_request(video->ep, GFP_KERNEL);
		if (ureq->req == NULL)
			goto error;

		ureq->req->buf = ureq->req_buffer;
		ureq->req->length = 0;
		ureq->req->complete = uvc_video_complete;
		ureq->req->context = ureq;
		ureq->video = video;
		ureq->last_buf = NULL;

		list_add_tail(&ureq->req->list, &video->req_free);
		/* req_size/PAGE_SIZE + 1 for overruns and + 1 for header */
		sg_alloc_table(&ureq->sgt,
			       DIV_ROUND_UP(req_size - UVCG_REQUEST_HEADER_LEN,
					    PAGE_SIZE) + 2, GFP_KERNEL);
	}

	video->req_size = req_size;

	return 0;

error:
	uvc_video_free_requests(video);
	return ret;
}

/* --------------------------------------------------------------------------
 * Video streaming
 */

/*
 * uvcg_video_pump - Pump video data into the USB requests
 *
 * This function fills the available USB requests (listed in req_free) with
 * video data from the queued buffers.
 */
static void uvcg_video_pump(struct work_struct *work)
{
	struct uvc_video *video = container_of(work, struct uvc_video, pump);
	struct uvc_video_queue *queue = &video->queue;
	/* video->max_payload_size is only set when using bulk transfer */
	bool is_bulk = video->max_payload_size;
	struct usb_request *req = NULL;
	struct uvc_buffer *buf;
	unsigned long flags;
	int ret = 0;

	while (true) {
		if (!video->ep->enabled)
			return;

		/*
		 * Check is_enabled and retrieve the first available USB
		 * request, protected by the request lock.
		 */
		spin_lock_irqsave(&video->req_lock, flags);
		if (!video->is_enabled || list_empty(&video->req_free)) {
			spin_unlock_irqrestore(&video->req_lock, flags);
			return;
		}
		req = list_first_entry(&video->req_free, struct usb_request,
					list);
		list_del(&req->list);
		spin_unlock_irqrestore(&video->req_lock, flags);

		/*
		 * Retrieve the first available video buffer and fill the
		 * request, protected by the video queue irqlock.
		 */
		spin_lock_irqsave(&queue->irqlock, flags);
		buf = uvcg_queue_head(queue);
		if (!buf) {
			/*
			 * Either the queue has been disconnected or no video
			 * buffer is available for bulk transfer. Either way,
			 * stop processing further.
			 */
			spin_unlock_irqrestore(&queue->irqlock, flags);
			break;
		}

		video->encode(req, video, buf);

		spin_unlock_irqrestore(&queue->irqlock, flags);

		spin_lock_irqsave(&video->req_lock, flags);
		/*
		 * For bulk endpoints we queue from the worker thread, since
		 * we would rather not wait on requests to become ready in the
		 * uvcg_video_complete() handler. For isoc endpoints we add
		 * the request to the ready list and only queue it to the
		 * endpoint from the complete handler.
		 */
		ret = uvcg_video_usb_req_queue(video, req, is_bulk);
		spin_unlock_irqrestore(&video->req_lock, flags);

		if (ret < 0) {
			uvcg_queue_cancel(queue, 0);
			break;
		}
	}
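	/*
	 * We only get here on a break above, holding a request that was
	 * never queued to the endpoint: return it to the free list, or free
	 * it if the stream was disabled in the meantime.
	 */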
	spin_lock_irqsave(&video->req_lock, flags);
	if (video->is_enabled)
		list_add_tail(&req->list, &video->req_free);
	else
		uvc_video_free_request(req->context, video->ep);
	spin_unlock_irqrestore(&video->req_lock, flags);
}

/*
 * Disable the video stream
 */
int
uvcg_video_disable(struct uvc_video *video)
{
	unsigned long flags;
	struct list_head inflight_bufs;
	struct usb_request *req, *temp;
	struct uvc_buffer *buf, *btemp;
	struct uvc_request *ureq, *utemp;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video disable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	INIT_LIST_HEAD(&inflight_bufs);
	spin_lock_irqsave(&video->req_lock, flags);
	video->is_enabled = false;

	/*
	 * Remove any in-flight buffers from the uvc_requests
	 * because we want to return them before cancelling the
	 * queue. This ensures that we aren't stuck waiting for
	 * all complete callbacks to come through before disabling
	 * the vb2 queue.
	 */
	list_for_each_entry(ureq, &video->ureqs, list) {
		if (ureq->last_buf) {
			list_add_tail(&ureq->last_buf->queue, &inflight_bufs);
			ureq->last_buf = NULL;
		}
	}
	spin_unlock_irqrestore(&video->req_lock, flags);

	cancel_work_sync(&video->pump);
	uvcg_queue_cancel(&video->queue, 0);

	spin_lock_irqsave(&video->req_lock, flags);
	/*
	 * Remove all uvc_requests from ureqs with list_del_init().
	 * This lets uvc_video_free_request() correctly identify
	 * whether the uvc_request is attached to a list when freeing
	 * memory.
	 */
	list_for_each_entry_safe(ureq, utemp, &video->ureqs, list)
		list_del_init(&ureq->list);

	list_for_each_entry_safe(req, temp, &video->req_free, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	list_for_each_entry_safe(req, temp, &video->req_ready, list) {
		list_del(&req->list);
		uvc_video_free_request(req->context, video->ep);
	}

	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	video->req_size = 0;
	spin_unlock_irqrestore(&video->req_lock, flags);

	/*
	 * Return all the video buffers before disabling the queue.
	 */
	spin_lock_irqsave(&video->queue.irqlock, flags);
	list_for_each_entry_safe(buf, btemp, &inflight_bufs, queue) {
		list_del(&buf->queue);
		uvcg_complete_buffer(&video->queue, buf);
	}
	spin_unlock_irqrestore(&video->queue.irqlock, flags);

	uvcg_queue_enable(&video->queue, 0);
	return 0;
}

/*
 * Enable the video stream.
 */
int uvcg_video_enable(struct uvc_video *video)
{
	int ret;

	if (video->ep == NULL) {
		uvcg_info(&video->uvc->func,
			  "Video enable failed, device is uninitialized.\n");
		return -ENODEV;
	}

	/*
	 * Safe to access request related fields without req_lock because
	 * this is the only thread currently active, and no other
	 * request handling thread will become active until this function
	 * returns.
	 */
	video->is_enabled = true;

	if ((ret = uvcg_queue_enable(&video->queue, 1)) < 0)
		return ret;

	if ((ret = uvc_video_alloc_requests(video)) < 0)
		return ret;

	if (video->max_payload_size) {
		video->encode = uvc_video_encode_bulk;
		video->payload_size = 0;
	} else
		video->encode = video->queue.use_sg ?
			uvc_video_encode_isoc_sg : uvc_video_encode_isoc;

	video->req_int_count = 0;

	uvc_video_ep_queue_initial_requests(video);
	queue_work(video->async_wq, &video->pump);

	return ret;
}

/*
 * Initialize the UVC video stream.
 */
int uvcg_video_init(struct uvc_video *video, struct uvc_device *uvc)
{
	video->is_enabled = false;
	INIT_LIST_HEAD(&video->ureqs);
	INIT_LIST_HEAD(&video->req_free);
	INIT_LIST_HEAD(&video->req_ready);
	spin_lock_init(&video->req_lock);
	INIT_WORK(&video->pump, uvcg_video_pump);

	/* Allocate a work queue for the asynchronous video pump handler. */
	video->async_wq = alloc_workqueue("uvcgadget", WQ_UNBOUND | WQ_HIGHPRI, 0);
	if (!video->async_wq)
		return -EINVAL;

	video->uvc = uvc;
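	/* Default to a 320x240 YUYV frame, i.e. 16 bits per pixel. */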
	video->fcc = V4L2_PIX_FMT_YUYV;
	video->bpp = 16;
	video->width = 320;
	video->height = 240;
	video->imagesize = 320 * 240 * 2;

	/* Initialize the video buffers queue. */
	uvcg_queue_init(&video->queue, uvc->v4l2_dev.dev->parent,
			V4L2_BUF_TYPE_VIDEO_OUTPUT, &video->mutex);
	return 0;
}