1 // Copyright 2020 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "V4L2Decoder"
7
8 #include <v4l2_codec2/components/V4L2Decoder.h>
9
10 #include <stdint.h>
11
12 #include <algorithm>
13 #include <vector>
14
15 #include <base/bind.h>
16 #include <base/files/scoped_file.h>
17 #include <base/memory/ptr_util.h>
18 #include <log/log.h>
19
20 #include <v4l2_codec2/common/Common.h>
21 #include <v4l2_codec2/common/Fourcc.h>
22
23 namespace android {
24 namespace {
25
// Number of buffers allocated on the V4L2 input (OUTPUT_MPLANE) queue.
constexpr size_t kNumInputBuffers = 16;
// Extra buffers for transmitting in the whole video pipeline.
constexpr size_t kNumExtraOutputBuffers = 4;

// Currently we only support flexible pixel 420 format YCBCR_420_888 in Android.
// Here is the list of flexible 420 format.
constexpr std::initializer_list<uint32_t> kSupportedOutputFourccs = {
        Fourcc::YU12, Fourcc::YV12, Fourcc::YM12, Fourcc::YM21,
        Fourcc::NV12, Fourcc::NV21, Fourcc::NM12, Fourcc::NM21,
};
36
VideoCodecToV4L2PixFmt(VideoCodec codec)37 uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
38 switch (codec) {
39 case VideoCodec::H264:
40 return V4L2_PIX_FMT_H264;
41 case VideoCodec::VP8:
42 return V4L2_PIX_FMT_VP8;
43 case VideoCodec::VP9:
44 return V4L2_PIX_FMT_VP9;
45 }
46 }
47
48 } // namespace
49
50 // static
Create(const VideoCodec & codec,const size_t inputBufferSize,const size_t minNumOutputBuffers,GetPoolCB getPoolCb,OutputCB outputCb,ErrorCB errorCb,scoped_refptr<::base::SequencedTaskRunner> taskRunner)51 std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
52 const VideoCodec& codec, const size_t inputBufferSize, const size_t minNumOutputBuffers,
53 GetPoolCB getPoolCb, OutputCB outputCb, ErrorCB errorCb,
54 scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
55 std::unique_ptr<V4L2Decoder> decoder =
56 ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(taskRunner));
57 if (!decoder->start(codec, inputBufferSize, minNumOutputBuffers, std::move(getPoolCb),
58 std::move(outputCb), std::move(errorCb))) {
59 return nullptr;
60 }
61 return decoder;
62 }
63
// Private constructor; instances are created via V4L2Decoder::Create(). All
// subsequent calls must happen on |taskRunner|.
V4L2Decoder::V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner)
      : mTaskRunner(std::move(taskRunner)) {
    ALOGV("%s()", __func__);

    // Cache a weak pointer to this instance for binding device callbacks that
    // may fire after the decoder is destroyed.
    mWeakThis = mWeakThisFactory.GetWeakPtr();
}
70
// Destructor: tears down both V4L2 queues and the device. Must run on
// |mTaskRunner|.
V4L2Decoder::~V4L2Decoder() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Invalidate weak pointers first so no pending callback can run against a
    // partially-destroyed decoder.
    mWeakThisFactory.InvalidateWeakPtrs();

    // Streamoff input and output queue.
    if (mOutputQueue) {
        mOutputQueue->streamoff();
        mOutputQueue->deallocateBuffers();
        mOutputQueue = nullptr;
    }
    if (mInputQueue) {
        mInputQueue->streamoff();
        mInputQueue->deallocateBuffers();
        mInputQueue = nullptr;
    }
    // Stop the device poll thread only after the queues are torn down.
    if (mDevice) {
        mDevice->stopPolling();
        mDevice = nullptr;
    }
}
93
// Initializes the decoder: opens the V4L2 device for |codec|, verifies its
// capabilities, subscribes to resolution-change events, sets up the input
// queue and starts device polling. Returns false (leaving the decoder
// unusable) on any failure.
bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize,
                        const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb,
                        ErrorCB errorCb) {
    ALOGV("%s(codec=%s, inputBufferSize=%zu, minNumOutputBuffers=%zu)", __func__,
          VideoCodecToString(codec), inputBufferSize, minNumOutputBuffers);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mMinNumOutputBuffers = minNumOutputBuffers;
    mGetPoolCb = std::move(getPoolCb);
    mOutputCb = std::move(outputCb);
    mErrorCb = std::move(errorCb);

    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return false;
    }

    mDevice = V4L2Device::create();

    const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
    if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
        ALOGE("Failed to open device for %s", VideoCodecToString(codec));
        return false;
    }

    if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
        ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
        return false;
    }

    // Drain support is required: probe it with a TRY (non-executing) command.
    struct v4l2_decoder_cmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = V4L2_DEC_CMD_STOP;
    if (mDevice->ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
        ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
        return false;
    }

    // Subscribe to the resolution change event.
    struct v4l2_event_subscription sub;
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
        ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
        return false;
    }

    // Create Input/Output V4L2Queue, and setup input queue.
    // (The output queue is configured later, on the first resolution change.)
    mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!mInputQueue || !mOutputQueue) {
        ALOGE("Failed to create V4L2 queue.");
        return false;
    }
    if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
        ALOGE("Failed to setup input format.");
        return false;
    }

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        return false;
    }

    setState(State::Idle);
    return true;
}
162
setupInputFormat(const uint32_t inputPixelFormat,const size_t inputBufferSize)163 bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
164 ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
165 inputBufferSize);
166 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
167
168 // Check if the format is supported.
169 std::vector<uint32_t> formats =
170 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
171 if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
172 ALOGE("Input codec s not supported by device.");
173 return false;
174 }
175
176 // Setup the input format.
177 auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
178 if (!format) {
179 ALOGE("Failed to call IOCTL to set input format.");
180 return false;
181 }
182 ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
183
184 if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
185 ALOGE("Failed to allocate input buffer.");
186 return false;
187 }
188 if (!mInputQueue->streamon()) {
189 ALOGE("Failed to streamon input queue.");
190 return false;
191 }
192 return true;
193 }
194
decode(std::unique_ptr<ConstBitstreamBuffer> buffer,DecodeCB decodeCb)195 void V4L2Decoder::decode(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb) {
196 ALOGV("%s(id=%d)", __func__, buffer->id);
197 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
198
199 if (mState == State::Error) {
200 ALOGE("Ignore due to error state.");
201 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
202 VideoDecoder::DecodeStatus::kError));
203 return;
204 }
205
206 if (mState == State::Idle) {
207 setState(State::Decoding);
208 }
209
210 mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
211 pumpDecodeRequest();
212 }
213
drain(DecodeCB drainCb)214 void V4L2Decoder::drain(DecodeCB drainCb) {
215 ALOGV("%s()", __func__);
216 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
217
218 switch (mState) {
219 case State::Idle:
220 ALOGV("Nothing need to drain, ignore.");
221 mTaskRunner->PostTask(
222 FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
223 return;
224
225 case State::Decoding:
226 mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
227 pumpDecodeRequest();
228 return;
229
230 case State::Draining:
231 case State::Error:
232 ALOGE("Ignore due to wrong state: %s", StateToString(mState));
233 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
234 VideoDecoder::DecodeStatus::kError));
235 return;
236 }
237 }
238
pumpDecodeRequest()239 void V4L2Decoder::pumpDecodeRequest() {
240 ALOGV("%s()", __func__);
241 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
242
243 if (mState != State::Decoding) return;
244
245 while (!mDecodeRequests.empty()) {
246 // Drain the decoder.
247 if (mDecodeRequests.front().buffer == nullptr) {
248 ALOGV("Get drain request.");
249 // Send the flush command after all input buffers are dequeued. This makes
250 // sure all previous resolution changes have been handled because the
251 // driver must hold the input buffer that triggers resolution change. The
252 // driver cannot decode data in it without new output buffers. If we send
253 // the flush now and a queued input buffer triggers resolution change
254 // later, the driver will send an output buffer that has
255 // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
256 // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
257 // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
258 // to the decoder.
259 if (mInputQueue->queuedBuffersCount() > 0) {
260 ALOGV("Wait for all input buffers dequeued.");
261 return;
262 }
263
264 auto request = std::move(mDecodeRequests.front());
265 mDecodeRequests.pop();
266
267 if (!sendV4L2DecoderCmd(false)) {
268 std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
269 onError();
270 return;
271 }
272 mDrainCb = std::move(request.decodeCb);
273 setState(State::Draining);
274 return;
275 }
276
277 // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
278 auto inputBuffer = mInputQueue->getFreeBuffer();
279 if (!inputBuffer) {
280 ALOGV("There is no free input buffer.");
281 return;
282 }
283
284 auto request = std::move(mDecodeRequests.front());
285 mDecodeRequests.pop();
286
287 const int32_t bitstreamId = request.buffer->id;
288 ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
289 inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
290 size_t planeSize = inputBuffer->getPlaneSize(0);
291 if (request.buffer->size > planeSize) {
292 ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
293 request.buffer->size);
294 onError();
295 return;
296 }
297
298 ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
299 request.buffer->offset);
300 inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
301 inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
302 std::vector<int> fds;
303 fds.push_back(std::move(request.buffer->dmabuf.handle()->data[0]));
304 if (!std::move(*inputBuffer).queueDMABuf(fds)) {
305 ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
306 onError();
307 return;
308 }
309
310 mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
311 }
312 }
313
// Aborts all in-flight work: fails pending decode/drain callbacks with
// kAborted, restarts both V4L2 queues to drop queued buffers, and returns the
// decoder to Idle. No-op in Idle; ignored in Error.
void V4L2Decoder::flush() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Idle) {
        ALOGV("Nothing need to flush, ignore.");
        return;
    }
    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return;
    }

    // Call all pending callbacks.
    for (auto& item : mPendingDecodeCbs) {
        std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
    }
    mPendingDecodeCbs.clear();
    if (mDrainCb) {
        std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
    }

    // Streamoff both V4L2 queues to drop input and output buffers.
    // Polling must be stopped while the queues are restarted.
    const bool isOutputStreaming = mOutputQueue->isStreaming();
    mDevice->stopPolling();
    mOutputQueue->streamoff();
    mFrameAtDevice.clear();
    mInputQueue->streamoff();

    // Streamon both V4L2 queues. The output queue is only restarted if it was
    // streaming before (i.e. the first resolution change already happened).
    mInputQueue->streamon();
    if (isOutputStreaming) {
        mOutputQueue->streamon();
    }

    // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggered after
    // a buffer is DQBUF from output queue. Now all the buffers are dropped at mOutputQueue, we
    // have to trigger tryFetchVideoFrame() here.
    if (mVideoFramePool) {
        tryFetchVideoFrame();
    }

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        onError();
        return;
    }

    setState(State::Idle);
}
365
// Posted by the device poll thread whenever the V4L2 device is ready: dequeues
// finished input/output buffers, delivers decoded frames, handles resolution
// change events, and schedules follow-up work. |event| is true when a V4L2
// event is pending on the device.
void V4L2Decoder::serviceDeviceTask(bool event) {
    ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
          event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
          mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
          mInputQueue->allocatedBuffersCount(),
          (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
          mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
          mOutputQueue->allocatedBuffersCount());
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Error) return;

    // Dequeue output and input queue.
    bool inputDequeued = false;
    while (mInputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from input queue.");
            onError();
            return;
        }
        // success with a null buffer means nothing left to dequeue right now.
        if (!dequeuedBuffer) break;

        inputDequeued = true;

        // Run the corresponding decode callback. The bitstream ID was stashed
        // in the timestamp when the buffer was queued in pumpDecodeRequest().
        int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
        ALOGV("DQBUF from input queue, bitstreamId=%d", id);
        auto it = mPendingDecodeCbs.find(id);
        if (it == mPendingDecodeCbs.end()) {
            // The callback may have been consumed already by flush().
            ALOGW("Callback is already abandoned.");
            continue;
        }
        std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
        mPendingDecodeCbs.erase(it);
    }

    bool outputDequeued = false;
    while (mOutputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from output queue.");
            onError();
            return;
        }
        if (!dequeuedBuffer) break;

        outputDequeued = true;

        const size_t bufferId = dequeuedBuffer->bufferId();
        const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
        const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
        const bool isLast = dequeuedBuffer->isLast();
        ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
              bufferId, bitstreamId, bytesUsed, isLast);

        // Get the corresponding VideoFrame of the dequeued buffer.
        auto it = mFrameAtDevice.find(bufferId);
        ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
                    bufferId);
        auto frame = std::move(it->second);
        mFrameAtDevice.erase(it);

        if (bytesUsed > 0) {
            ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
            frame->setBitstreamId(bitstreamId);
            frame->setVisibleRect(mVisibleRect);
            mOutputCb.Run(std::move(frame));
        } else {
            // Workaround(b/168750131): If the buffer is not enqueued before the next drain is done,
            // then the driver will fail to notify EOS. So we recycle the buffer immediately.
            ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
            // Release the readable reference before re-acquiring the same slot
            // as a free (writable) buffer.
            dequeuedBuffer.reset();
            auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
            ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);

            if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
                ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
                onError();
                return;
            }
            mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
        }

        // A buffer flagged V4L2_BUF_FLAG_LAST while draining means the flush
        // completed: restart the decoder and report drain success.
        if (mDrainCb && isLast) {
            ALOGV("All buffers are drained.");
            sendV4L2DecoderCmd(true);
            std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
            setState(State::Idle);
        }
    }

    // Handle resolution change event.
    if (event && dequeueResolutionChangeEvent()) {
        if (!changeResolution()) {
            onError();
            return;
        }
    }

    // We freed some input buffers, continue handling decode requests.
    if (inputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
    }
    // We free some output buffers, try to get VideoFrame.
    if (outputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
    }
}
481
dequeueResolutionChangeEvent()482 bool V4L2Decoder::dequeueResolutionChangeEvent() {
483 ALOGV("%s()", __func__);
484 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
485
486 struct v4l2_event ev;
487 memset(&ev, 0, sizeof(ev));
488 while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
489 if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
490 ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
491 return true;
492 }
493 }
494 return false;
495 }
496
// Handles a resolution change: queries the new coded size and required buffer
// count from the driver, re-allocates the output queue, and fetches a new
// VideoFramePool of the right size. Returns false on any failure.
bool V4L2Decoder::changeResolution() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    const std::optional<struct v4l2_format> format = getFormatInfo();
    std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
    if (!format || !numOutputBuffers) {
        return false;
    }
    // Honor the client's minimum buffer requirement as well as the driver's.
    *numOutputBuffers = std::max(*numOutputBuffers, mMinNumOutputBuffers);

    const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
    if (!setupOutputFormat(codedSize)) {
        return false;
    }

    // The driver may adjust the format we just set; re-query the actual size.
    const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
    if (!adjustedFormat) {
        return false;
    }
    mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
    mVisibleRect = getVisibleRect(mCodedSize);

    ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
          toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
    if (isEmpty(mCodedSize)) {
        ALOGE("Failed to get resolution from V4L2 driver.");
        return false;
    }

    // Drop all in-flight frames and buffer mappings before re-allocating.
    mOutputQueue->streamoff();
    mOutputQueue->deallocateBuffers();
    mFrameAtDevice.clear();
    mBlockIdToV4L2Id.clear();

    // The driver may grant more buffers than requested.
    const size_t adjustedNumOutputBuffers =
            mOutputQueue->allocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF);
    if (adjustedNumOutputBuffers == 0) {
        ALOGE("Failed to allocate output buffer.");
        return false;
    }
    ALOGV("Allocated %zu output buffers.", adjustedNumOutputBuffers);
    if (!mOutputQueue->streamon()) {
        ALOGE("Failed to streamon output queue.");
        return false;
    }

    // Release the previous VideoFramePool before getting a new one to guarantee only one pool
    // exists at the same time.
    mVideoFramePool.reset();
    // Always use flexible pixel 420 format YCBCR_420_888 in Android.
    mVideoFramePool =
            mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888, adjustedNumOutputBuffers);
    if (!mVideoFramePool) {
        ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
        return false;
    }

    // Start populating the freshly-allocated output queue.
    tryFetchVideoFrame();
    return true;
}
558
setupOutputFormat(const ui::Size & size)559 bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
560 for (const uint32_t& pixfmt :
561 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
562 if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
563 kSupportedOutputFourccs.end()) {
564 ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
565 continue;
566 }
567
568 if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
569 return true;
570 }
571 }
572
573 ALOGE("Failed to find supported pixel format");
574 return false;
575 }
576
// Requests a new VideoFrame from |mVideoFramePool| to back a free V4L2 output
// buffer. The frame is delivered asynchronously to onVideoFrameReady().
void V4L2Decoder::tryFetchVideoFrame() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mVideoFramePool) {
        ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
        onError();
        return;
    }

    // Without a free V4L2 slot there is nothing to attach a frame to;
    // serviceDeviceTask() retries after an output buffer is dequeued.
    if (mOutputQueue->freeBuffersCount() == 0) {
        ALOGV("No free V4L2 output buffers, ignore.");
        return;
    }

    // getVideoFrame() returns false if a fetch is already in flight.
    if (!mVideoFramePool->getVideoFrame(
                ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
        ALOGV("%s(): Previous callback is running, ignore.", __func__);
    }
}
597
// Callback from VideoFramePool with a new frame (or nullopt on failure).
// Maps the frame's graphic block to a stable V4L2 buffer slot and queues it
// on the output queue, then tries to fetch another frame.
void V4L2Decoder::onVideoFrameReady(
        std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!frameWithBlockId) {
        ALOGE("Got nullptr VideoFrame.");
        onError();
        return;
    }

    // Unwrap our arguments.
    std::unique_ptr<VideoFrame> frame;
    uint32_t blockId;
    std::tie(frame, blockId) = std::move(*frameWithBlockId);

    std::optional<V4L2WritableBufferRef> outputBuffer;
    // Find the V4L2 buffer that is associated with this block.
    auto iter = mBlockIdToV4L2Id.find(blockId);
    if (iter != mBlockIdToV4L2Id.end()) {
        // If we have met this block in the past, reuse the same V4L2 buffer.
        outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
    } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
        // If this is the first time we see this block, give it the next
        // available V4L2 buffer.
        const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
        mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
        outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
    } else {
        // If this happens, this is a bug in VideoFramePool. It should never
        // provide more blocks than we have V4L2 buffers.
        ALOGE("Got more different blocks than we have V4L2 buffers for.");
    }

    if (!outputBuffer) {
        ALOGE("V4L2 buffer not available. blockId=%u", blockId);
        onError();
        return;
    }

    uint32_t v4l2Id = outputBuffer->bufferId();
    ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);

    if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
        ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
              v4l2Id);
        onError();
        return;
    }
    // Track the frame until the driver returns the buffer. A duplicate entry
    // would indicate an ID bookkeeping bug.
    if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
        ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
        onError();
        return;
    }
    mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));

    // Keep fetching until we run out of free buffers or pool frames.
    tryFetchVideoFrame();
}
656
getNumOutputBuffers()657 std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
658 ALOGV("%s()", __func__);
659 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
660
661 struct v4l2_control ctrl;
662 memset(&ctrl, 0, sizeof(ctrl));
663 ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
664 if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
665 ALOGE("ioctl() failed: VIDIOC_G_CTRL");
666 return std::nullopt;
667 }
668 ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);
669
670 return ctrl.value + kNumExtraOutputBuffers;
671 }
672
getFormatInfo()673 std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
674 ALOGV("%s()", __func__);
675 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
676
677 struct v4l2_format format;
678 memset(&format, 0, sizeof(format));
679 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
680 if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
681 ALOGE("ioctl() failed: VIDIOC_G_FMT");
682 return std::nullopt;
683 }
684
685 return format;
686 }
687
getVisibleRect(const ui::Size & codedSize)688 Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
689 ALOGV("%s()", __func__);
690 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
691
692 struct v4l2_rect* visible_rect = nullptr;
693 struct v4l2_selection selection_arg;
694 memset(&selection_arg, 0, sizeof(selection_arg));
695 selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
696 selection_arg.target = V4L2_SEL_TGT_COMPOSE;
697
698 if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
699 ALOGV("VIDIOC_G_SELECTION is supported");
700 visible_rect = &selection_arg.r;
701 } else {
702 ALOGV("Fallback to VIDIOC_G_CROP");
703 struct v4l2_crop crop_arg;
704 memset(&crop_arg, 0, sizeof(crop_arg));
705 crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
706
707 if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
708 ALOGW("ioctl() VIDIOC_G_CROP failed");
709 return Rect(codedSize.width, codedSize.height);
710 }
711 visible_rect = &crop_arg.c;
712 }
713
714 Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
715 visible_rect->top + visible_rect->height);
716 ALOGV("visible rectangle is %s", toString(rect).c_str());
717 if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
718 ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
719 toString(codedSize).c_str());
720 return Rect(codedSize.width, codedSize.height);
721 }
722 if (rect.isEmpty()) {
723 ALOGW("visible size is empty");
724 return Rect(codedSize.width, codedSize.height);
725 }
726
727 return rect;
728 }
729
sendV4L2DecoderCmd(bool start)730 bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
731 ALOGV("%s(start=%d)", __func__, start);
732 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
733
734 struct v4l2_decoder_cmd cmd;
735 memset(&cmd, 0, sizeof(cmd));
736 cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
737 if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
738 ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
739 return false;
740 }
741
742 return true;
743 }
744
// Moves the decoder into the terminal Error state and notifies the client via
// |mErrorCb|. All subsequent operations are rejected.
void V4L2Decoder::onError() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    setState(State::Error);
    mErrorCb.Run();
}
752
setState(State newState)753 void V4L2Decoder::setState(State newState) {
754 ALOGV("%s(%s)", __func__, StateToString(newState));
755 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
756
757 if (mState == newState) return;
758 if (mState == State::Error) {
759 ALOGV("Already in Error state.");
760 return;
761 }
762
763 switch (newState) {
764 case State::Idle:
765 break;
766 case State::Decoding:
767 break;
768 case State::Draining:
769 if (mState != State::Decoding) newState = State::Error;
770 break;
771 case State::Error:
772 break;
773 }
774
775 ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
776 mState = newState;
777 }
778
779 // static
StateToString(State state)780 const char* V4L2Decoder::StateToString(State state) {
781 switch (state) {
782 case State::Idle:
783 return "Idle";
784 case State::Decoding:
785 return "Decoding";
786 case State::Draining:
787 return "Draining";
788 case State::Error:
789 return "Error";
790 }
791 }
792
793 } // namespace android
794