1 // Copyright 2020 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 //#define LOG_NDEBUG 0
6 #define LOG_TAG "V4L2Decoder"
7
8 #include <v4l2_codec2/components/V4L2Decoder.h>
9
10 #include <stdint.h>
11
12 #include <algorithm>
13 #include <vector>
14
15 #include <base/bind.h>
16 #include <base/files/scoped_file.h>
17 #include <base/memory/ptr_util.h>
18 #include <log/log.h>
19
20 #include <v4l2_codec2/common/Common.h>
21 #include <v4l2_codec2/common/Fourcc.h>
22
23 namespace android {
24 namespace {
25
26 constexpr size_t kNumInputBuffers = 16;
27 // Extra buffers for transmitting in the whole video pipeline.
28 constexpr size_t kNumExtraOutputBuffers = 4;
29
30 // Currently we only support the flexible pixel 420 format YCBCR_420_888 in Android.
31 // Below is the list of supported flexible 420 formats.
32 constexpr std::initializer_list<uint32_t> kSupportedOutputFourccs = {
33 Fourcc::YU12, Fourcc::YV12, Fourcc::YM12, Fourcc::YM21,
34 Fourcc::NV12, Fourcc::NV21, Fourcc::NM12, Fourcc::NM21,
35 };
36
VideoCodecToV4L2PixFmt(VideoCodec codec)37 uint32_t VideoCodecToV4L2PixFmt(VideoCodec codec) {
38 switch (codec) {
39 case VideoCodec::H264:
40 return V4L2_PIX_FMT_H264;
41 case VideoCodec::VP8:
42 return V4L2_PIX_FMT_VP8;
43 case VideoCodec::VP9:
44 return V4L2_PIX_FMT_VP9;
45 case VideoCodec::HEVC:
46 return V4L2_PIX_FMT_HEVC;
47 }
48 }
49
50 } // namespace
51
52 // static
Create(const VideoCodec & codec,const size_t inputBufferSize,const size_t minNumOutputBuffers,GetPoolCB getPoolCb,OutputCB outputCb,ErrorCB errorCb,scoped_refptr<::base::SequencedTaskRunner> taskRunner)53 std::unique_ptr<VideoDecoder> V4L2Decoder::Create(
54 const VideoCodec& codec, const size_t inputBufferSize, const size_t minNumOutputBuffers,
55 GetPoolCB getPoolCb, OutputCB outputCb, ErrorCB errorCb,
56 scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
57 std::unique_ptr<V4L2Decoder> decoder =
58 ::base::WrapUnique<V4L2Decoder>(new V4L2Decoder(taskRunner));
59 if (!decoder->start(codec, inputBufferSize, minNumOutputBuffers, std::move(getPoolCb),
60 std::move(outputCb), std::move(errorCb))) {
61 return nullptr;
62 }
63 return decoder;
64 }
65
// Constructor. All subsequent calls must happen on |taskRunner|'s sequence.
V4L2Decoder::V4L2Decoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner)
        : mTaskRunner(std::move(taskRunner)) {
    ALOGV("%s()", __func__);

    // Cache a weak pointer to this instance for binding into posted tasks and
    // device callbacks; the factory is invalidated in the destructor so such
    // callbacks become no-ops after destruction.
    mWeakThis = mWeakThisFactory.GetWeakPtr();
}
72
// Destructor. Must run on the decoder's task runner. Invalidates weak
// pointers first so no in-flight callback touches a half-destroyed object,
// then streams off and frees both V4L2 queues, then stops device polling.
V4L2Decoder::~V4L2Decoder() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mWeakThisFactory.InvalidateWeakPtrs();

    // Streamoff input and output queue.
    if (mOutputQueue) {
        mOutputQueue->streamoff();
        mOutputQueue->deallocateBuffers();
        mOutputQueue = nullptr;
    }
    if (mInputQueue) {
        mInputQueue->streamoff();
        mInputQueue->deallocateBuffers();
        mInputQueue = nullptr;
    }
    if (mDevice) {
        mDevice->stopPolling();
        mDevice = nullptr;
    }
}
95
// One-time initialization, called on the decoder sequence by Create().
// Opens and validates the V4L2 device, subscribes to resolution-change
// events, configures the input (OUTPUT) queue and starts device polling.
// Returns false on any failure; the caller then discards the instance.
bool V4L2Decoder::start(const VideoCodec& codec, const size_t inputBufferSize,
                        const size_t minNumOutputBuffers, GetPoolCB getPoolCb, OutputCB outputCb,
                        ErrorCB errorCb) {
    ALOGV("%s(codec=%s, inputBufferSize=%zu, minNumOutputBuffers=%zu)", __func__,
          VideoCodecToString(codec), inputBufferSize, minNumOutputBuffers);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mMinNumOutputBuffers = minNumOutputBuffers;
    mGetPoolCb = std::move(getPoolCb);
    mOutputCb = std::move(outputCb);
    mErrorCb = std::move(errorCb);

    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return false;
    }

    mDevice = V4L2Device::create();

    const uint32_t inputPixelFormat = VideoCodecToV4L2PixFmt(codec);
    if (!mDevice->open(V4L2Device::Type::kDecoder, inputPixelFormat)) {
        ALOGE("Failed to open device for %s", VideoCodecToString(codec));
        return false;
    }

    if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
        ALOGE("Device does not have VIDEO_M2M_MPLANE and STREAMING capabilities.");
        return false;
    }

    // Drain support (V4L2_DEC_CMD_STOP) is mandatory for this implementation;
    // probe for it with TRY_DECODER_CMD before committing to this device.
    struct v4l2_decoder_cmd cmd;
    memset(&cmd, 0, sizeof(cmd));
    cmd.cmd = V4L2_DEC_CMD_STOP;
    if (mDevice->ioctl(VIDIOC_TRY_DECODER_CMD, &cmd) != 0) {
        ALOGE("Device does not support flushing (V4L2_DEC_CMD_STOP)");
        return false;
    }

    // Subscribe to the resolution change event.
    struct v4l2_event_subscription sub;
    memset(&sub, 0, sizeof(sub));
    sub.type = V4L2_EVENT_SOURCE_CHANGE;
    if (mDevice->ioctl(VIDIOC_SUBSCRIBE_EVENT, &sub) != 0) {
        ALOGE("ioctl() failed: VIDIOC_SUBSCRIBE_EVENT: V4L2_EVENT_SOURCE_CHANGE");
        return false;
    }

    // Create Input/Output V4L2Queue, and setup input queue.
    // The output queue is configured later, when the coded size is known
    // (see changeResolution()).
    mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!mInputQueue || !mOutputQueue) {
        ALOGE("Failed to create V4L2 queue.");
        return false;
    }
    if (!setupInputFormat(inputPixelFormat, inputBufferSize)) {
        ALOGE("Failed to setup input format.");
        return false;
    }

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        return false;
    }

    setState(State::Idle);
    return true;
}
164
setupInputFormat(const uint32_t inputPixelFormat,const size_t inputBufferSize)165 bool V4L2Decoder::setupInputFormat(const uint32_t inputPixelFormat, const size_t inputBufferSize) {
166 ALOGV("%s(inputPixelFormat=%u, inputBufferSize=%zu)", __func__, inputPixelFormat,
167 inputBufferSize);
168 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
169
170 // Check if the format is supported.
171 std::vector<uint32_t> formats =
172 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
173 if (std::find(formats.begin(), formats.end(), inputPixelFormat) == formats.end()) {
174 ALOGE("Input codec s not supported by device.");
175 return false;
176 }
177
178 // Setup the input format.
179 auto format = mInputQueue->setFormat(inputPixelFormat, ui::Size(), inputBufferSize, 0);
180 if (!format) {
181 ALOGE("Failed to call IOCTL to set input format.");
182 return false;
183 }
184 ALOG_ASSERT(format->fmt.pix_mp.pixelformat == inputPixelFormat);
185
186 if (mInputQueue->allocateBuffers(kNumInputBuffers, V4L2_MEMORY_DMABUF) == 0) {
187 ALOGE("Failed to allocate input buffer.");
188 return false;
189 }
190 if (!mInputQueue->streamon()) {
191 ALOGE("Failed to streamon input queue.");
192 return false;
193 }
194 return true;
195 }
196
decode(std::unique_ptr<ConstBitstreamBuffer> buffer,DecodeCB decodeCb)197 void V4L2Decoder::decode(std::unique_ptr<ConstBitstreamBuffer> buffer, DecodeCB decodeCb) {
198 ALOGV("%s(id=%d)", __func__, buffer->id);
199 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
200
201 if (mState == State::Error) {
202 ALOGE("Ignore due to error state.");
203 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(decodeCb),
204 VideoDecoder::DecodeStatus::kError));
205 return;
206 }
207
208 if (mState == State::Idle) {
209 setState(State::Decoding);
210 }
211
212 mDecodeRequests.push(DecodeRequest(std::move(buffer), std::move(decodeCb)));
213 pumpDecodeRequest();
214 }
215
drain(DecodeCB drainCb)216 void V4L2Decoder::drain(DecodeCB drainCb) {
217 ALOGV("%s()", __func__);
218 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
219
220 switch (mState) {
221 case State::Idle:
222 ALOGV("Nothing need to drain, ignore.");
223 mTaskRunner->PostTask(
224 FROM_HERE, ::base::BindOnce(std::move(drainCb), VideoDecoder::DecodeStatus::kOk));
225 return;
226
227 case State::Decoding:
228 mDecodeRequests.push(DecodeRequest(nullptr, std::move(drainCb)));
229 pumpDecodeRequest();
230 return;
231
232 case State::Draining:
233 case State::Error:
234 ALOGE("Ignore due to wrong state: %s", StateToString(mState));
235 mTaskRunner->PostTask(FROM_HERE, ::base::BindOnce(std::move(drainCb),
236 VideoDecoder::DecodeStatus::kError));
237 return;
238 }
239 }
240
pumpDecodeRequest()241 void V4L2Decoder::pumpDecodeRequest() {
242 ALOGV("%s()", __func__);
243 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
244
245 if (mState != State::Decoding) return;
246
247 while (!mDecodeRequests.empty()) {
248 // Drain the decoder.
249 if (mDecodeRequests.front().buffer == nullptr) {
250 ALOGV("Get drain request.");
251 // Send the flush command after all input buffers are dequeued. This makes
252 // sure all previous resolution changes have been handled because the
253 // driver must hold the input buffer that triggers resolution change. The
254 // driver cannot decode data in it without new output buffers. If we send
255 // the flush now and a queued input buffer triggers resolution change
256 // later, the driver will send an output buffer that has
257 // V4L2_BUF_FLAG_LAST. But some queued input buffer have not been decoded
258 // yet. Also, V4L2VDA calls STREAMOFF and STREAMON after resolution
259 // change. They implicitly send a V4L2_DEC_CMD_STOP and V4L2_DEC_CMD_START
260 // to the decoder.
261 if (mInputQueue->queuedBuffersCount() > 0) {
262 ALOGV("Wait for all input buffers dequeued.");
263 return;
264 }
265
266 auto request = std::move(mDecodeRequests.front());
267 mDecodeRequests.pop();
268
269 if (!sendV4L2DecoderCmd(false)) {
270 std::move(request.decodeCb).Run(VideoDecoder::DecodeStatus::kError);
271 onError();
272 return;
273 }
274 mDrainCb = std::move(request.decodeCb);
275 setState(State::Draining);
276 return;
277 }
278
279 // Pause if no free input buffer. We resume decoding after dequeueing input buffers.
280 auto inputBuffer = mInputQueue->getFreeBuffer();
281 if (!inputBuffer) {
282 ALOGV("There is no free input buffer.");
283 return;
284 }
285
286 auto request = std::move(mDecodeRequests.front());
287 mDecodeRequests.pop();
288
289 const int32_t bitstreamId = request.buffer->id;
290 ALOGV("QBUF to input queue, bitstreadId=%d", bitstreamId);
291 inputBuffer->setTimeStamp({.tv_sec = bitstreamId});
292 size_t planeSize = inputBuffer->getPlaneSize(0);
293 if (request.buffer->size > planeSize) {
294 ALOGE("The input size (%zu) is not enough, we need %zu", planeSize,
295 request.buffer->size);
296 onError();
297 return;
298 }
299
300 ALOGV("Set bytes_used=%zu, offset=%zu", request.buffer->offset + request.buffer->size,
301 request.buffer->offset);
302 inputBuffer->setPlaneDataOffset(0, request.buffer->offset);
303 inputBuffer->setPlaneBytesUsed(0, request.buffer->offset + request.buffer->size);
304 std::vector<int> fds;
305 fds.push_back(std::move(request.buffer->dmabuf.handle()->data[0]));
306 if (!std::move(*inputBuffer).queueDMABuf(fds)) {
307 ALOGE("%s(): Failed to QBUF to input queue, bitstreamId=%d", __func__, bitstreamId);
308 onError();
309 return;
310 }
311
312 mPendingDecodeCbs.insert(std::make_pair(bitstreamId, std::move(request.decodeCb)));
313 }
314 }
315
// Aborts all outstanding work: every pending decode/drain callback is run
// with kAborted, all queued input and output buffers are dropped via a
// streamoff/streamon cycle, and the decoder returns to Idle. No-op when
// already Idle; ignored in the Error state.
void V4L2Decoder::flush() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Idle) {
        ALOGV("Nothing need to flush, ignore.");
        return;
    }
    if (mState == State::Error) {
        ALOGE("Ignore due to error state.");
        return;
    }

    // Call all pending callbacks.
    for (auto& item : mPendingDecodeCbs) {
        std::move(item.second).Run(VideoDecoder::DecodeStatus::kAborted);
    }
    mPendingDecodeCbs.clear();
    if (mDrainCb) {
        std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kAborted);
    }

    // Streamoff both V4L2 queues to drop input and output buffers.
    // Polling must be stopped while the queues are being torn down, and the
    // output queue is only restarted if it was streaming before the flush.
    const bool isOutputStreaming = mOutputQueue->isStreaming();
    mDevice->stopPolling();
    mOutputQueue->streamoff();
    mFrameAtDevice.clear();
    mInputQueue->streamoff();

    // Streamon both V4L2 queues.
    mInputQueue->streamon();
    if (isOutputStreaming) {
        mOutputQueue->streamon();
    }

    // If there is no free buffer at mOutputQueue, tryFetchVideoFrame() should be triggerred after
    // a buffer is DQBUF from output queue. Now all the buffers are dropped at mOutputQueue, we
    // have to trigger tryFetchVideoFrame() here.
    if (mVideoFramePool) {
        tryFetchVideoFrame();
    }

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Decoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Decoder::onError, mWeakThis))) {
        ALOGE("Failed to start polling V4L2 device.");
        onError();
        return;
    }

    setState(State::Idle);
}
367
// Device poller callback, invoked whenever the V4L2 device is ready for work
// (|event| is true when a V4L2 event is also pending). Dequeues finished
// input buffers (completing their decode callbacks), dequeues decoded output
// buffers (delivering frames to the client), finishes a pending drain on the
// LAST-flagged buffer, handles resolution-change events, and re-schedules
// work that was blocked on buffer availability.
void V4L2Decoder::serviceDeviceTask(bool event) {
    ALOGV("%s(event=%d) state=%s InputQueue(%s):%zu+%zu/%zu, OutputQueue(%s):%zu+%zu/%zu", __func__,
          event, StateToString(mState), (mInputQueue->isStreaming() ? "streamon" : "streamoff"),
          mInputQueue->freeBuffersCount(), mInputQueue->queuedBuffersCount(),
          mInputQueue->allocatedBuffersCount(),
          (mOutputQueue->isStreaming() ? "streamon" : "streamoff"),
          mOutputQueue->freeBuffersCount(), mOutputQueue->queuedBuffersCount(),
          mOutputQueue->allocatedBuffersCount());
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::Error) return;

    // Dequeue output and input queue.
    bool inputDequeued = false;
    while (mInputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mInputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from input queue.");
            onError();
            return;
        }
        // A null buffer with success==true means nothing more to dequeue now.
        if (!dequeuedBuffer) break;

        inputDequeued = true;

        // Run the corresponding decode callback.
        // The bitstream ID was stashed in the timestamp at QBUF time.
        int32_t id = dequeuedBuffer->getTimeStamp().tv_sec;
        ALOGV("DQBUF from input queue, bitstreamId=%d", id);
        auto it = mPendingDecodeCbs.find(id);
        if (it == mPendingDecodeCbs.end()) {
            ALOGW("Callback is already abandoned.");
            continue;
        }
        std::move(it->second).Run(VideoDecoder::DecodeStatus::kOk);
        mPendingDecodeCbs.erase(it);
    }

    bool outputDequeued = false;
    while (mOutputQueue->queuedBuffersCount() > 0) {
        bool success;
        V4L2ReadableBufferRef dequeuedBuffer;
        std::tie(success, dequeuedBuffer) = mOutputQueue->dequeueBuffer();
        if (!success) {
            ALOGE("Failed to dequeue buffer from output queue.");
            onError();
            return;
        }
        if (!dequeuedBuffer) break;

        outputDequeued = true;

        const size_t bufferId = dequeuedBuffer->bufferId();
        const int32_t bitstreamId = static_cast<int32_t>(dequeuedBuffer->getTimeStamp().tv_sec);
        const size_t bytesUsed = dequeuedBuffer->getPlaneBytesUsed(0);
        const bool isLast = dequeuedBuffer->isLast();
        ALOGV("DQBUF from output queue, bufferId=%zu, bitstreamId=%d, bytesused=%zu, isLast=%d",
              bufferId, bitstreamId, bytesUsed, isLast);

        // Get the corresponding VideoFrame of the dequeued buffer.
        auto it = mFrameAtDevice.find(bufferId);
        ALOG_ASSERT(it != mFrameAtDevice.end(), "buffer %zu is not found at mFrameAtDevice",
                    bufferId);
        auto frame = std::move(it->second);
        mFrameAtDevice.erase(it);

        if (bytesUsed > 0) {
            ALOGV("Send output frame(bitstreamId=%d) to client", bitstreamId);
            frame->setBitstreamId(bitstreamId);
            frame->setVisibleRect(mVisibleRect);
            mOutputCb.Run(std::move(frame));
        } else {
            // Workaround(b/168750131): If the buffer is not enqueued before the next drain is done,
            // then the driver will fail to notify EOS. So we recycle the buffer immediately.
            ALOGV("Recycle empty buffer %zu back to V4L2 output queue.", bufferId);
            // Release the readable ref first so the slot shows up as free.
            dequeuedBuffer.reset();
            auto outputBuffer = mOutputQueue->getFreeBuffer(bufferId);
            ALOG_ASSERT(outputBuffer, "V4L2 output queue slot %zu is not freed.", bufferId);

            if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
                ALOGE("%s(): Failed to recycle empty buffer to output queue.", __func__);
                onError();
                return;
            }
            mFrameAtDevice.insert(std::make_pair(bufferId, std::move(frame)));
        }

        if (mDrainCb && isLast) {
            // The LAST-flagged buffer completes the drain; restart the decoder
            // with V4L2_DEC_CMD_START and report success.
            ALOGV("All buffers are drained.");
            sendV4L2DecoderCmd(true);
            std::move(mDrainCb).Run(VideoDecoder::DecodeStatus::kOk);
            setState(State::Idle);
        }
    }

    // Handle resolution change event.
    if (event && dequeueResolutionChangeEvent()) {
        if (!changeResolution()) {
            onError();
            return;
        }
    }

    // We freed some input buffers, continue handling decode requests.
    if (inputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::pumpDecodeRequest, mWeakThis));
    }
    // We free some output buffers, try to get VideoFrame.
    if (outputDequeued) {
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Decoder::tryFetchVideoFrame, mWeakThis));
    }
}
483
dequeueResolutionChangeEvent()484 bool V4L2Decoder::dequeueResolutionChangeEvent() {
485 ALOGV("%s()", __func__);
486 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
487
488 struct v4l2_event ev;
489 memset(&ev, 0, sizeof(ev));
490 while (mDevice->ioctl(VIDIOC_DQEVENT, &ev) == 0) {
491 if (ev.type == V4L2_EVENT_SOURCE_CHANGE &&
492 ev.u.src_change.changes & V4L2_EVENT_SRC_CH_RESOLUTION) {
493 return true;
494 }
495 }
496 return false;
497 }
498
// Handles a source (resolution) change: queries the new coded size and the
// driver's minimum CAPTURE buffer count, reconfigures the output queue,
// reallocates output buffers, and fetches a fresh VideoFramePool from the
// client via mGetPoolCb. Returns false on any failure.
bool V4L2Decoder::changeResolution() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    const std::optional<struct v4l2_format> format = getFormatInfo();
    std::optional<size_t> numOutputBuffers = getNumOutputBuffers();
    if (!format || !numOutputBuffers) {
        return false;
    }
    // Honor the client's minimum as a lower bound on the buffer count.
    *numOutputBuffers = std::max(*numOutputBuffers, mMinNumOutputBuffers);

    const ui::Size codedSize(format->fmt.pix_mp.width, format->fmt.pix_mp.height);
    if (!setupOutputFormat(codedSize)) {
        return false;
    }

    // The driver may adjust the requested format; re-query to get the final
    // coded size actually in effect.
    const std::optional<struct v4l2_format> adjustedFormat = getFormatInfo();
    if (!adjustedFormat) {
        return false;
    }
    mCodedSize.set(adjustedFormat->fmt.pix_mp.width, adjustedFormat->fmt.pix_mp.height);
    mVisibleRect = getVisibleRect(mCodedSize);

    ALOGI("Need %zu output buffers. coded size: %s, visible rect: %s", *numOutputBuffers,
          toString(mCodedSize).c_str(), toString(mVisibleRect).c_str());
    if (isEmpty(mCodedSize)) {
        ALOGE("Failed to get resolution from V4L2 driver.");
        return false;
    }

    // Drop all previous output buffers and the block-id mapping before
    // reallocating for the new resolution.
    mOutputQueue->streamoff();
    mOutputQueue->deallocateBuffers();
    mFrameAtDevice.clear();
    mBlockIdToV4L2Id.clear();

    const size_t adjustedNumOutputBuffers =
            mOutputQueue->allocateBuffers(*numOutputBuffers, V4L2_MEMORY_DMABUF);
    if (adjustedNumOutputBuffers == 0) {
        ALOGE("Failed to allocate output buffer.");
        return false;
    }
    ALOGV("Allocated %zu output buffers.", adjustedNumOutputBuffers);
    if (!mOutputQueue->streamon()) {
        ALOGE("Failed to streamon output queue.");
        return false;
    }

    // Release the previous VideoFramePool before getting a new one to guarantee only one pool
    // exists at the same time.
    mVideoFramePool.reset();
    // Always use flexible pixel 420 format YCBCR_420_888 in Android.
    mVideoFramePool =
            mGetPoolCb.Run(mCodedSize, HalPixelFormat::YCBCR_420_888, adjustedNumOutputBuffers);
    if (!mVideoFramePool) {
        ALOGE("Failed to get block pool with size: %s", toString(mCodedSize).c_str());
        return false;
    }

    tryFetchVideoFrame();
    return true;
}
560
setupOutputFormat(const ui::Size & size)561 bool V4L2Decoder::setupOutputFormat(const ui::Size& size) {
562 for (const uint32_t& pixfmt :
563 mDevice->enumerateSupportedPixelformats(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)) {
564 if (std::find(kSupportedOutputFourccs.begin(), kSupportedOutputFourccs.end(), pixfmt) ==
565 kSupportedOutputFourccs.end()) {
566 ALOGD("Pixel format %s is not supported, skipping...", fourccToString(pixfmt).c_str());
567 continue;
568 }
569
570 if (mOutputQueue->setFormat(pixfmt, size, 0) != std::nullopt) {
571 return true;
572 }
573 }
574
575 ALOGE("Failed to find supported pixel format");
576 return false;
577 }
578
tryFetchVideoFrame()579 void V4L2Decoder::tryFetchVideoFrame() {
580 ALOGV("%s()", __func__);
581 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
582
583 if (!mVideoFramePool) {
584 ALOGE("mVideoFramePool is null, failed to get the instance after resolution change?");
585 onError();
586 return;
587 }
588
589 if (mOutputQueue->freeBuffersCount() == 0) {
590 ALOGV("No free V4L2 output buffers, ignore.");
591 return;
592 }
593
594 if (!mVideoFramePool->getVideoFrame(
595 ::base::BindOnce(&V4L2Decoder::onVideoFrameReady, mWeakThis))) {
596 ALOGV("%s(): Previous callback is running, ignore.", __func__);
597 }
598 }
599
// Pool callback: binds the delivered VideoFrame's block to a stable V4L2
// output buffer id (reusing the mapping for previously seen blocks), queues
// the frame's dmabufs on the output queue, and records the frame in
// mFrameAtDevice until the driver returns it.
void V4L2Decoder::onVideoFrameReady(
        std::optional<VideoFramePool::FrameWithBlockId> frameWithBlockId) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!frameWithBlockId) {
        ALOGE("Got nullptr VideoFrame.");
        onError();
        return;
    }

    // Unwrap our arguments.
    std::unique_ptr<VideoFrame> frame;
    uint32_t blockId;
    std::tie(frame, blockId) = std::move(*frameWithBlockId);

    std::optional<V4L2WritableBufferRef> outputBuffer;
    // Find the V4L2 buffer that is associated with this block.
    auto iter = mBlockIdToV4L2Id.find(blockId);
    if (iter != mBlockIdToV4L2Id.end()) {
        // If we have met this block in the past, reuse the same V4L2 buffer.
        outputBuffer = mOutputQueue->getFreeBuffer(iter->second);
    } else if (mBlockIdToV4L2Id.size() < mOutputQueue->allocatedBuffersCount()) {
        // If this is the first time we see this block, give it the next
        // available V4L2 buffer.
        const size_t v4l2BufferId = mBlockIdToV4L2Id.size();
        mBlockIdToV4L2Id.emplace(blockId, v4l2BufferId);
        outputBuffer = mOutputQueue->getFreeBuffer(v4l2BufferId);
    } else {
        // If this happens, this is a bug in VideoFramePool. It should never
        // provide more blocks than we have V4L2 buffers.
        ALOGE("Got more different blocks than we have V4L2 buffers for.");
    }

    if (!outputBuffer) {
        ALOGE("V4L2 buffer not available. blockId=%u", blockId);
        onError();
        return;
    }

    uint32_t v4l2Id = outputBuffer->bufferId();
    ALOGV("QBUF to output queue, blockId=%u, V4L2Id=%u", blockId, v4l2Id);

    if (!std::move(*outputBuffer).queueDMABuf(frame->getFDs())) {
        ALOGE("%s(): Failed to QBUF to output queue, blockId=%u, V4L2Id=%u", __func__, blockId,
              v4l2Id);
        onError();
        return;
    }
    // Double-queueing the same slot would indicate corrupted bookkeeping.
    if (mFrameAtDevice.find(v4l2Id) != mFrameAtDevice.end()) {
        ALOGE("%s(): V4L2 buffer %d already enqueued.", __func__, v4l2Id);
        onError();
        return;
    }
    mFrameAtDevice.insert(std::make_pair(v4l2Id, std::move(frame)));

    // Keep fetching until the pool stalls or all output slots are filled.
    tryFetchVideoFrame();
}
658
getNumOutputBuffers()659 std::optional<size_t> V4L2Decoder::getNumOutputBuffers() {
660 ALOGV("%s()", __func__);
661 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
662
663 struct v4l2_control ctrl;
664 memset(&ctrl, 0, sizeof(ctrl));
665 ctrl.id = V4L2_CID_MIN_BUFFERS_FOR_CAPTURE;
666 if (mDevice->ioctl(VIDIOC_G_CTRL, &ctrl) != 0) {
667 ALOGE("ioctl() failed: VIDIOC_G_CTRL");
668 return std::nullopt;
669 }
670 ALOGV("%s() V4L2_CID_MIN_BUFFERS_FOR_CAPTURE returns %u", __func__, ctrl.value);
671
672 return ctrl.value + kNumExtraOutputBuffers;
673 }
674
getFormatInfo()675 std::optional<struct v4l2_format> V4L2Decoder::getFormatInfo() {
676 ALOGV("%s()", __func__);
677 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
678
679 struct v4l2_format format;
680 memset(&format, 0, sizeof(format));
681 format.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
682 if (mDevice->ioctl(VIDIOC_G_FMT, &format) != 0) {
683 ALOGE("ioctl() failed: VIDIOC_G_FMT");
684 return std::nullopt;
685 }
686
687 return format;
688 }
689
getVisibleRect(const ui::Size & codedSize)690 Rect V4L2Decoder::getVisibleRect(const ui::Size& codedSize) {
691 ALOGV("%s()", __func__);
692 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
693
694 struct v4l2_rect* visible_rect = nullptr;
695 struct v4l2_selection selection_arg;
696 memset(&selection_arg, 0, sizeof(selection_arg));
697 selection_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
698 selection_arg.target = V4L2_SEL_TGT_COMPOSE;
699
700 if (mDevice->ioctl(VIDIOC_G_SELECTION, &selection_arg) == 0) {
701 ALOGV("VIDIOC_G_SELECTION is supported");
702 visible_rect = &selection_arg.r;
703 } else {
704 ALOGV("Fallback to VIDIOC_G_CROP");
705 struct v4l2_crop crop_arg;
706 memset(&crop_arg, 0, sizeof(crop_arg));
707 crop_arg.type = V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE;
708
709 if (mDevice->ioctl(VIDIOC_G_CROP, &crop_arg) != 0) {
710 ALOGW("ioctl() VIDIOC_G_CROP failed");
711 return Rect(codedSize.width, codedSize.height);
712 }
713 visible_rect = &crop_arg.c;
714 }
715
716 Rect rect(visible_rect->left, visible_rect->top, visible_rect->left + visible_rect->width,
717 visible_rect->top + visible_rect->height);
718 ALOGV("visible rectangle is %s", toString(rect).c_str());
719 if (!contains(Rect(codedSize.width, codedSize.height), rect)) {
720 ALOGW("visible rectangle %s is not inside coded size %s", toString(rect).c_str(),
721 toString(codedSize).c_str());
722 return Rect(codedSize.width, codedSize.height);
723 }
724 if (rect.isEmpty()) {
725 ALOGW("visible size is empty");
726 return Rect(codedSize.width, codedSize.height);
727 }
728
729 return rect;
730 }
731
sendV4L2DecoderCmd(bool start)732 bool V4L2Decoder::sendV4L2DecoderCmd(bool start) {
733 ALOGV("%s(start=%d)", __func__, start);
734 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
735
736 struct v4l2_decoder_cmd cmd;
737 memset(&cmd, 0, sizeof(cmd));
738 cmd.cmd = start ? V4L2_DEC_CMD_START : V4L2_DEC_CMD_STOP;
739 if (mDevice->ioctl(VIDIOC_DECODER_CMD, &cmd) != 0) {
740 ALOGE("ioctl() VIDIOC_DECODER_CMD failed: start=%d", start);
741 return false;
742 }
743
744 return true;
745 }
746
// Transitions the decoder into the terminal Error state and notifies the
// client through the error callback supplied to start().
void V4L2Decoder::onError() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    setState(State::Error);
    mErrorCb.Run();
}
754
setState(State newState)755 void V4L2Decoder::setState(State newState) {
756 ALOGV("%s(%s)", __func__, StateToString(newState));
757 ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
758
759 if (mState == newState) return;
760 if (mState == State::Error) {
761 ALOGV("Already in Error state.");
762 return;
763 }
764
765 switch (newState) {
766 case State::Idle:
767 break;
768 case State::Decoding:
769 break;
770 case State::Draining:
771 if (mState != State::Decoding) newState = State::Error;
772 break;
773 case State::Error:
774 break;
775 }
776
777 ALOGI("Set state %s => %s", StateToString(mState), StateToString(newState));
778 mState = newState;
779 }
780
781 // static
StateToString(State state)782 const char* V4L2Decoder::StateToString(State state) {
783 switch (state) {
784 case State::Idle:
785 return "Idle";
786 case State::Decoding:
787 return "Decoding";
788 case State::Draining:
789 return "Draining";
790 case State::Error:
791 return "Error";
792 }
793 }
794
795 } // namespace android
796