// Copyright 2021 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

//#define LOG_NDEBUG 0
#define LOG_TAG "V4L2Encoder"

#include <v4l2_codec2/components/V4L2Encoder.h>

#include <stdint.h>
#include <optional>
#include <vector>

#include <base/bind.h>
#include <base/files/scoped_file.h>
#include <base/memory/ptr_util.h>
#include <log/log.h>
#include <ui/Rect.h>

#include <v4l2_codec2/common/EncodeHelpers.h>
#include <v4l2_codec2/common/Fourcc.h>
#include <v4l2_codec2/common/V4L2Device.h>
#include <v4l2_codec2/components/BitstreamBuffer.h>

namespace android {

namespace {

const VideoPixelFormat kInputPixelFormat = VideoPixelFormat::NV12;

// The maximum size of an output buffer, chosen empirically for a 1080p video.
constexpr size_t kMaxBitstreamBufferSizeInBytes = 2 * 1024 * 1024;  // 2MB
// The frame size for 1080p (FHD) video in pixels.
constexpr int k1080PSizeInPixels = 1920 * 1080;
// The frame size for 1440p (QHD) video in pixels.
constexpr int k1440PSizeInPixels = 2560 * 1440;

// Use four times kMaxBitstreamBufferSizeInBytes when the input frame size is larger than 1440p,
// and twice that size when larger than 1080p. These values were chosen empirically for some 4K
// encoding use cases and the Android CTS VideoEncoderTest (crbug.com/927284).
size_t GetMaxOutputBufferSize(const ui::Size& size) {
    if (getArea(size) > k1440PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 4;
    if (getArea(size) > k1080PSizeInPixels) return kMaxBitstreamBufferSizeInBytes * 2;
    return kMaxBitstreamBufferSizeInBytes;
}
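
// For example: a 1920x1080 stream uses the base 2MB output buffers (its area equals
// k1080PSizeInPixels, so no threshold is exceeded), 2560x1440 uses 4MB and 3840x2160 uses 8MB.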

// Define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control code if not present in header files.
#ifndef V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR
#define V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR (V4L2_CID_MPEG_BASE + 644)
#endif

}  // namespace

// static
std::unique_ptr<VideoEncoder> V4L2Encoder::create(
        C2Config::profile_t outputProfile, std::optional<uint8_t> level,
        const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
        C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate, std::optional<uint32_t> peakBitrate,
        FetchOutputBufferCB fetchOutputBufferCb, InputBufferDoneCB inputBufferDoneCb,
        OutputBufferDoneCB outputBufferDoneCb, DrainDoneCB drainDoneCb, ErrorCB errorCb,
        scoped_refptr<::base::SequencedTaskRunner> taskRunner) {
    ALOGV("%s()", __func__);

    std::unique_ptr<V4L2Encoder> encoder = ::base::WrapUnique<V4L2Encoder>(new V4L2Encoder(
            std::move(taskRunner), std::move(fetchOutputBufferCb), std::move(inputBufferDoneCb),
            std::move(outputBufferDoneCb), std::move(drainDoneCb), std::move(errorCb)));
    if (!encoder->initialize(outputProfile, level, visibleSize, stride, keyFramePeriod, bitrateMode,
                             bitrate, peakBitrate)) {
        return nullptr;
    }
    return encoder;
}

V4L2Encoder::V4L2Encoder(scoped_refptr<::base::SequencedTaskRunner> taskRunner,
                         FetchOutputBufferCB fetchOutputBufferCb,
                         InputBufferDoneCB inputBufferDoneCb, OutputBufferDoneCB outputBufferDoneCb,
                         DrainDoneCB drainDoneCb, ErrorCB errorCb)
      : mFetchOutputBufferCb(fetchOutputBufferCb),
        mInputBufferDoneCb(inputBufferDoneCb),
        mOutputBufferDoneCb(outputBufferDoneCb),
        mDrainDoneCb(std::move(drainDoneCb)),
        mErrorCb(std::move(errorCb)),
        mTaskRunner(std::move(taskRunner)) {
    ALOGV("%s()", __func__);

    mWeakThis = mWeakThisFactory.GetWeakPtr();
}

V4L2Encoder::~V4L2Encoder() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mWeakThisFactory.InvalidateWeakPtrs();

    // Flushing the encoder will stop polling and streaming on the V4L2 device queues.
    flush();

    // Deallocate all V4L2 device input and output buffers.
    destroyInputBuffers();
    destroyOutputBuffers();
}

bool V4L2Encoder::encode(std::unique_ptr<InputFrame> frame) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState != State::UNINITIALIZED);

    // If we're in the error state we can immediately return, freeing the input buffer.
    if (mState == State::ERROR) {
        return false;
    }

    if (!frame) {
        ALOGW("Empty encode request scheduled");
        return false;
    }

    mEncodeRequests.push(EncodeRequest(std::move(frame)));

    // If we were waiting for encode requests, start encoding again.
    if (mState == State::WAITING_FOR_INPUT_FRAME) {
        setState(State::ENCODING);
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
    }

    return true;
}

void V4L2Encoder::drain() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // We can only start draining once all requests in our encode request queue have been queued on
    // the V4L2 device input queue, so we mark the last item in the encode request queue as EOS.
    if (!mEncodeRequests.empty()) {
        ALOGV("Marking last item (index: %" PRIu64 ") in encode request queue as EOS",
              mEncodeRequests.back().video_frame->index());
        mEncodeRequests.back().end_of_stream = true;
        return;
    }

    // Start a drain operation on the device. If no buffers are currently queued the device will
    // return an empty buffer with the V4L2_BUF_FLAG_LAST flag set.
    handleDrainRequest();
}

void V4L2Encoder::flush() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    handleFlushRequest();
}

bool V4L2Encoder::setBitrate(uint32_t bitrate) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                              {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE, bitrate)})) {
        ALOGE("Setting bitrate to %u failed", bitrate);
        return false;
    }
    return true;
}

bool V4L2Encoder::setPeakBitrate(uint32_t peakBitrate) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                              {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_PEAK, peakBitrate)})) {
        // TODO(b/190336806): Our stack doesn't support dynamic peak bitrate changes yet, ignore
        // errors for now.
        ALOGW("Setting peak bitrate to %u failed", peakBitrate);
    }
    return true;
}

bool V4L2Encoder::setFramerate(uint32_t framerate) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    struct v4l2_streamparm parms;
    memset(&parms, 0, sizeof(v4l2_streamparm));
    parms.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
    parms.parm.output.timeperframe.numerator = 1;
    parms.parm.output.timeperframe.denominator = framerate;
    if (mDevice->ioctl(VIDIOC_S_PARM, &parms) != 0) {
        ALOGE("Setting framerate to %u failed", framerate);
        return false;
    }
    return true;
}

void V4L2Encoder::requestKeyframe() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    mKeyFrameCounter = 0;
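    // Resetting the counter causes handleEncodeRequest() to set V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME
    // before the next input frame is queued, so the next encoded frame will be a key frame.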
}

VideoPixelFormat V4L2Encoder::inputFormat() const {
    return mInputLayout ? mInputLayout.value().mFormat : VideoPixelFormat::UNKNOWN;
}

bool V4L2Encoder::initialize(C2Config::profile_t outputProfile, std::optional<uint8_t> level,
                             const ui::Size& visibleSize, uint32_t stride, uint32_t keyFramePeriod,
                             C2Config::bitrate_mode_t bitrateMode, uint32_t bitrate,
                             std::optional<uint32_t> peakBitrate) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(keyFramePeriod > 0);

    mVisibleSize = visibleSize;
    mKeyFramePeriod = keyFramePeriod;
    mKeyFrameCounter = 0;

    // Open the V4L2 device for encoding to the requested output format.
    // TODO(dstaessens): Avoid conversion to VideoCodecProfile and use C2Config::profile_t directly.
    uint32_t outputPixelFormat = V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false);
    if (!outputPixelFormat) {
        ALOGE("Invalid output profile %s", profileToString(outputProfile));
        return false;
    }

    mDevice = V4L2Device::create();
    if (!mDevice) {
        ALOGE("Failed to create V4L2 device");
        return false;
    }

    if (!mDevice->open(V4L2Device::Type::kEncoder, outputPixelFormat)) {
        ALOGE("Failed to open device for profile %s (%s)", profileToString(outputProfile),
              fourccToString(outputPixelFormat).c_str());
        return false;
    }

    // Make sure the device has all required capabilities (multi-planar memory-to-memory and
    // streaming I/O), and check that flushing is supported.
    if (!mDevice->hasCapabilities(V4L2_CAP_VIDEO_M2M_MPLANE | V4L2_CAP_STREAMING)) {
        ALOGE("Device doesn't have the required capabilities");
        return false;
    }
    if (!mDevice->isCommandSupported(V4L2_ENC_CMD_STOP)) {
        ALOGE("Device does not support flushing (V4L2_ENC_CMD_STOP)");
        return false;
    }

    // Get input/output queues so we can send encode requests to the device and get back the results.
    mInputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE);
    mOutputQueue = mDevice->getQueue(V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE);
    if (!mInputQueue || !mOutputQueue) {
        ALOGE("Failed to get V4L2 device queues");
        return false;
    }

    // Configure the requested bitrate mode and bitrate on the device.
    if (!configureBitrateMode(bitrateMode) || !setBitrate(bitrate)) return false;

    // If the bitrate mode is VBR we also need to configure the peak bitrate on the device.
    if ((bitrateMode == C2Config::BITRATE_VARIABLE) && !setPeakBitrate(*peakBitrate)) return false;

    // First try to configure the specified output format, as changing the output format can affect
    // the configured input format.
    if (!configureOutputFormat(outputProfile)) return false;

    // Configure the input format. If the device doesn't support the specified format we'll use one
    // of the device's preferred formats in combination with an input format convertor.
    if (!configureInputFormat(kInputPixelFormat, stride)) return false;

    // Create input and output buffers.
    if (!createInputBuffers() || !createOutputBuffers()) return false;

    // Configure the device, setting all required controls.
    if (!configureDevice(outputProfile, level)) return false;

    // We're ready to start encoding now.
    setState(State::WAITING_FOR_INPUT_FRAME);
    return true;
}

void V4L2Encoder::handleEncodeRequest() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState == State::ENCODING || mState == State::ERROR);

    // If we're in the error state we can immediately return.
    if (mState == State::ERROR) {
        return;
    }

    // It's possible we flushed the encoder since this function was scheduled.
    if (mEncodeRequests.empty()) {
        return;
    }

    // Get the next encode request from the queue.
    EncodeRequest& encodeRequest = mEncodeRequests.front();

    // Check if the device has free input buffers available. If not we'll switch to the
    // WAITING_FOR_V4L2_BUFFER state, and resume encoding once we've dequeued an input buffer.
    // Note: the input frames are not copied into the device's input buffers; their memory is
    // imported by reference (DMABUF). We still have to throttle the number of buffers queued on
    // the device simultaneously, however.
    if (mInputQueue->freeBuffersCount() == 0) {
        ALOGV("Waiting for device to return input buffers");
        setState(State::WAITING_FOR_V4L2_BUFFER);
        return;
    }

    // Request the next frame to be a key frame each time the counter reaches 0.
    if (mKeyFrameCounter == 0) {
        if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                                  {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FORCE_KEY_FRAME)})) {
            ALOGE("Failed requesting key frame");
            onError();
            return;
        }
    }
    mKeyFrameCounter = (mKeyFrameCounter + 1) % mKeyFramePeriod;
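    // With a key frame period of N the counter cycles through 0..N-1, so a key frame is requested
    // every N frames (and immediately after requestKeyframe() resets the counter to 0).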

    // Enqueue the input frame in the V4L2 device.
    uint64_t index = encodeRequest.video_frame->index();
    uint64_t timestamp = encodeRequest.video_frame->timestamp();
    bool end_of_stream = encodeRequest.end_of_stream;
    if (!enqueueInputBuffer(std::move(encodeRequest.video_frame))) {
        ALOGE("Failed to enqueue input frame (index: %" PRIu64 ", timestamp: %" PRId64 ")", index,
              timestamp);
        onError();
        return;
    }
    mEncodeRequests.pop();

    // Start streaming and polling on the input and output queue if required.
    if (!mInputQueue->isStreaming()) {
        ALOG_ASSERT(!mOutputQueue->isStreaming());
        if (!mOutputQueue->streamon() || !mInputQueue->streamon()) {
            ALOGE("Failed to start streaming on input and output queue");
            onError();
            return;
        }
        startDevicePoll();
    }

    // Queue buffers on output queue. These buffers will be used to store the encoded bitstream.
    while (mOutputQueue->freeBuffersCount() > 0) {
        if (!enqueueOutputBuffer()) return;
    }

    // Drain the encoder if requested.
    if (end_of_stream) {
        handleDrainRequest();
        return;
    }

    if (mEncodeRequests.empty()) {
        setState(State::WAITING_FOR_INPUT_FRAME);
        return;
    }

    // Schedule the next buffer to be encoded.
    mTaskRunner->PostTask(FROM_HERE,
                          ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
}

void V4L2Encoder::handleFlushRequest() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Stop the device poll thread.
    stopDevicePoll();

    // Stop streaming on the V4L2 device, which stops all currently queued encode operations and
    // releases all buffers currently in use by the device.
    for (auto& queue : {mInputQueue, mOutputQueue}) {
        if (queue && queue->isStreaming() && !queue->streamoff()) {
            ALOGE("Failed to stop streaming on the device queue");
            onError();
        }
    }

    // Clear all outstanding encode requests and references to input and output queue buffers.
    while (!mEncodeRequests.empty()) {
        mEncodeRequests.pop();
    }
    for (auto& buf : mInputBuffers) {
        buf = nullptr;
    }
    for (auto& buf : mOutputBuffers) {
        buf = nullptr;
    }

    // Streaming and polling on the V4L2 device input and output queues will be resumed once new
    // encode work is queued.
    if (mState != State::ERROR) {
        setState(State::WAITING_FOR_INPUT_FRAME);
    }
}

void V4L2Encoder::handleDrainRequest() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState == State::DRAINING || mState == State::ERROR) {
        return;
    }

    setState(State::DRAINING);

    // If we're not streaming we can consider the request completed immediately.
    if (!mInputQueue->isStreaming()) {
        onDrainDone(true);
        return;
    }

    struct v4l2_encoder_cmd cmd;
    memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
    cmd.cmd = V4L2_ENC_CMD_STOP;
    if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
        ALOGE("Failed to stop encoder");
        onDrainDone(false);
        return;
    }
    ALOGV("%s(): Sent STOP command to encoder", __func__);
}

void V4L2Encoder::onDrainDone(bool done) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState == State::DRAINING || mState == State::ERROR);

    if (mState == State::ERROR) {
        return;
    }

    if (!done) {
        ALOGE("draining the encoder failed");
        mDrainDoneCb.Run(false);
        onError();
        return;
    }

    ALOGV("Draining done");
    mDrainDoneCb.Run(true);

    // Draining the encoder is done, we can now start encoding again.
    if (!mEncodeRequests.empty()) {
        setState(State::ENCODING);
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
    } else {
        setState(State::WAITING_FOR_INPUT_FRAME);
    }
}

bool V4L2Encoder::configureInputFormat(VideoPixelFormat inputFormat, uint32_t stride) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState == State::UNINITIALIZED);
    ALOG_ASSERT(!mInputQueue->isStreaming());
    ALOG_ASSERT(!isEmpty(mVisibleSize));

    // First try to use the requested pixel format directly.
    std::optional<struct v4l2_format> format;
    auto fourcc = Fourcc::fromVideoPixelFormat(inputFormat, false);
    if (fourcc) {
        format = mInputQueue->setFormat(fourcc->toV4L2PixFmt(), mVisibleSize, 0, stride);
    }

    // If the device doesn't support the requested input format we'll try the device's preferred
    // input pixel formats and use a format convertor. We need to try all formats as some formats
    // might not be supported for the configured output format.
    if (!format) {
        std::vector<uint32_t> preferredFormats =
                mDevice->preferredInputFormat(V4L2Device::Type::kEncoder);
        for (uint32_t i = 0; !format && i < preferredFormats.size(); ++i) {
            format = mInputQueue->setFormat(preferredFormats[i], mVisibleSize, 0, stride);
        }
    }

    if (!format) {
        ALOGE("Failed to set input format to %s", videoPixelFormatToString(inputFormat).c_str());
        return false;
    }

    // Check whether the negotiated input format is valid. The coded size might be adjusted to match
    // encoder minimums, maximums and alignment requirements of the currently selected formats.
    auto layout = V4L2Device::v4L2FormatToVideoFrameLayout(*format);
    if (!layout) {
        ALOGE("Invalid input layout");
        return false;
    }

    mInputLayout = layout.value();
    if (!contains(Rect(mInputLayout->mCodedSize.width, mInputLayout->mCodedSize.height),
                  Rect(mVisibleSize.width, mVisibleSize.height))) {
        ALOGE("Input size %s exceeds encoder capability, encoder can handle %s",
              toString(mVisibleSize).c_str(), toString(mInputLayout->mCodedSize).c_str());
        return false;
    }

    // Calculate the input coded size from the format.
    // TODO(dstaessens): How is this different from mInputLayout->coded_size()?
    mInputCodedSize = V4L2Device::allocatedSizeFromV4L2Format(*format);

    // Configuring the input format might cause the output buffer size to change.
    auto outputFormat = mOutputQueue->getFormat();
    if (!outputFormat.first) {
        ALOGE("Failed to get output format (errno: %i)", outputFormat.second);
        return false;
    }
    uint32_t AdjustedOutputBufferSize = outputFormat.first->fmt.pix_mp.plane_fmt[0].sizeimage;
    if (mOutputBufferSize != AdjustedOutputBufferSize) {
        mOutputBufferSize = AdjustedOutputBufferSize;
        ALOGV("Output buffer size adjusted to: %u", mOutputBufferSize);
    }

    // The coded input size might be different from the visible size due to alignment requirements,
    // so we need to specify the visible rectangle. Note that this rectangle might still be adjusted
    // due to hardware limitations.
    Rect visibleRectangle(mVisibleSize.width, mVisibleSize.height);

    struct v4l2_rect rect;
    memset(&rect, 0, sizeof(rect));
    rect.left = visibleRectangle.left;
    rect.top = visibleRectangle.top;
    rect.width = visibleRectangle.width();
    rect.height = visibleRectangle.height();

    // Try to adjust the visible rectangle using the VIDIOC_S_SELECTION command. If this is not
    // supported we'll try to use the VIDIOC_S_CROP command instead. The visible rectangle might be
    // adjusted to conform to hardware limitations (e.g. round to closest horizontal and vertical
    // offsets, width and height).
    struct v4l2_selection selection_arg;
    memset(&selection_arg, 0, sizeof(selection_arg));
    selection_arg.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
    selection_arg.target = V4L2_SEL_TGT_CROP;
    selection_arg.r = rect;
    if (mDevice->ioctl(VIDIOC_S_SELECTION, &selection_arg) == 0) {
        visibleRectangle = Rect(selection_arg.r.left, selection_arg.r.top,
                                selection_arg.r.left + selection_arg.r.width,
                                selection_arg.r.top + selection_arg.r.height);
    } else {
        struct v4l2_crop crop;
        memset(&crop, 0, sizeof(v4l2_crop));
        crop.type = V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE;
        crop.c = rect;
        if (mDevice->ioctl(VIDIOC_S_CROP, &crop) != 0 ||
            mDevice->ioctl(VIDIOC_G_CROP, &crop) != 0) {
            ALOGE("Failed to crop to specified visible rectangle");
            return false;
        }
        visibleRectangle = Rect(crop.c.left, crop.c.top, crop.c.left + crop.c.width,
                                crop.c.top + crop.c.height);
    }

    ALOGV("Input format set to %s (size: %s, adjusted size: %dx%d, coded size: %s)",
          videoPixelFormatToString(mInputLayout->mFormat).c_str(), toString(mVisibleSize).c_str(),
          visibleRectangle.width(), visibleRectangle.height(), toString(mInputCodedSize).c_str());

    mVisibleSize.set(visibleRectangle.width(), visibleRectangle.height());
    return true;
}

bool V4L2Encoder::configureOutputFormat(C2Config::profile_t outputProfile) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState == State::UNINITIALIZED);
    ALOG_ASSERT(!mOutputQueue->isStreaming());
    ALOG_ASSERT(!isEmpty(mVisibleSize));

    auto format = mOutputQueue->setFormat(V4L2Device::C2ProfileToV4L2PixFmt(outputProfile, false),
                                          mVisibleSize, GetMaxOutputBufferSize(mVisibleSize));
    if (!format) {
        ALOGE("Failed to set output format to %s", profileToString(outputProfile));
        return false;
    }

    // The device might adjust the requested output buffer size to match hardware requirements.
    mOutputBufferSize = format->fmt.pix_mp.plane_fmt[0].sizeimage;

    ALOGV("Output format set to %s (buffer size: %u)", profileToString(outputProfile),
          mOutputBufferSize);
    return true;
}

bool V4L2Encoder::configureDevice(C2Config::profile_t outputProfile,
                                  std::optional<const uint8_t> outputH264Level) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Enable frame-level bitrate control. This is the only mandatory general control.
    if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                              {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE, 1)})) {
        ALOGW("Failed enabling bitrate control");
        // TODO(b/161508368): V4L2_CID_MPEG_VIDEO_FRAME_RC_ENABLE is currently not supported yet,
        // assume the operation was successful for now.
    }

    // Additional optional controls:
    // - Enable macroblock-level bitrate control.
    // - Set GOP length to 0 to disable periodic key frames.
    mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_MB_RC_ENABLE, 1),
                                                V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_GOP_SIZE, 0)});
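    // Note: periodic key frames are not driven by the device's GOP size but by mKeyFrameCounter in
    // handleEncodeRequest(), which forces a key frame every mKeyFramePeriod frames.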

    // All controls below are H.264-specific, so we can return here if the profile is not H.264.
    if (outputProfile >= C2Config::PROFILE_AVC_BASELINE &&
        outputProfile <= C2Config::PROFILE_AVC_ENHANCED_MULTIVIEW_DEPTH_HIGH) {
        return configureH264(outputProfile, outputH264Level);
    }

    return true;
}

bool V4L2Encoder::configureH264(C2Config::profile_t outputProfile,
                                std::optional<const uint8_t> outputH264Level) {
    // When encoding H.264 we want to prepend SPS and PPS to each IDR for resilience. Some
    // devices support this through the V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR control.
    // Otherwise we have to cache the latest SPS and PPS and inject these manually.
    if (mDevice->isCtrlExposed(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR)) {
        if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                                  {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_PREPEND_SPSPPS_TO_IDR, 1)})) {
            ALOGE("Failed to configure device to prepend SPS and PPS to each IDR");
            return false;
        }
        mInjectParamsBeforeIDR = false;
        ALOGV("Device supports prepending SPS and PPS to each IDR");
    } else {
        mInjectParamsBeforeIDR = true;
        ALOGV("Device doesn't support prepending SPS and PPS to IDR, injecting manually.");
    }

    std::vector<V4L2ExtCtrl> h264Ctrls;

    // No B-frames, for lowest decoding latency.
    h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_B_FRAMES, 0);
    // Quantization parameter maximum value (for variable bitrate control).
    h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_MAX_QP, 51);

    // Set H.264 profile.
    int32_t profile = V4L2Device::c2ProfileToV4L2H264Profile(outputProfile);
    if (profile < 0) {
        ALOGE("Trying to set invalid H.264 profile");
        return false;
    }
    h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_PROFILE, profile);

    // Set H.264 output level. Use Level 4.0 as fallback default.
    int32_t h264Level =
            static_cast<int32_t>(outputH264Level.value_or(V4L2_MPEG_VIDEO_H264_LEVEL_4_0));
    h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_H264_LEVEL, h264Level);

    // Ask not to put SPS and PPS into separate bitstream buffers.
    h264Ctrls.emplace_back(V4L2_CID_MPEG_VIDEO_HEADER_MODE,
                           V4L2_MPEG_VIDEO_HEADER_MODE_JOINED_WITH_1ST_FRAME);

    // Ignore return value as these controls are optional.
    mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG, std::move(h264Ctrls));

    return true;
}

bool V4L2Encoder::configureBitrateMode(C2Config::bitrate_mode_t bitrateMode) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    v4l2_mpeg_video_bitrate_mode v4l2BitrateMode =
            V4L2Device::C2BitrateModeToV4L2BitrateMode(bitrateMode);
    if (!mDevice->setExtCtrls(V4L2_CTRL_CLASS_MPEG,
                              {V4L2ExtCtrl(V4L2_CID_MPEG_VIDEO_BITRATE_MODE, v4l2BitrateMode)})) {
        // TODO(b/190336806): Our stack doesn't support bitrate mode changes yet. We default to CBR
        // which is currently the only supported mode so we can safely ignore this for now.
        ALOGW("Setting bitrate mode to %u failed", v4l2BitrateMode);
    }
    return true;
}

bool V4L2Encoder::startDevicePoll() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mDevice->startPolling(::base::BindRepeating(&V4L2Encoder::serviceDeviceTask, mWeakThis),
                               ::base::BindRepeating(&V4L2Encoder::onPollError, mWeakThis))) {
        ALOGE("Device poll thread failed to start");
        onError();
        return false;
    }

    ALOGV("Device poll started");
    return true;
}

bool V4L2Encoder::stopDevicePoll() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (!mDevice->stopPolling()) {
        ALOGE("Failed to stop polling on the device");
        onError();
        return false;
    }

    ALOGV("Device poll stopped");
    return true;
}

void V4L2Encoder::onPollError() {
    ALOGV("%s()", __func__);
    onError();
}

void V4L2Encoder::serviceDeviceTask(bool /*event*/) {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState != State::UNINITIALIZED);

    if (mState == State::ERROR) {
        return;
    }

    // Dequeue completed input (VIDEO_OUTPUT) buffers, and recycle to the free list.
    while (mInputQueue->queuedBuffersCount() > 0) {
        if (!dequeueInputBuffer()) break;
    }

    // Dequeue completed output (VIDEO_CAPTURE) buffers, and recycle to the free list.
    while (mOutputQueue->queuedBuffersCount() > 0) {
        if (!dequeueOutputBuffer()) break;
    }

    ALOGV("%s() - done", __func__);
}

bool V4L2Encoder::enqueueInputBuffer(std::unique_ptr<InputFrame> frame) {
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mInputQueue->freeBuffersCount() > 0);
    ALOG_ASSERT(mState == State::ENCODING);
    ALOG_ASSERT(frame);
    ALOG_ASSERT(mInputLayout->mFormat == frame->pixelFormat());
    ALOG_ASSERT(mInputLayout->mPlanes.size() == frame->planes().size());

    auto format = frame->pixelFormat();
    auto planes = frame->planes();
    auto index = frame->index();
    auto timestamp = frame->timestamp();

    ALOGV("%s(): queuing input buffer (index: %" PRId64 ")", __func__, index);

    auto buffer = mInputQueue->getFreeBuffer();
    if (!buffer) {
        ALOGE("Failed to get free buffer from device input queue");
        return false;
    }

    // Mark the buffer with the frame's timestamp so we can identify the associated output buffers.
    buffer->setTimeStamp(
            {.tv_sec = static_cast<time_t>(timestamp / ::base::Time::kMicrosecondsPerSecond),
             .tv_usec = static_cast<time_t>(timestamp % ::base::Time::kMicrosecondsPerSecond)});
    size_t bufferId = buffer->bufferId();

    for (size_t i = 0; i < planes.size(); ++i) {
        // A single-buffer input format may have multiple color planes, so bytesUsed of the single
        // buffer should be the sum of all color planes' sizes.
        size_t bytesUsed = 0;
        if (planes.size() == 1) {
            bytesUsed = allocationSize(format, mInputLayout->mCodedSize);
        } else {
            bytesUsed = ::base::checked_cast<size_t>(
                    getArea(planeSize(format, i, mInputLayout->mCodedSize)).value());
        }

        // TODO(crbug.com/901264): The way to pass an offset within a DMA-buf is not defined
        // in V4L2 specification, so we abuse data_offset for now. Fix it when we have the
        // right interface, including any necessary validation and potential alignment.
        buffer->setPlaneDataOffset(i, planes[i].mOffset);
        bytesUsed += planes[i].mOffset;
        // Workaround: filling length should not be needed. This is a bug in the videobuf2 library.
        buffer->setPlaneSize(i, mInputLayout->mPlanes[i].mSize + planes[i].mOffset);
        buffer->setPlaneBytesUsed(i, bytesUsed);
    }

    if (!std::move(*buffer).queueDMABuf(frame->fds())) {
        ALOGE("Failed to queue input buffer using QueueDMABuf");
        onError();
        return false;
    }

    ALOGV("Queued buffer in input queue (index: %" PRId64 ", timestamp: %" PRId64
          ", bufferId: %zu)",
          index, timestamp, bufferId);

    ALOG_ASSERT(!mInputBuffers[bufferId]);
    mInputBuffers[bufferId] = std::move(frame);

    return true;
}

bool V4L2Encoder::enqueueOutputBuffer() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mOutputQueue->freeBuffersCount() > 0);

    auto buffer = mOutputQueue->getFreeBuffer();
    if (!buffer) {
        ALOGE("Failed to get free buffer from device output queue");
        onError();
        return false;
    }

    std::unique_ptr<BitstreamBuffer> bitstreamBuffer;
    mFetchOutputBufferCb.Run(mOutputBufferSize, &bitstreamBuffer);
    if (!bitstreamBuffer) {
        ALOGE("Failed to fetch output block");
        onError();
        return false;
    }

    size_t bufferId = buffer->bufferId();

    std::vector<int> fds;
    fds.push_back(bitstreamBuffer->dmabuf->handle()->data[0]);
    if (!std::move(*buffer).queueDMABuf(fds)) {
        ALOGE("Failed to queue output buffer using QueueDMABuf");
        onError();
        return false;
    }

    ALOG_ASSERT(!mOutputBuffers[bufferId]);
    mOutputBuffers[bufferId] = std::move(bitstreamBuffer);
    ALOGV("%s(): Queued buffer in output queue (bufferId: %zu)", __func__, bufferId);
    return true;
}

bool V4L2Encoder::dequeueInputBuffer() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState != State::UNINITIALIZED);
    ALOG_ASSERT(mInputQueue->queuedBuffersCount() > 0);

    if (mState == State::ERROR) {
        return false;
    }

    bool success;
    V4L2ReadableBufferRef buffer;
    std::tie(success, buffer) = mInputQueue->dequeueBuffer();
    if (!success) {
        ALOGE("Failed to dequeue buffer from input queue");
        onError();
        return false;
    }
    if (!buffer) {
        // No more buffers ready to be dequeued in input queue.
        return false;
    }

    uint64_t index = mInputBuffers[buffer->bufferId()]->index();
    int64_t timestamp = buffer->getTimeStamp().tv_usec +
                        buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond;
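    // This reverses the seconds/microseconds split done in enqueueInputBuffer(), recovering the
    // original frame timestamp in microseconds.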
    ALOGV("Dequeued buffer from input queue (index: %" PRId64 ", timestamp: %" PRId64
          ", bufferId: %zu)",
          index, timestamp, buffer->bufferId());

    mInputBuffers[buffer->bufferId()] = nullptr;

    mInputBufferDoneCb.Run(index);

    // If we previously used up all input queue buffers we can start encoding again now.
    if ((mState == State::WAITING_FOR_V4L2_BUFFER) && !mEncodeRequests.empty()) {
        setState(State::ENCODING);
        mTaskRunner->PostTask(FROM_HERE,
                              ::base::BindOnce(&V4L2Encoder::handleEncodeRequest, mWeakThis));
    }

    return true;
}

bool V4L2Encoder::dequeueOutputBuffer() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(mState != State::UNINITIALIZED);
    ALOG_ASSERT(mOutputQueue->queuedBuffersCount() > 0);

    if (mState == State::ERROR) {
        return false;
    }

    bool success;
    V4L2ReadableBufferRef buffer;
    std::tie(success, buffer) = mOutputQueue->dequeueBuffer();
    if (!success) {
        ALOGE("Failed to dequeue buffer from output queue");
        onError();
        return false;
    }
    if (!buffer) {
        // No more buffers ready to be dequeued in output queue.
        return false;
    }

    size_t encodedDataSize = buffer->getPlaneBytesUsed(0) - buffer->getPlaneDataOffset(0);
    ::base::TimeDelta timestamp = ::base::TimeDelta::FromMicroseconds(
            buffer->getTimeStamp().tv_usec +
            buffer->getTimeStamp().tv_sec * ::base::Time::kMicrosecondsPerSecond);

    ALOGV("Dequeued buffer from output queue (timestamp: %" PRId64
          ", bufferId: %zu, data size: %zu, EOS: %d)",
          timestamp.InMicroseconds(), buffer->bufferId(), encodedDataSize, buffer->isLast());

    if (!mOutputBuffers[buffer->bufferId()]) {
        ALOGE("Failed to find output block associated with output buffer");
        onError();
        return false;
    }

    std::unique_ptr<BitstreamBuffer> bitstreamBuffer =
            std::move(mOutputBuffers[buffer->bufferId()]);
    if (encodedDataSize > 0) {
        if (!mInjectParamsBeforeIDR) {
            // No need to inject SPS or PPS before IDR frames, we can just return the buffer as-is.
            mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
                                    buffer->isKeyframe(), std::move(bitstreamBuffer));
        } else if (!buffer->isKeyframe()) {
            // We need to inject SPS and PPS before IDR frames, but this frame is not a key frame.
            // We can return the buffer as-is, but need to update our SPS and PPS cache if required.
            C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
                    bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
            C2ReadView readView = constBlock.map().get();
            extractSPSPPS(readView.data(), encodedDataSize, &mCachedSPS, &mCachedPPS);
            mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
                                    buffer->isKeyframe(), std::move(bitstreamBuffer));
        } else {
            // We need to inject our cached SPS and PPS NAL units to the IDR frame. It's possible
            // this frame already has SPS and PPS NAL units attached, in which case we only need to
            // update our cached SPS and PPS.
            C2ConstLinearBlock constBlock = bitstreamBuffer->dmabuf->share(
                    bitstreamBuffer->dmabuf->offset(), encodedDataSize, C2Fence());
            C2ReadView readView = constBlock.map().get();

            // Allocate a new buffer to copy the data with prepended SPS and PPS into.
            std::unique_ptr<BitstreamBuffer> prependedBitstreamBuffer;
            mFetchOutputBufferCb.Run(mOutputBufferSize, &prependedBitstreamBuffer);
            if (!prependedBitstreamBuffer) {
                ALOGE("Failed to fetch output block");
                onError();
                return false;
            }
            C2WriteView writeView = prependedBitstreamBuffer->dmabuf->map().get();

            // If there is not enough space in the output buffer just return the original buffer.
            size_t newSize = prependSPSPPSToIDR(readView.data(), encodedDataSize, writeView.data(),
                                                writeView.size(), &mCachedSPS, &mCachedPPS);
            if (newSize > 0) {
                mOutputBufferDoneCb.Run(newSize, timestamp.InMicroseconds(), buffer->isKeyframe(),
                                        std::move(prependedBitstreamBuffer));
            } else {
                mOutputBufferDoneCb.Run(encodedDataSize, timestamp.InMicroseconds(),
                                        buffer->isKeyframe(), std::move(bitstreamBuffer));
            }
        }
    }

    // If the buffer is marked as last and we were draining the encoder, draining is now done.
    if ((mState == State::DRAINING) && buffer->isLast()) {
        onDrainDone(true);
        // Start the encoder again.
        struct v4l2_encoder_cmd cmd;
        memset(&cmd, 0, sizeof(v4l2_encoder_cmd));
        cmd.cmd = V4L2_ENC_CMD_START;
        if (mDevice->ioctl(VIDIOC_ENCODER_CMD, &cmd) != 0) {
            ALOGE("Failed to restart encoder after draining (V4L2_ENC_CMD_START)");
            onError();
            return false;
        }
    }

    // Queue a new output buffer to replace the one we dequeued.
    buffer = nullptr;
    enqueueOutputBuffer();

    return true;
}

bool V4L2Encoder::createInputBuffers() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(!mInputQueue->isStreaming());
    ALOG_ASSERT(mInputBuffers.empty());

    // No memory is allocated here, we just generate a list of buffers on the input queue, which
    // will hold memory handles to the real buffers.
    if (mInputQueue->allocateBuffers(kInputBufferCount, V4L2_MEMORY_DMABUF) < kInputBufferCount) {
        ALOGE("Failed to create V4L2 input buffers.");
        return false;
    }

    mInputBuffers.resize(mInputQueue->allocatedBuffersCount());
    return true;
}

bool V4L2Encoder::createOutputBuffers() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(!mOutputQueue->isStreaming());
    ALOG_ASSERT(mOutputBuffers.empty());

    // No memory is allocated here, we just generate a list of buffers on the output queue, which
    // will hold memory handles to the real buffers.
    if (mOutputQueue->allocateBuffers(kOutputBufferCount, V4L2_MEMORY_DMABUF) <
        kOutputBufferCount) {
        ALOGE("Failed to create V4L2 output buffers.");
        return false;
    }

    mOutputBuffers.resize(mOutputQueue->allocatedBuffersCount());
    return true;
}

void V4L2Encoder::destroyInputBuffers() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(!mInputQueue->isStreaming());

    if (!mInputQueue || mInputQueue->allocatedBuffersCount() == 0) return;
    mInputQueue->deallocateBuffers();
    mInputBuffers.clear();
}

void V4L2Encoder::destroyOutputBuffers() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());
    ALOG_ASSERT(!mOutputQueue->isStreaming());

    if (!mOutputQueue || mOutputQueue->allocatedBuffersCount() == 0) return;
    mOutputQueue->deallocateBuffers();
    mOutputBuffers.clear();
}

void V4L2Encoder::onError() {
    ALOGV("%s()", __func__);
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    if (mState != State::ERROR) {
        setState(State::ERROR);
        mErrorCb.Run();
    }
}

void V4L2Encoder::setState(State state) {
    ALOG_ASSERT(mTaskRunner->RunsTasksInCurrentSequence());

    // Check whether the state change is valid.
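    // Allowed transitions, as encoded by the asserts below:
    //   UNINITIALIZED           <- any state
    //   WAITING_FOR_INPUT_FRAME <- any state except ERROR
    //   WAITING_FOR_V4L2_BUFFER <- ENCODING
    //   ENCODING                <- WAITING_FOR_INPUT_FRAME, WAITING_FOR_V4L2_BUFFER or DRAINING
    //   DRAINING                <- ENCODING or WAITING_FOR_INPUT_FRAME
    //   ERROR                   <- any state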
    switch (state) {
    case State::UNINITIALIZED:
        break;
    case State::WAITING_FOR_INPUT_FRAME:
        ALOG_ASSERT(mState != State::ERROR);
        break;
    case State::WAITING_FOR_V4L2_BUFFER:
        ALOG_ASSERT(mState == State::ENCODING);
        break;
    case State::ENCODING:
        ALOG_ASSERT(mState == State::WAITING_FOR_INPUT_FRAME ||
                    mState == State::WAITING_FOR_V4L2_BUFFER || mState == State::DRAINING);
        break;
    case State::DRAINING:
        ALOG_ASSERT(mState == State::ENCODING || mState == State::WAITING_FOR_INPUT_FRAME);
        break;
    case State::ERROR:
        break;
    }

    ALOGV("Changed encoder state from %s to %s", stateToString(mState), stateToString(state));
    mState = state;
}

const char* V4L2Encoder::stateToString(State state) {
    switch (state) {
    case State::UNINITIALIZED:
        return "UNINITIALIZED";
    case State::WAITING_FOR_INPUT_FRAME:
        return "WAITING_FOR_INPUT_FRAME";
    case State::WAITING_FOR_V4L2_BUFFER:
        return "WAITING_FOR_V4L2_BUFFER";
    case State::ENCODING:
        return "ENCODING";
    case State::DRAINING:
        return "DRAINING";
    case State::ERROR:
        return "ERROR";
    }
}

}  // namespace android