1 /*
2 * Copyright (C) 2017 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "FrameDecoder"
19 #define ATRACE_TAG ATRACE_TAG_VIDEO
20 #include "include/FrameDecoder.h"
21 #include <android_media_codec.h>
22 #include <binder/MemoryBase.h>
23 #include <binder/MemoryHeapBase.h>
24 #include <gui/Surface.h>
25 #include <inttypes.h>
26 #include <media/IMediaSource.h>
27 #include <media/MediaCodecBuffer.h>
28 #include <media/stagefright/CodecBase.h>
29 #include <media/stagefright/ColorConverter.h>
30 #include <media/stagefright/FrameCaptureProcessor.h>
31 #include <media/stagefright/MediaBuffer.h>
32 #include <media/stagefright/MediaCodec.h>
33 #include <media/stagefright/MediaCodecConstants.h>
34 #include <media/stagefright/MediaDefs.h>
35 #include <media/stagefright/MediaErrors.h>
36 #include <media/stagefright/Utils.h>
37 #include <media/stagefright/foundation/ADebug.h>
38 #include <media/stagefright/foundation/AMessage.h>
39 #include <media/stagefright/foundation/ColorUtils.h>
40 #include <media/stagefright/foundation/avc_utils.h>
41 #include <mediadrm/ICrypto.h>
42 #include <private/media/VideoFrame.h>
43 #include <utils/Log.h>
44 #include <utils/Trace.h>
45 #include "include/FrameCaptureLayer.h"
46 #include "include/HevcUtils.h"
47
48 #include <C2Buffer.h>
49 #include <Codec2BufferUtils.h>
50
51 namespace android {
52
// Sync-mode timeout for a single dequeueInput/OutputBuffer attempt.
static const int64_t kBufferTimeOutUs = 10000LL; // 10 msec
// Async (block-model) timeout for waiting on an input index or a decoded frame.
static const int64_t kAsyncBufferTimeOutUs = 2000000LL; // 2000 msec
// Max dequeueOutputBuffer retries in sync mode before giving up.
static const size_t kRetryCount = 100; // must be >0
// Fallback per-sample duration when the track has no frame rate.
static const int64_t kDefaultSampleDurationUs = 33333LL; // 33ms
// For codec, 0 is the highest importance; higher the number lesser important.
// To make codec for thumbnail less important, give it a value more than 0.
static const int kThumbnailImportance = 1;
60
/**
 * Allocates shared memory holding a flattened VideoFrame for the given
 * dimensions. Display size is derived from SAR, explicit display size, or
 * the coded size (in that order); a valid crop rect in |trackMeta| overrides
 * it. When |allocRotated| is set, width/height are pre-swapped for 90/270
 * degree rotation and the stored rotation is cleared. When |metaOnly| is set,
 * only the header (no pixel data) is allocated.
 *
 * Returns NULL on arithmetic overflow or allocation failure.
 */
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
        int32_t dstBpp, uint32_t bitDepth, bool allocRotated, bool metaOnly) {
    int32_t rotationAngle;
    if (!trackMeta->findInt32(kKeyRotation, &rotationAngle)) {
        rotationAngle = 0; // By default, no rotation
    }
    uint32_t type;
    const void *iccData;
    size_t iccSize;
    if (!trackMeta->findData(kKeyIccProfile, &type, &iccData, &iccSize)){
        iccData = NULL;
        iccSize = 0;
    }

    int32_t sarWidth, sarHeight;
    int32_t displayWidth, displayHeight;
    if (trackMeta->findInt32(kKeySARWidth, &sarWidth)
            && trackMeta->findInt32(kKeySARHeight, &sarHeight)
            && sarHeight != 0) {
        int32_t multVal;
        if (width < 0 || sarWidth < 0 ||
                __builtin_mul_overflow(width, sarWidth, &multVal)) {
            ALOGE("displayWidth overflow %dx%d", width, sarWidth);
            return NULL;
        }
        // Reuse the overflow-checked product rather than recomputing it.
        displayWidth = multVal / sarHeight;
        displayHeight = height;
    } else if (trackMeta->findInt32(kKeyDisplayWidth, &displayWidth)
                && trackMeta->findInt32(kKeyDisplayHeight, &displayHeight)
                && displayWidth > 0 && displayHeight > 0
                && width > 0 && height > 0) {
        ALOGV("found display size %dx%d", displayWidth, displayHeight);
    } else {
        displayWidth = width;
        displayHeight = height;
    }
    int32_t displayLeft = 0;
    int32_t displayTop = 0;
    int32_t displayRight;
    int32_t displayBottom;
    if (trackMeta->findRect(kKeyCropRect, &displayLeft, &displayTop, &displayRight,
                            &displayBottom)) {
        if (displayLeft >= 0 && displayTop >= 0 && displayRight < width && displayBottom < height &&
            displayLeft <= displayRight && displayTop <= displayBottom) {
            // Crop rect is inclusive on both ends, hence the +1.
            displayWidth = displayRight - displayLeft + 1;
            displayHeight = displayBottom - displayTop + 1;
        } else {
            // Crop rectangle is invalid, use the whole frame.
            displayLeft = 0;
            displayTop = 0;
        }
    }

    if (allocRotated) {
        if (rotationAngle == 90 || rotationAngle == 270) {
            // swap width and height for 90 & 270 degrees rotation
            std::swap(width, height);
            std::swap(displayWidth, displayHeight);
            std::swap(tileWidth, tileHeight);
        }
        // Rotation is already applied.
        rotationAngle = 0;
    }

    if (!metaOnly) {
        // Guard dstBpp * width * height against int overflow before VideoFrame
        // derives the pixel buffer size from it.
        int32_t multVal;
        if (width < 0 || height < 0 || dstBpp < 0 ||
                __builtin_mul_overflow(dstBpp, width, &multVal) ||
                __builtin_mul_overflow(multVal, height, &multVal)) {
            ALOGE("Frame size overflow %dx%d bpp %d", width, height, dstBpp);
            return NULL;
        }
    }

    VideoFrame frame(width, height, displayWidth, displayHeight, displayLeft, displayTop, tileWidth,
                     tileHeight, rotationAngle, dstBpp, bitDepth, !metaOnly, iccSize);

    size_t size = frame.getFlattenedSize();
    sp<MemoryHeapBase> heap = new MemoryHeapBase(size, 0, "MetadataRetrieverClient");
    if (heap == NULL) {
        ALOGE("failed to create MemoryDealer");
        return NULL;
    }
    sp<IMemory> frameMem = new MemoryBase(heap, 0, size);
    if (frameMem == NULL || frameMem->unsecurePointer() == NULL) {
        ALOGE("not enough memory for VideoFrame size=%zu", size);
        return NULL;
    }
    // Flatten the header (and ICC profile) into the shared memory region.
    VideoFrame* frameCopy = static_cast<VideoFrame*>(frameMem->unsecurePointer());
    frameCopy->init(frame, iccData, iccSize);

    return frameMem;
}
155
// Convenience overload: allocates a full (non-meta-only) VideoFrame buffer.
sp<IMemory> allocVideoFrame(const sp<MetaData>& trackMeta,
        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
        int32_t dstBpp, uint8_t bitDepth, bool allocRotated = false) {
    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
            allocRotated, false /*metaOnly*/);
}
162
// Convenience overload: allocates only the VideoFrame header (no pixel data),
// used when the caller wants metadata without decoding.
sp<IMemory> allocMetaFrame(const sp<MetaData>& trackMeta,
        int32_t width, int32_t height, int32_t tileWidth, int32_t tileHeight,
        int32_t dstBpp, uint8_t bitDepth) {
    return allocVideoFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth,
            false /*allocRotated*/, true /*metaOnly*/);
}
169
isAvif(const sp<MetaData> & trackMeta)170 bool isAvif(const sp<MetaData> &trackMeta) {
171 const char *mime;
172 return trackMeta->findCString(kKeyMIMEType, &mime)
173 && (!strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AV1)
174 || !strcasecmp(mime, MEDIA_MIMETYPE_IMAGE_AVIF));
175 }
176
findThumbnailInfo(const sp<MetaData> & trackMeta,int32_t * width,int32_t * height,uint32_t * type=NULL,const void ** data=NULL,size_t * size=NULL)177 bool findThumbnailInfo(
178 const sp<MetaData> &trackMeta, int32_t *width, int32_t *height,
179 uint32_t *type = NULL, const void **data = NULL, size_t *size = NULL) {
180 uint32_t dummyType;
181 const void *dummyData;
182 size_t dummySize;
183 int codecConfigKey = isAvif(trackMeta) ? kKeyThumbnailAV1C : kKeyThumbnailHVCC;
184 return trackMeta->findInt32(kKeyThumbnailWidth, width)
185 && trackMeta->findInt32(kKeyThumbnailHeight, height)
186 && trackMeta->findData(codecConfigKey,
187 type ?: &dummyType, data ?: &dummyData, size ?: &dummySize);
188 }
189
findGridInfo(const sp<MetaData> & trackMeta,int32_t * tileWidth,int32_t * tileHeight,int32_t * gridRows,int32_t * gridCols)190 bool findGridInfo(const sp<MetaData> &trackMeta,
191 int32_t *tileWidth, int32_t *tileHeight, int32_t *gridRows, int32_t *gridCols) {
192 return trackMeta->findInt32(kKeyTileWidth, tileWidth) && (*tileWidth > 0)
193 && trackMeta->findInt32(kKeyTileHeight, tileHeight) && (*tileHeight > 0)
194 && trackMeta->findInt32(kKeyGridRows, gridRows) && (*gridRows > 0)
195 && trackMeta->findInt32(kKeyGridCols, gridCols) && (*gridCols > 0);
196 }
197
getDstColorFormat(android_pixel_format_t colorFormat,OMX_COLOR_FORMATTYPE * dstFormat,ui::PixelFormat * captureFormat,int32_t * dstBpp)198 bool getDstColorFormat(
199 android_pixel_format_t colorFormat,
200 OMX_COLOR_FORMATTYPE *dstFormat,
201 ui::PixelFormat *captureFormat,
202 int32_t *dstBpp) {
203 switch (colorFormat) {
204 case HAL_PIXEL_FORMAT_RGB_565:
205 {
206 *dstFormat = OMX_COLOR_Format16bitRGB565;
207 *captureFormat = ui::PixelFormat::RGB_565;
208 *dstBpp = 2;
209 return true;
210 }
211 case HAL_PIXEL_FORMAT_RGBA_8888:
212 {
213 *dstFormat = OMX_COLOR_Format32BitRGBA8888;
214 *captureFormat = ui::PixelFormat::RGBA_8888;
215 *dstBpp = 4;
216 return true;
217 }
218 case HAL_PIXEL_FORMAT_BGRA_8888:
219 {
220 *dstFormat = OMX_COLOR_Format32bitBGRA8888;
221 *captureFormat = ui::PixelFormat::BGRA_8888;
222 *dstBpp = 4;
223 return true;
224 }
225 case HAL_PIXEL_FORMAT_RGBA_1010102:
226 {
227 *dstFormat = (OMX_COLOR_FORMATTYPE)COLOR_Format32bitABGR2101010;
228 *captureFormat = ui::PixelFormat::RGBA_1010102;
229 *dstBpp = 4;
230 return true;
231 }
232 default:
233 {
234 ALOGE("Unsupported color format: %d", colorFormat);
235 break;
236 }
237 }
238 return false;
239 }
240
AsyncCodecHandler(const wp<FrameDecoder> & frameDecoder)241 AsyncCodecHandler::AsyncCodecHandler(const wp<FrameDecoder>& frameDecoder) {
242 mFrameDecoder = frameDecoder;
243 }
244
// Dispatches MediaCodec async-mode callbacks (delivered as kWhatCallbackNotify
// messages) to the owning FrameDecoder. The decoder is held weakly; callbacks
// arriving after it is destroyed are silently dropped.
void AsyncCodecHandler::onMessageReceived(const sp<AMessage>& msg) {
    switch (msg->what()) {
        case FrameDecoder::kWhatCallbackNotify:
            int32_t callbackId;
            if (!msg->findInt32("callbackID", &callbackId)) {
                ALOGD("kWhatCallbackNotify: callbackID is expected.");
                break;
            }
            switch (callbackId) {
                // Codec has a free input buffer; hand its index to the decoder.
                case MediaCodec::CB_INPUT_AVAILABLE: {
                    int32_t index;
                    if (!msg->findInt32("index", &index)) {
                        ALOGD("CB_INPUT_AVAILABLE: index is expected.");
                        break;
                    }
                    ALOGD("CB_INPUT_AVAILABLE received, index is %d", index);
                    sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
                    if (frameDecoder != nullptr) {
                        frameDecoder->handleInputBufferAsync(index);
                    }
                    break;
                }
                // A decoded output buffer is ready.
                case MediaCodec::CB_OUTPUT_AVAILABLE: {
                    int32_t index;
                    int64_t timeUs;
                    CHECK(msg->findInt32("index", &index));
                    CHECK(msg->findInt64("timeUs", &timeUs));
                    ALOGV("CB_OUTPUT_AVAILABLE received, index is %d", index);
                    sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
                    if (frameDecoder != nullptr) {
                        frameDecoder->handleOutputBufferAsync(index, timeUs);
                    }
                    break;
                }
                // Output format change must be recorded before the next frame
                // is interpreted.
                case MediaCodec::CB_OUTPUT_FORMAT_CHANGED: {
                    ALOGV("CB_OUTPUT_FORMAT_CHANGED received");
                    sp<AMessage> format;
                    if (!msg->findMessage("format", &format) || format == nullptr) {
                        ALOGD("CB_OUTPUT_FORMAT_CHANGED: format is expected.");
                        break;
                    }
                    sp<FrameDecoder> frameDecoder = mFrameDecoder.promote();
                    if (frameDecoder != nullptr) {
                        frameDecoder->handleOutputFormatChangeAsync(format);
                    }
                    break;
                }
                // Codec errors are only logged here; the extraction path times
                // out on its own if no output ever arrives.
                case MediaCodec::CB_ERROR: {
                    status_t err;
                    int32_t actionCode;
                    AString detail;
                    if (!msg->findInt32("err", &err)) {
                        ALOGD("CB_ERROR: err is expected.");
                        break;
                    }
                    if (!msg->findInt32("actionCode", &actionCode)) {
                        ALOGD("CB_ERROR: actionCode is expected.");
                        break;
                    }
                    msg->findString("detail", &detail);
                    ALOGI("Codec reported error(0x%x/%s), actionCode(%d), detail(%s)", err,
                          StrMediaError(err).c_str(), actionCode, detail.c_str());
                    break;
                }
                case MediaCodec::CB_REQUIRED_RESOURCES_CHANGED:
                case MediaCodec::CB_METRICS_FLUSHED:
                {
                    // Nothing to do. Informational. Safe to ignore.
                    break;
                }

                case MediaCodec::CB_LARGE_FRAME_OUTPUT_AVAILABLE:
                    // unexpected as we are not using large frames
                case MediaCodec::CB_CRYPTO_ERROR:
                    // unexpected as we are not using crypto
                default:
                {
                    ALOGD("kWhatCallbackNotify: callbackID(%d) is unexpected.", callbackId);
                    break;
                }
            }
            break;
        default:
            ALOGD("unexpected message received: %s", msg->debugString().c_str());
            break;
    }
}
332
enqueue(int32_t index)333 void InputBufferIndexQueue::enqueue(int32_t index) {
334 std::scoped_lock<std::mutex> lock(mMutex);
335 mQueue.push(index);
336 mCondition.notify_one();
337 }
338
dequeue(int32_t * index,int32_t timeOutUs)339 bool InputBufferIndexQueue::dequeue(int32_t* index, int32_t timeOutUs) {
340 std::unique_lock<std::mutex> lock(mMutex);
341 bool hasAvailableIndex = mCondition.wait_for(lock, std::chrono::microseconds(timeOutUs),
342 [this] { return !mQueue.empty(); });
343 if (hasAvailableIndex) {
344 *index = mQueue.front();
345 mQueue.pop();
346 return true;
347 } else {
348 return false;
349 }
350 }
351
352 //static
// Builds a metadata-only VideoFrame (header, no pixels) for the track:
// dimensions, tiling, bpp and per-frame duration — without running a decoder.
// Returns NULL if the color format is unsupported or thumbnail info is absent.
sp<IMemory> FrameDecoder::getMetadataOnly(
        const sp<MetaData> &trackMeta, int colorFormat, bool thumbnail, uint32_t bitDepth) {
    OMX_COLOR_FORMATTYPE dstFormat;
    ui::PixelFormat captureFormat;
    int32_t dstBpp;
    // Only needed for dstBpp here; dstFormat/captureFormat are unused.
    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
            &dstFormat, &captureFormat, &dstBpp)) {
        return NULL;
    }

    int32_t width, height, tileWidth = 0, tileHeight = 0;
    if (thumbnail) {
        if (!findThumbnailInfo(trackMeta, &width, &height)) {
            return NULL;
        }
    } else {
        CHECK(trackMeta->findInt32(kKeyWidth, &width));
        CHECK(trackMeta->findInt32(kKeyHeight, &height));

        // Grid (tiled HEIF/AVIF) info is optional; zero tiles means untiled.
        int32_t gridRows, gridCols;
        if (!findGridInfo(trackMeta, &tileWidth, &tileHeight, &gridRows, &gridCols)) {
            tileWidth = tileHeight = 0;
        }
    }

    sp<IMemory> metaMem =
            allocMetaFrame(trackMeta, width, height, tileWidth, tileHeight, dstBpp, bitDepth);
    if (metaMem == nullptr) {
        return NULL;
    }

    // try to fill sequence meta's duration based on average frame rate,
    // default to 33ms if frame rate is unavailable.
    int32_t frameRate;
    VideoFrame* meta = static_cast<VideoFrame*>(metaMem->unsecurePointer());
    if (trackMeta->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
        meta->mDurationUs = 1000000LL / frameRate;
    } else {
        meta->mDurationUs = kDefaultSampleDurationUs;
    }
    return metaMem;
}
395
// Constructs a decoder bound to one codec component and one media source.
// Defaults to RGB565 output in sync (non-block-model) mode; init() may switch
// to block model for c2 components.
FrameDecoder::FrameDecoder(
        const AString &componentName,
        const sp<MetaData> &trackMeta,
        const sp<IMediaSource> &source)
    : mComponentName(componentName),
      mUseBlockModel(false),
      mTrackMeta(trackMeta),
      mSource(source),
      mDstFormat(OMX_COLOR_Format16bitRGB565),
      mDstBpp(2),
      mHaveMoreInputs(true),
      mFirstSample(true),
      mSourceStopped(false) {
}
410
// Tears down the async looper/handler (if async mode was used), releases the
// codec, and stops the source unless it was already stopped on the timeout
// path of extractInternalUsingBlockModel().
FrameDecoder::~FrameDecoder() {
    if (mHandler != NULL) {
        // Stop the looper before unregistering so no callback races teardown.
        mAsyncLooper->stop();
        mAsyncLooper->unregisterHandler(mHandler->id());
    }
    if (mDecoder != NULL) {
        mDecoder->release();
        if (!mSourceStopped) {
            mSource->stop();
        }
    }
}
423
isHDR(const sp<AMessage> & format)424 bool isHDR(const sp<AMessage> &format) {
425 uint32_t standard, transfer;
426 if (!format->findInt32("color-standard", (int32_t*)&standard)) {
427 standard = 0;
428 }
429 if (!format->findInt32("color-transfer", (int32_t*)&transfer)) {
430 transfer = 0;
431 }
432 return standard == ColorUtils::kColorStandardBT2020 &&
433 (transfer == ColorUtils::kColorTransferST2084 ||
434 transfer == ColorUtils::kColorTransferHLG);
435 }
436
// Creates, configures and starts the codec and the media source.
// @param frameTimeUs  target frame time (<0 selects the thumbnail time)
// @param option       seek mode, interpreted by onGetFormatAndSeekOptions()
// @param colorFormat  requested HAL output pixel format
// @return OK on success; ERROR_UNSUPPORTED / NO_MEMORY / codec error otherwise.
status_t FrameDecoder::init(
        int64_t frameTimeUs, int option, int colorFormat) {
    if (!getDstColorFormat((android_pixel_format_t)colorFormat,
            &mDstFormat, &mCaptureFormat, &mDstBpp)) {
        return ERROR_UNSUPPORTED;
    }

    // Subclass supplies the codec format, seek options and (for HDR) a surface.
    // This may also flip mUseBlockModel for c2 components.
    sp<AMessage> videoFormat = onGetFormatAndSeekOptions(
            frameTimeUs, option, &mReadOptions, &mSurface);
    if (videoFormat == NULL) {
        ALOGE("video format or seek mode not supported");
        return ERROR_UNSUPPORTED;
    }

    status_t err;
    sp<ALooper> looper = new ALooper;
    looper->start();
    sp<MediaCodec> decoder = MediaCodec::CreateByComponentName(
            looper, mComponentName, &err);
    if (decoder.get() == NULL || err != OK) {
        ALOGW("Failed to instantiate decoder [%s]", mComponentName.c_str());
        return (decoder.get() == NULL) ? NO_MEMORY : err;
    }

    if (mUseBlockModel) {
        // Block model requires async callbacks; route them through our handler
        // on a dedicated looper.
        mAsyncLooper = new ALooper;
        mAsyncLooper->start();
        mHandler = new AsyncCodecHandler(wp<FrameDecoder>(this));
        mAsyncLooper->registerHandler(mHandler);
        sp<AMessage> callbackMsg = new AMessage(kWhatCallbackNotify, mHandler);
        decoder->setCallback(callbackMsg);
    }

    err = decoder->configure(
            videoFormat, mSurface, NULL /* crypto */,
            mUseBlockModel ? MediaCodec::CONFIGURE_FLAG_USE_BLOCK_MODEL : 0 /* flags */);
    if (err != OK) {
        ALOGW("configure returned error %d (%s)", err, asString(err));
        decoder->release();
        return err;
    }

    err = decoder->start();
    if (err != OK) {
        ALOGW("start returned error %d (%s)", err, asString(err));
        decoder->release();
        return err;
    }

    // Source is started last so the destructor's mSource->stop() only runs
    // when mDecoder was successfully set below.
    err = mSource->start();
    if (err != OK) {
        ALOGW("source failed to start: %d (%s)", err, asString(err));
        decoder->release();
        return err;
    }
    mDecoder = decoder;

    return OK;
}
496
extractFrame(FrameRect * rect)497 sp<IMemory> FrameDecoder::extractFrame(FrameRect *rect) {
498 ScopedTrace trace(ATRACE_TAG, "FrameDecoder::ExtractFrame");
499 status_t err = onExtractRect(rect);
500 if (err != OK) {
501 ALOGE("onExtractRect error %d", err);
502 return NULL;
503 }
504
505 if (!mUseBlockModel) {
506 err = extractInternal();
507 } else {
508 err = extractInternalUsingBlockModel();
509 }
510 if (err != OK) {
511 ALOGE("extractInternal error %d", err);
512 return NULL;
513 }
514
515 return mFrameMemory;
516 }
517
// Synchronous decode loop: feeds input buffers from mSource into mDecoder and
// waits for output until onOutputReceived() reports the target frame (|done|),
// an error occurs, or kRetryCount output timeouts elapse.
status_t FrameDecoder::extractInternal() {
    status_t err = OK;
    bool done = false;
    size_t retriesLeft = kRetryCount;
    if (!mDecoder) {
        ALOGE("decoder is not initialized");
        return NO_INIT;
    }

    do {
        size_t index;
        int64_t ptsUs = 0LL;
        uint32_t flags = 0;

        // Queue as many inputs as we possibly can, then block on dequeuing
        // outputs. After getting each output, come back and queue the inputs
        // again to keep the decoder busy.
        while (mHaveMoreInputs) {
            // Non-blocking dequeue: a timeout here is not fatal while retries
            // remain — the outer loop will come back around.
            err = mDecoder->dequeueInputBuffer(&index, 0);
            if (err != OK) {
                ALOGV("Timed out waiting for input");
                if (retriesLeft) {
                    err = OK;
                }
                break;
            }
            sp<MediaCodecBuffer> codecBuffer;
            err = mDecoder->getInputBuffer(index, &codecBuffer);
            if (err != OK) {
                ALOGE("failed to get input buffer %zu", index);
                break;
            }

            MediaBufferBase *mediaBuffer = NULL;

            err = mSource->read(&mediaBuffer, &mReadOptions);
            // Seek only applies to the first read; subsequent reads continue
            // sequentially.
            mReadOptions.clearSeekTo();
            if (err != OK) {
                mHaveMoreInputs = false;
                // EOS after at least one sample: queue an empty EOS buffer so
                // the codec flushes out remaining frames.
                if (!mFirstSample && err == ERROR_END_OF_STREAM) {
                    (void)mDecoder->queueInputBuffer(
                            index, 0, 0, 0, MediaCodec::BUFFER_FLAG_EOS);
                    err = OK;
                } else {
                    ALOGW("Input Error: err=%d", err);
                }
                break;
            }

            if (mediaBuffer->range_length() > codecBuffer->capacity()) {
                ALOGE("buffer size (%zu) too large for codec input size (%zu)",
                        mediaBuffer->range_length(), codecBuffer->capacity());
                mHaveMoreInputs = false;
                err = BAD_VALUE;
            } else {
                codecBuffer->setRange(0, mediaBuffer->range_length());

                CHECK(mediaBuffer->meta_data().findInt64(kKeyTime, &ptsUs));
                memcpy(codecBuffer->data(),
                        (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
                        mediaBuffer->range_length());

                // Lets the subclass inspect the sample (e.g. set EOS on an IDR
                // frame, record sample durations).
                onInputReceived(codecBuffer->data(), codecBuffer->size(), mediaBuffer->meta_data(),
                                mFirstSample, &flags);
                mFirstSample = false;
            }

            mediaBuffer->release();

            if (mHaveMoreInputs) {
                ALOGV("QueueInput: size=%zu ts=%" PRId64 " us flags=%x",
                        codecBuffer->size(), ptsUs, flags);

                err = mDecoder->queueInputBuffer(
                        index,
                        codecBuffer->offset(),
                        codecBuffer->size(),
                        ptsUs,
                        flags);

                if (flags & MediaCodec::BUFFER_FLAG_EOS) {
                    mHaveMoreInputs = false;
                }
            }
        }

        while (err == OK) {
            size_t offset, size;
            // wait for a decoded buffer
            err = mDecoder->dequeueOutputBuffer(
                    &index,
                    &offset,
                    &size,
                    &ptsUs,
                    &flags,
                    kBufferTimeOutUs);

            if (err == INFO_FORMAT_CHANGED) {
                ALOGV("Received format change");
                err = mDecoder->getOutputFormat(&mOutputFormat);
            } else if (err == INFO_OUTPUT_BUFFERS_CHANGED) {
                ALOGV("Output buffers changed");
                err = OK;
            } else {
                if (err == -EAGAIN /* INFO_TRY_AGAIN_LATER */ && --retriesLeft > 0) {
                    ALOGV("Timed-out waiting for output.. retries left = %zu", retriesLeft);
                    err = OK;
                } else if (err == OK) {
                    // If we're seeking with CLOSEST option and obtained a valid targetTimeUs
                    // from the extractor, decode to the specified frame. Otherwise we're done.
                    ALOGV("Received an output buffer, timeUs=%lld", (long long)ptsUs);
                    sp<MediaCodecBuffer> videoFrameBuffer;
                    err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
                    if (err != OK) {
                        ALOGE("failed to get output buffer %zu", index);
                        break;
                    }
                    uint8_t* frameData = videoFrameBuffer->data();
                    sp<ABuffer> imageData;
                    videoFrameBuffer->meta()->findBuffer("image-data", &imageData);
                    if (mSurface != nullptr) {
                        // Surface (HDR) path: render first, then let the
                        // subclass capture from the surface.
                        mDecoder->renderOutputBufferAndRelease(index);
                        err = onOutputReceived(frameData, imageData, mOutputFormat, ptsUs, &done);
                    } else {
                        err = onOutputReceived(frameData, imageData, mOutputFormat, ptsUs, &done);
                        mDecoder->releaseOutputBuffer(index);
                    }
                } else {
                    ALOGW("Received error %d (%s) instead of output", err, asString(err));
                    done = true;
                }
                // Leave the output loop after any real output/error so inputs
                // can be replenished.
                break;
            }
        }
    } while (err == OK && !done);

    if (err != OK) {
        ALOGE("failed to get video frame (err %d)", err);
    }

    return err;
}
660
extractInternalUsingBlockModel()661 status_t FrameDecoder::extractInternalUsingBlockModel() {
662 status_t err = OK;
663 MediaBufferBase* mediaBuffer = NULL;
664 int64_t ptsUs = 0LL;
665 uint32_t flags = 0;
666 int32_t index;
667 mHandleOutputBufferAsyncDone = false;
668
669 err = mSource->read(&mediaBuffer, &mReadOptions);
670 mReadOptions.clearSeekTo();
671 if (err != OK) {
672 ALOGW("Input Error: err=%d", err);
673 if (mediaBuffer) {
674 mediaBuffer->release();
675 }
676 return err;
677 }
678
679 size_t inputSize = mediaBuffer->range_length();
680 std::shared_ptr<C2LinearBlock> block =
681 MediaCodec::FetchLinearBlock(inputSize, {std::string{mComponentName.c_str()}});
682 C2WriteView view{block->map().get()};
683 if (view.error() != C2_OK) {
684 ALOGE("Fatal error: failed to allocate and map a block");
685 mediaBuffer->release();
686 return NO_MEMORY;
687 }
688 if (inputSize > view.capacity()) {
689 ALOGE("Fatal error: allocated block is too small "
690 "(input size %zu; block cap %u)",
691 inputSize, view.capacity());
692 mediaBuffer->release();
693 return BAD_VALUE;
694 }
695 CHECK(mediaBuffer->meta_data().findInt64(kKeyTime, &ptsUs));
696 memcpy(view.base(), (const uint8_t*)mediaBuffer->data() + mediaBuffer->range_offset(),
697 inputSize);
698 std::shared_ptr<C2Buffer> c2Buffer =
699 C2Buffer::CreateLinearBuffer(block->share(0, inputSize, C2Fence{}));
700 onInputReceived(view.base(), inputSize, mediaBuffer->meta_data(), true /* firstSample */,
701 &flags);
702 flags |= MediaCodec::BUFFER_FLAG_EOS;
703 mediaBuffer->release();
704
705 std::vector<AccessUnitInfo> infoVec;
706 infoVec.emplace_back(flags, inputSize, ptsUs);
707 sp<BufferInfosWrapper> infos = new BufferInfosWrapper{std::move(infoVec)};
708
709 if (!mInputBufferIndexQueue.dequeue(&index, kAsyncBufferTimeOutUs)) {
710 ALOGE("No available input buffer index for async mode.");
711 return TIMED_OUT;
712 }
713
714 AString errorDetailMsg;
715 ALOGD("QueueLinearBlock: index=%d size=%zu ts=%" PRId64 " us flags=%x",
716 index, inputSize, ptsUs,flags);
717 err = mDecoder->queueBuffer(index, c2Buffer, infos, nullptr, &errorDetailMsg);
718 if (err != OK) {
719 ALOGE("failed to queueBuffer (err %d): %s", err, errorDetailMsg.c_str());
720 return err;
721 }
722
723 // wait for handleOutputBufferAsync() to finish
724 std::unique_lock _lk(mMutex);
725 if (!mOutputFramePending.wait_for(_lk, std::chrono::microseconds(kAsyncBufferTimeOutUs),
726 [this] { return mHandleOutputBufferAsyncDone; })) {
727 ALOGE("%s timed out waiting for handleOutputBufferAsync() to complete.", __func__);
728 mSource->stop();
729 mSourceStopped = true;
730 }
731 return mHandleOutputBufferAsyncDone ? OK : TIMED_OUT;
732 }
733
734 //////////////////////////////////////////////////////////////////////
735
// Video-track specialization of FrameDecoder. Defaults to seeking to the
// previous sync frame; mTargetTimeUs < 0 means "no exact target frame".
VideoFrameDecoder::VideoFrameDecoder(
        const AString &componentName,
        const sp<MetaData> &trackMeta,
        const sp<IMediaSource> &source)
    : FrameDecoder(componentName, trackMeta, source),
      mFrame(NULL),
      mIsAvc(false),
      mIsHevc(false),
      mSeekMode(MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC),
      mTargetTimeUs(-1LL),
      mDefaultSampleDurationUs(0) {
}
748
// Async-mode CB_OUTPUT_FORMAT_CHANGED: record the latest output format for
// use when the next output buffer is interpreted.
status_t FrameDecoder::handleOutputFormatChangeAsync(sp<AMessage> format) {
    // Here format is MediaCodec's internal copy of output format.
    // Make a copy since the client might modify it.
    mOutputFormat = format->dup();
    ALOGD("receive output format in async mode: %s", mOutputFormat->debugString().c_str());
    return OK;
}
756
// Async-mode CB_INPUT_AVAILABLE: park the free input index for
// extractInternalUsingBlockModel() to consume.
status_t FrameDecoder::handleInputBufferAsync(int32_t index) {
    mInputBufferIndexQueue.enqueue(index);
    return OK;
}
761
// Async-mode CB_OUTPUT_AVAILABLE: converts the decoded C2 buffer (or surface
// frame) via onOutputReceived() and, once the target frame is processed,
// signals extractInternalUsingBlockModel() through mOutputFramePending.
// NOTE(review): mHandleOutputBufferAsyncDone is read here without the lock;
// this appears safe because callbacks are serialized on one looper thread and
// the flag is reset before queueing input — confirm if the threading changes.
status_t FrameDecoder::handleOutputBufferAsync(int32_t index, int64_t timeUs) {
    if (mHandleOutputBufferAsyncDone) {
        // we have already processed an output buffer, skip others
        return OK;
    }

    status_t err = OK;
    sp<MediaCodecBuffer> videoFrameBuffer;
    err = mDecoder->getOutputBuffer(index, &videoFrameBuffer);
    if (err != OK || videoFrameBuffer == nullptr) {
        ALOGE("failed to get output buffer %d", index);
        return err;
    }

    bool onOutputReceivedDone = false;
    if (mSurface != nullptr) {
        mDecoder->renderOutputBufferAndRelease(index);
        // frameData and imgObj will be fetched by captureSurface() inside onOutputReceived()
        // explicitly pass null here
        err = onOutputReceived(nullptr, nullptr, mOutputFormat, timeUs, &onOutputReceivedDone);
    } else {
        // get stride and frame data for block model buffer
        std::shared_ptr<C2Buffer> c2buffer = videoFrameBuffer->asC2Buffer();
        if (!c2buffer
                || c2buffer->data().type() != C2BufferData::GRAPHIC
                || c2buffer->data().graphicBlocks().size() == 0u) {
            ALOGE("C2Buffer precond fail");
            return ERROR_MALFORMED;
        }

        // Map the first graphic block and wrap it as a MediaImage so the
        // color-convert path can read it like a byte-buffer frame.
        std::unique_ptr<const C2GraphicView> view(std::make_unique<const C2GraphicView>(
                c2buffer->data().graphicBlocks()[0].map().get()));
        GraphicView2MediaImageConverter converter(*view, mOutputFormat, false /* copy */);
        if (converter.initCheck() != OK) {
            ALOGE("Converter init failed: %d", converter.initCheck());
            return NO_INIT;
        }

        uint8_t* frameData = converter.wrap()->data();
        sp<ABuffer> imageData = converter.imageData();
        if (imageData != nullptr) {
            // Propagate the real row stride from the image layout into the
            // output format consumed downstream.
            mOutputFormat->setBuffer("image-data", imageData);
            MediaImage2 *img = (MediaImage2*) imageData->data();
            if (img->mNumPlanes > 0 && img->mType != img->MEDIA_IMAGE_TYPE_UNKNOWN) {
                int32_t stride = img->mPlane[0].mRowInc;
                mOutputFormat->setInt32(KEY_STRIDE, stride);
                ALOGD("updating stride = %d", stride);
            }
        }

        err = onOutputReceived(frameData, imageData, mOutputFormat, timeUs, &onOutputReceivedDone);
        mDecoder->releaseOutputBuffer(index);
    }

    if (err == OK && onOutputReceivedDone) {
        // Wake the waiter in extractInternalUsingBlockModel().
        std::lock_guard _lm(mMutex);
        mHandleOutputBufferAsyncDone = true;
        mOutputFramePending.notify_one();
    }
    return err;
}
823
// Builds the codec input format for thumbnail extraction and configures the
// seek options. Also decides block-model use for c2 components, requests an
// HDR surface when needed, and records the default sample duration.
// Returns NULL for unsupported seek modes or missing track metadata.
sp<AMessage> VideoFrameDecoder::onGetFormatAndSeekOptions(
        int64_t frameTimeUs, int seekMode,
        MediaSource::ReadOptions *options,
        sp<Surface> *window) {
    mSeekMode = static_cast<MediaSource::ReadOptions::SeekMode>(seekMode);
    if (mSeekMode < MediaSource::ReadOptions::SEEK_PREVIOUS_SYNC ||
            mSeekMode > MediaSource::ReadOptions::SEEK_FRAME_INDEX) {
        ALOGE("Unknown seek mode: %d", mSeekMode);
        return NULL;
    }

    const char *mime;
    if (!trackMeta()->findCString(kKeyMIMEType, &mime)) {
        ALOGE("Could not find mime type");
        return NULL;
    }

    // Used by onInputReceived() to stop after a single IDR frame.
    mIsAvc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_AVC);
    mIsHevc = !strcasecmp(mime, MEDIA_MIMETYPE_VIDEO_HEVC);

    // Negative frame time means "use the track's thumbnail time" (or 0).
    if (frameTimeUs < 0) {
        int64_t thumbNailTime = -1ll;
        if (!trackMeta()->findInt64(kKeyThumbnailTime, &thumbNailTime)
                || thumbNailTime < 0) {
            thumbNailTime = 0;
        }
        options->setSeekTo(thumbNailTime, mSeekMode);
    } else {
        options->setSeekTo(frameTimeUs, mSeekMode);
    }

    sp<AMessage> videoFormat;
    if (convertMetaDataToMessage(trackMeta(), &videoFormat) != OK) {
        ALOGE("b/23680780");
        ALOGW("Failed to convert meta data to message");
        return NULL;
    }

    // 10-bit destination needs a P010 decode; otherwise flexible YUV420.
    if (dstFormat() == COLOR_Format32bitABGR2101010) {
        videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
    } else {
        videoFormat->setInt32("color-format", COLOR_FormatYUV420Flexible);
    }

    // For the thumbnail extraction case, try to allocate single buffer in both
    // input and output ports, if seeking to a sync frame. NOTE: This request may
    // fail if component requires more than that for decoding.
    bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
            || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
    if (!isSeekingClosest) {
        if (mComponentName.startsWithIgnoreCase("c2.")) {
            // Codec2 components can use the async block-model fast path,
            // gated by a feature flag.
            mUseBlockModel = android::media::codec::provider_->thumbnail_block_model();
        } else {
            // OMX Codec
            videoFormat->setInt32("android._num-input-buffers", 1);
            videoFormat->setInt32("android._num-output-buffers", 1);
        }
    }

    if (isHDR(videoFormat)) {
        // HDR frames are captured from a surface; fall back to byte-buffer
        // output if the surface cannot be created.
        *window = initSurface();
        if (*window == NULL) {
            ALOGE("Failed to init surface control for HDR, fallback to non-hdr");
        } else {
            videoFormat->setInt32("color-format", OMX_COLOR_FormatAndroidOpaque);
        }
    }

    // Set the importance for thumbnail.
    videoFormat->setInt32(KEY_IMPORTANCE, kThumbnailImportance);

    int32_t frameRate;
    if (trackMeta()->findInt32(kKeyFrameRate, &frameRate) && frameRate > 0) {
        mDefaultSampleDurationUs = 1000000LL / frameRate;
    } else {
        mDefaultSampleDurationUs = kDefaultSampleDurationUs;
    }

    return videoFormat;
}
904
onInputReceived(uint8_t * data,size_t size,MetaDataBase & sampleMeta,bool firstSample,uint32_t * flags)905 status_t VideoFrameDecoder::onInputReceived(uint8_t* data, size_t size, MetaDataBase& sampleMeta,
906 bool firstSample, uint32_t* flags) {
907 bool isSeekingClosest = (mSeekMode == MediaSource::ReadOptions::SEEK_CLOSEST)
908 || (mSeekMode == MediaSource::ReadOptions::SEEK_FRAME_INDEX);
909
910 if (firstSample && isSeekingClosest) {
911 sampleMeta.findInt64(kKeyTargetTime, &mTargetTimeUs);
912 ALOGV("Seeking closest: targetTimeUs=%lld", (long long)mTargetTimeUs);
913 }
914
915 if (!isSeekingClosest && ((mIsAvc && IsIDR(data, size)) || (mIsHevc && IsIDR(data, size)))) {
916 // Only need to decode one IDR frame, unless we're seeking with CLOSEST
917 // option, in which case we need to actually decode to targetTimeUs.
918 *flags |= MediaCodec::BUFFER_FLAG_EOS;
919 }
920 int64_t durationUs;
921 if (sampleMeta.findInt64(kKeyDuration, &durationUs)) {
922 mSampleDurations.push_back(durationUs);
923 } else {
924 mSampleDurations.push_back(mDefaultSampleDurationUs);
925 }
926 return OK;
927 }
928
// Handles one decoded output buffer from the video codec.
//
// frameData points at the raw decoded pixels (may be null; logged below),
// imgObj optionally carries a MediaImage2 plane layout, outputFormat is the
// codec's output format message, and timeUs is the buffer's presentation
// time. *done is set to true once the target frame has been rendered into
// mFrame.
//
// Returns OK on success, ERROR_MALFORMED on a bad/missing output format,
// NO_MEMORY if the destination frame cannot be allocated, or
// ERROR_UNSUPPORTED if no color conversion path exists.
status_t VideoFrameDecoder::onOutputReceived(
        uint8_t* frameData,
        sp<ABuffer> imgObj,
        const sp<AMessage> &outputFormat,
        int64_t timeUs, bool *done) {
    // Consume the duration queued by onInputReceived() for this sample.
    int64_t durationUs = mDefaultSampleDurationUs;
    if (!mSampleDurations.empty()) {
        durationUs = *mSampleDurations.begin();
        mSampleDurations.erase(mSampleDurations.begin());
    }
    // mTargetTimeUs >= 0 only for CLOSEST-style seeks; skip earlier frames.
    bool shouldOutput = (mTargetTimeUs < 0LL) || (timeUs >= mTargetTimeUs);

    // If this is not the target frame, skip color convert.
    if (!shouldOutput) {
        *done = false;
        return OK;
    }

    *done = true;

    if (outputFormat == NULL) {
        return ERROR_MALFORMED;
    }

    int32_t width, height, stride, srcFormat;
    if (!outputFormat->findInt32("width", &width) ||
            !outputFormat->findInt32("height", &height) ||
            !outputFormat->findInt32("color-format", &srcFormat)) {
        ALOGE("format missing dimension or color: %s",
                outputFormat->debugString().c_str());
        return ERROR_MALFORMED;
    }

    if (!outputFormat->findInt32("stride", &stride)) {
        if (mCaptureLayer == NULL) {
            ALOGE("format must have stride for byte buffer mode: %s",
                    outputFormat->debugString().c_str());
            return ERROR_MALFORMED;
        }
        // for surface output, set stride to width, we don't actually need it.
        stride = width;
    }

    // Crop bounds are inclusive; default to the full frame when absent.
    int32_t crop_left, crop_top, crop_right, crop_bottom;
    if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
        crop_left = crop_top = 0;
        crop_right = width - 1;
        crop_bottom = height - 1;
    }

    // slice-height, when present and positive, is the true plane height.
    int32_t slice_height;
    if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
        height = slice_height;
    }

    uint32_t bitDepth = 8;
    if (COLOR_FormatYUVP010 == srcFormat) {
        bitDepth = 10;
    }

    // Lazily allocate the destination VideoFrame sized to the crop rect.
    if (mFrame == NULL) {
        sp<IMemory> frameMem = allocVideoFrame(
                trackMeta(),
                (crop_right - crop_left + 1),
                (crop_bottom - crop_top + 1),
                0,
                0,
                dstBpp(),
                bitDepth,
                mCaptureLayer != nullptr /*allocRotated*/);
        if (frameMem == nullptr) {
            return NO_MEMORY;
        }

        mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());
        setFrame(frameMem);
    }

    mFrame->mDurationUs = durationUs;

    // Surface path (set up by initSurface() for HDR): read the frame back
    // from the capture layer instead of color-converting the byte buffer.
    if (mCaptureLayer != nullptr) {
        return captureSurface();
    }
    ColorConverter colorConverter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());

    // Color aspect keys are optional; 0 means unspecified.
    uint32_t standard, range, transfer;
    if (!outputFormat->findInt32("color-standard", (int32_t*)&standard)) {
        standard = 0;
    }
    if (!outputFormat->findInt32("color-range", (int32_t*)&range)) {
        range = 0;
    }
    if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
        transfer = 0;
    }

    // Hand the codec-provided MediaImage2 plane layout to the converter.
    if (imgObj != nullptr) {
        MediaImage2 *imageData = nullptr;
        imageData = (MediaImage2 *)(imgObj.get()->data());
        if (imageData != nullptr) {
            colorConverter.setSrcMediaImage2(*imageData);
        }
    }
    // Flexible YUV cannot be interpreted without a plane layout.
    if (srcFormat == COLOR_FormatYUV420Flexible && imgObj.get() == nullptr) {
        return ERROR_UNSUPPORTED;
    }
    colorConverter.setSrcColorSpace(standard, range, transfer);
    if (colorConverter.isValid()) {
        ScopedTrace trace(ATRACE_TAG, "FrameDecoder::ColorConverter");
        if (frameData == nullptr) {
            ALOGD("frameData is null for ColorConverter");
        }
        colorConverter.convert(
                (const uint8_t *)frameData,
                width, height, stride,
                crop_left, crop_top, crop_right, crop_bottom,
                mFrame->getFlattenedData(),
                mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
                // since the frame is allocated with top-left adjusted,
                // the dst rect should start at {0,0} as well.
                0, 0, mFrame->mWidth - 1, mFrame->mHeight - 1);
        return OK;
    }

    ALOGE("Unable to convert from format 0x%08x to 0x%08x",
            srcFormat, dstFormat());
    return ERROR_UNSUPPORTED;
}
1057
initSurface()1058 sp<Surface> VideoFrameDecoder::initSurface() {
1059 // create the consumer listener interface, and hold sp so that this
1060 // interface lives as long as the GraphicBufferSource.
1061 sp<FrameCaptureLayer> captureLayer = new FrameCaptureLayer();
1062 if (captureLayer->init() != OK) {
1063 ALOGE("failed to init capture layer");
1064 return nullptr;
1065 }
1066 mCaptureLayer = captureLayer;
1067
1068 return captureLayer->getSurface();
1069 }
1070
captureSurface()1071 status_t VideoFrameDecoder::captureSurface() {
1072 sp<GraphicBuffer> outBuffer;
1073 status_t err = mCaptureLayer->capture(
1074 captureFormat(), Rect(0, 0, mFrame->mWidth, mFrame->mHeight), &outBuffer);
1075
1076 if (err != OK) {
1077 ALOGE("failed to capture layer (err %d)", err);
1078 return err;
1079 }
1080
1081 ALOGV("capture: %dx%d, format %d, stride %d",
1082 outBuffer->getWidth(),
1083 outBuffer->getHeight(),
1084 outBuffer->getPixelFormat(),
1085 outBuffer->getStride());
1086
1087 uint8_t *base;
1088 int32_t outBytesPerPixel, outBytesPerStride;
1089 err = outBuffer->lock(
1090 GraphicBuffer::USAGE_SW_READ_OFTEN,
1091 reinterpret_cast<void**>(&base),
1092 &outBytesPerPixel,
1093 &outBytesPerStride);
1094 if (err != OK) {
1095 ALOGE("failed to lock graphic buffer: err %d", err);
1096 return err;
1097 }
1098
1099 uint8_t *dst = mFrame->getFlattenedData();
1100 for (size_t y = 0 ; y < fmin(mFrame->mHeight, outBuffer->getHeight()) ; y++) {
1101 memcpy(dst, base, fmin(mFrame->mWidth, outBuffer->getWidth()) * mFrame->mBytesPerPixel);
1102 dst += mFrame->mRowBytes;
1103 base += outBuffer->getStride() * mFrame->mBytesPerPixel;
1104 }
1105 outBuffer->unlock();
1106 return OK;
1107 }
1108
1109 ////////////////////////////////////////////////////////////////////////
1110
// Decoder for still-image tracks (e.g. HEIF/AVIF). Grid members default to
// a 1x1 "grid" (a non-tiled image) until onGetFormatAndSeekOptions()
// discovers tiling info in the track metadata.
MediaImageDecoder::MediaImageDecoder(
        const AString &componentName,
        const sp<MetaData> &trackMeta,
        const sp<IMediaSource> &source)
    : FrameDecoder(componentName, trackMeta, source),
      mFrame(NULL),        // allocated lazily on first decoded output
      mWidth(0),
      mHeight(0),
      mGridRows(1),
      mGridCols(1),
      mTileWidth(0),       // 0 == not tiled
      mTileHeight(0),
      mTilesDecoded(0),
      mTargetTiles(0) {
}
1126
// Builds the decoder input format for an image track and sets the seek
// position in |options|.
//
// frameTimeUs < 0 selects the track's stand-alone thumbnail (if present):
// the track metadata is overridden with the thumbnail's dimensions and
// codec config, and seekTo is set to -1. Otherwise the full image at
// frameTimeUs is requested. For tiled HEIF, the codec is configured with
// the tile size and the grid geometry is remembered so outputs can be
// assembled into the full picture. Returns NULL on failure.
sp<AMessage> MediaImageDecoder::onGetFormatAndSeekOptions(
        int64_t frameTimeUs, int /*seekMode*/,
        MediaSource::ReadOptions *options, sp<Surface> * /*window*/) {
    sp<MetaData> overrideMeta;
    if (frameTimeUs < 0) {
        uint32_t type;
        const void *data;
        size_t size;

        // if we have a stand-alone thumbnail, set up the override meta,
        // and set seekTo time to -1.
        if (!findThumbnailInfo(trackMeta(), &mWidth, &mHeight, &type, &data, &size)) {
            ALOGE("Thumbnail not available");
            return NULL;
        }
        overrideMeta = new MetaData(*(trackMeta()));
        overrideMeta->remove(kKeyDisplayWidth);
        overrideMeta->remove(kKeyDisplayHeight);
        overrideMeta->setInt32(kKeyWidth, mWidth);
        overrideMeta->setInt32(kKeyHeight, mHeight);
        // The AV1 codec configuration data is passed via CSD0 to the AV1
        // decoder; HEVC thumbnails use the HVCC config instead.
        const int codecConfigKey = isAvif(trackMeta()) ? kKeyOpaqueCSD0 : kKeyHVCC;
        overrideMeta->setData(codecConfigKey, type, data, size);
        options->setSeekTo(-1);
    } else {
        CHECK(trackMeta()->findInt32(kKeyWidth, &mWidth));
        CHECK(trackMeta()->findInt32(kKeyHeight, &mHeight));

        options->setSeekTo(frameTimeUs);
    }

    mGridRows = mGridCols = 1;
    if (overrideMeta == NULL) {
        // check if we're dealing with a tiled heif
        int32_t tileWidth, tileHeight, gridRows, gridCols;
        int32_t widthColsProduct = 0;
        int32_t heightRowsProduct = 0;
        if (findGridInfo(trackMeta(), &tileWidth, &tileHeight, &gridRows, &gridCols)) {
            // Reject grids whose overall dimensions overflow int32.
            if (__builtin_mul_overflow(tileWidth, gridCols, &widthColsProduct) ||
                    __builtin_mul_overflow(tileHeight, gridRows, &heightRowsProduct)) {
                ALOGE("Multiplication overflowed Grid size: %dx%d, Picture size: %dx%d",
                        gridCols, gridRows, tileWidth, tileHeight);
                return nullptr;
            }
            // The tile grid must cover the whole picture to be usable.
            if (mWidth <= widthColsProduct && mHeight <= heightRowsProduct) {
                ALOGV("grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
                        gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);

                // Decode tile-by-tile: the codec is given tile dimensions.
                overrideMeta = new MetaData(*(trackMeta()));
                overrideMeta->setInt32(kKeyWidth, tileWidth);
                overrideMeta->setInt32(kKeyHeight, tileHeight);
                mTileWidth = tileWidth;
                mTileHeight = tileHeight;
                mGridCols = gridCols;
                mGridRows = gridRows;
            } else {
                ALOGW("ignore bad grid: %dx%d, tile size: %dx%d, picture size: %dx%d",
                        gridCols, gridRows, tileWidth, tileHeight, mWidth, mHeight);
            }
        }
        if (overrideMeta == NULL) {
            overrideMeta = trackMeta();
        }
    }
    mTargetTiles = mGridCols * mGridRows;

    sp<AMessage> videoFormat;
    if (convertMetaDataToMessage(overrideMeta, &videoFormat) != OK) {
        ALOGE("b/23680780");
        ALOGW("Failed to convert meta data to message");
        return NULL;
    }

    // Request P010 output for 10-bit destinations, flexible YUV otherwise.
    if (dstFormat() == COLOR_Format32bitABGR2101010) {
        videoFormat->setInt32("color-format", COLOR_FormatYUVP010);
    } else {
        videoFormat->setInt32("color-format", COLOR_FormatYUV420Flexible);
    }

    // Single-tile image: one input/output buffer suffices. This request
    // may fail if the component requires more for decoding.
    if ((mGridRows == 1) && (mGridCols == 1)) {
        videoFormat->setInt32("android._num-input-buffers", 1);
        videoFormat->setInt32("android._num-output-buffers", 1);
    }

    /// Set the importance for thumbnail.
    videoFormat->setInt32(KEY_IMPORTANCE, kThumbnailImportance);

    return videoFormat;
}
1217
onExtractRect(FrameRect * rect)1218 status_t MediaImageDecoder::onExtractRect(FrameRect *rect) {
1219 // TODO:
1220 // This callback is for verifying whether we can decode the rect,
1221 // and if so, set up the internal variables for decoding.
1222 // Currently, rect decoding is restricted to sequentially decoding one
1223 // row of tiles at a time. We can't decode arbitrary rects, as the image
1224 // track doesn't yet support seeking by tiles. So all we do here is to
1225 // verify the rect against what we expect.
1226 // When seeking by tile is supported, this code should be updated to
1227 // set the seek parameters.
1228 if (rect == NULL) {
1229 if (mTilesDecoded > 0) {
1230 return ERROR_UNSUPPORTED;
1231 }
1232 mTargetTiles = mGridRows * mGridCols;
1233 return OK;
1234 }
1235
1236 if (mTileWidth <= 0 || mTileHeight <=0) {
1237 return ERROR_UNSUPPORTED;
1238 }
1239
1240 int32_t row = mTilesDecoded / mGridCols;
1241 int32_t expectedTop = row * mTileHeight;
1242 int32_t expectedBot = (row + 1) * mTileHeight;
1243 if (expectedBot > mHeight) {
1244 expectedBot = mHeight;
1245 }
1246 if (rect->left != 0 || rect->top != expectedTop
1247 || rect->right != mWidth || rect->bottom != expectedBot) {
1248 ALOGE("currently only support sequential decoding of slices");
1249 return ERROR_UNSUPPORTED;
1250 }
1251
1252 // advance one row
1253 mTargetTiles = mTilesDecoded + mGridCols;
1254 return OK;
1255 }
1256
// Handles one decoded output buffer — a single tile for tiled images, or
// the whole picture otherwise — and color-converts it into its position in
// the destination frame.
//
// *done is set true once mTargetTiles tiles have been decoded. Returns OK
// on success, ERROR_MALFORMED on a bad output format, NO_MEMORY if the
// frame cannot be allocated, or ERROR_UNSUPPORTED when no conversion path
// exists (or flexible YUV arrives without a plane layout).
status_t MediaImageDecoder::onOutputReceived(
        uint8_t* frameData,
        sp<ABuffer> imgObj,
        const sp<AMessage> &outputFormat, int64_t /*timeUs*/, bool *done) {
    if (outputFormat == NULL) {
        return ERROR_MALFORMED;
    }

    int32_t width, height, stride;
    if (outputFormat->findInt32("width", &width) == false) {
        ALOGE("MediaImageDecoder::onOutputReceived:width is missing in outputFormat");
        return ERROR_MALFORMED;
    }
    if (outputFormat->findInt32("height", &height) == false) {
        ALOGE("MediaImageDecoder::onOutputReceived:height is missing in outputFormat");
        return ERROR_MALFORMED;
    }
    if (outputFormat->findInt32("stride", &stride) == false) {
        ALOGE("MediaImageDecoder::onOutputReceived:stride is missing in outputFormat");
        return ERROR_MALFORMED;
    }

    int32_t srcFormat;
    CHECK(outputFormat->findInt32("color-format", &srcFormat));

    uint32_t bitDepth = 8;
    if (COLOR_FormatYUVP010 == srcFormat) {
        bitDepth = 10;
    }

    // Lazily allocate the full-picture frame; tiles are blitted into it.
    if (mFrame == NULL) {
        sp<IMemory> frameMem = allocVideoFrame(
                trackMeta(), mWidth, mHeight, mTileWidth, mTileHeight, dstBpp(), bitDepth);

        if (frameMem == nullptr) {
            return NO_MEMORY;
        }

        mFrame = static_cast<VideoFrame*>(frameMem->unsecurePointer());

        setFrame(frameMem);
    }

    ColorConverter converter((OMX_COLOR_FORMATTYPE)srcFormat, dstFormat());

    // Color aspect keys are optional; 0 means unspecified.
    uint32_t standard, range, transfer;
    if (!outputFormat->findInt32("color-standard", (int32_t*)&standard)) {
        standard = 0;
    }
    if (!outputFormat->findInt32("color-range", (int32_t*)&range)) {
        range = 0;
    }
    if (!outputFormat->findInt32("color-transfer", (int32_t*)&transfer)) {
        transfer = 0;
    }

    // Hand the codec-provided MediaImage2 plane layout to the converter.
    if (imgObj != nullptr) {
        MediaImage2 *imageData = nullptr;
        imageData = (MediaImage2 *)(imgObj.get()->data());
        if (imageData != nullptr) {
            converter.setSrcMediaImage2(*imageData);
        }
    }
    // Flexible YUV cannot be interpreted without a plane layout.
    if (srcFormat == COLOR_FormatYUV420Flexible && imgObj.get() == nullptr) {
        return ERROR_UNSUPPORTED;
    }
    converter.setSrcColorSpace(standard, range, transfer);

    // Crop bounds are inclusive; default to the full buffer when absent.
    int32_t crop_left, crop_top, crop_right, crop_bottom;
    if (!outputFormat->findRect("crop", &crop_left, &crop_top, &crop_right, &crop_bottom)) {
        crop_left = crop_top = 0;
        crop_right = width - 1;
        crop_bottom = height - 1;
    }

    // slice-height, when present and positive, is the true plane height.
    int32_t slice_height;
    if (outputFormat->findInt32("slice-height", &slice_height) && slice_height > 0) {
        height = slice_height;
    }

    int32_t crop_width, crop_height;
    crop_width = crop_right - crop_left + 1;
    crop_height = crop_bottom - crop_top + 1;

    // Destination rect: tiles fill the grid row-major, left-to-right then
    // top-to-bottom.
    int32_t dstLeft, dstTop, dstRight, dstBottom;
    dstLeft = mTilesDecoded % mGridCols * crop_width;
    dstTop = mTilesDecoded / mGridCols * crop_height;
    dstRight = dstLeft + crop_width - 1;
    dstBottom = dstTop + crop_height - 1;

    // apply crop on bottom-right
    // TODO: need to move this into the color converter itself.
    if (dstRight >= mWidth) {
        crop_right = crop_left + mWidth - dstLeft - 1;
        dstRight = mWidth - 1;
    }
    if (dstBottom >= mHeight) {
        crop_bottom = crop_top + mHeight - dstTop - 1;
        dstBottom = mHeight - 1;
    }

    // Done once every expected tile has been consumed.
    *done = (++mTilesDecoded >= mTargetTiles);

    if (converter.isValid()) {
        converter.convert(
                (const uint8_t *)frameData,
                width, height, stride,
                crop_left, crop_top, crop_right, crop_bottom,
                mFrame->getFlattenedData(),
                mFrame->mWidth, mFrame->mHeight, mFrame->mRowBytes,
                dstLeft, dstTop, dstRight, dstBottom);
        return OK;
    }

    ALOGE("Unable to convert from format 0x%08x to 0x%08x",
            srcFormat, dstFormat());
    return ERROR_UNSUPPORTED;
}
1375
1376 } // namespace android
1377