1 /*
2  * Copyright 2018, The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "Codec2Buffer"
19 #define ATRACE_TAG  ATRACE_TAG_VIDEO
20 #include <utils/Log.h>
21 #include <utils/Trace.h>
22 
23 #include <aidl/android/hardware/graphics/common/Cta861_3.h>
24 #include <aidl/android/hardware/graphics/common/Smpte2086.h>
25 #include <android-base/no_destructor.h>
26 #include <android-base/properties.h>
27 #include <android/hardware/cas/native/1.0/types.h>
28 #include <android/hardware/drm/1.0/types.h>
29 #include <android/hardware/graphics/common/1.2/types.h>
30 #include <android/hardware/graphics/mapper/4.0/IMapper.h>
31 #include <gralloctypes/Gralloc4.h>
32 #include <hidlmemory/FrameworkUtils.h>
33 #include <media/hardware/HardwareAPI.h>
34 #include <media/stagefright/CodecBase.h>
35 #include <media/stagefright/MediaCodecConstants.h>
36 #include <media/stagefright/foundation/ABuffer.h>
37 #include <media/stagefright/foundation/AMessage.h>
38 #include <media/stagefright/foundation/AUtils.h>
39 #include <mediadrm/ICrypto.h>
40 #include <nativebase/nativebase.h>
41 #include <ui/Fence.h>
42 
43 #include <C2AllocatorGralloc.h>
44 #include <C2BlockInternal.h>
45 #include <C2Debug.h>
46 
47 #include "Codec2Buffer.h"
48 
49 namespace android {
50 
51 // Codec2Buffer
52 
53 bool Codec2Buffer::canCopyLinear(const std::shared_ptr<C2Buffer> &buffer) const {
54     if (const_cast<Codec2Buffer *>(this)->base() == nullptr) {
55         return false;
56     }
57     if (!buffer) {
58         // Nothing to copy, so we can copy by doing nothing.
59         return true;
60     }
61     if (buffer->data().type() != C2BufferData::LINEAR) {
62         return false;
63     }
64     if (buffer->data().linearBlocks().size() == 0u) {
65         // Nothing to copy, so we can copy by doing nothing.
66         return true;
67     } else if (buffer->data().linearBlocks().size() > 1u) {
68                 // We don't know how to copy more than one block.
69         return false;
70     }
71     if (buffer->data().linearBlocks()[0].size() > capacity()) {
72         // It won't fit.
73         return false;
74     }
75     return true;
76 }
77 
78 bool Codec2Buffer::copyLinear(const std::shared_ptr<C2Buffer> &buffer) {
79     // We assume that all canCopyLinear() checks passed.
80     if (!buffer || buffer->data().linearBlocks().size() == 0u
81             || buffer->data().linearBlocks()[0].size() == 0u) {
82         setRange(0, 0);
83         return true;
84     }
85     C2ReadView view = buffer->data().linearBlocks()[0].map().get();
86     if (view.error() != C2_OK) {
87         ALOGD("Error while mapping: %d", view.error());
88         return false;
89     }
90     if (view.capacity() > capacity()) {
91         ALOGD("C2ConstLinearBlock lied --- it actually doesn't fit: view(%u) > this(%zu)",
92                 view.capacity(), capacity());
93         return false;
94     }
95     memcpy(base(), view.data(), view.capacity());
96     setRange(0, view.capacity());
97     return true;
98 }
99 
100 void Codec2Buffer::setImageData(const sp<ABuffer> &imageData) {
101     mImageData = imageData;
102 }
103 
104 // LocalLinearBuffer
105 
106 bool LocalLinearBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
107     return canCopyLinear(buffer);
108 }
109 
110 bool LocalLinearBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
111     return copyLinear(buffer);
112 }
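// Illustrative usage sketch (not part of the original file; variable names are
// assumptions): callers are expected to gate copy() behind canCopy(), because
// copyLinear() assumes that all canCopyLinear() checks have already passed.
//
//     sp<Codec2Buffer> clientBuffer = ...;       // e.g. a LocalLinearBuffer
//     std::shared_ptr<C2Buffer> c2buffer = ...;  // linear output from the codec
//     if (clientBuffer->canCopy(c2buffer)) {
//         (void)clientBuffer->copy(c2buffer);    // copies the single linear block
//     }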
113 
114 // DummyContainerBuffer
115 
116 static uint8_t sDummyByte[1] = { 0 };
117 
118 DummyContainerBuffer::DummyContainerBuffer(
119         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer)
120     : Codec2Buffer(format, new ABuffer(sDummyByte, 1)),
121       mBufferRef(buffer) {
122     setRange(0, buffer ? 1 : 0);
123 }
124 
125 std::shared_ptr<C2Buffer> DummyContainerBuffer::asC2Buffer() {
126     return mBufferRef;
127 }
128 
129 void DummyContainerBuffer::clearC2BufferRefs() {
130     mBufferRef.reset();
131 }
132 
133 bool DummyContainerBuffer::canCopy(const std::shared_ptr<C2Buffer> &) const {
134     return !mBufferRef;
135 }
136 
137 bool DummyContainerBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
138     mBufferRef = buffer;
139     setRange(0, mBufferRef ? 1 : 0);
140     return true;
141 }
142 
143 // LinearBlockBuffer
144 
145 // static
146 sp<LinearBlockBuffer> LinearBlockBuffer::Allocate(
147         const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block) {
148     C2WriteView writeView(block->map().get());
149     if (writeView.error() != C2_OK) {
150         return nullptr;
151     }
152     return new LinearBlockBuffer(format, std::move(writeView), block);
153 }
154 
155 std::shared_ptr<C2Buffer> LinearBlockBuffer::asC2Buffer() {
156     return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
157 }
158 
159 bool LinearBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
160     return canCopyLinear(buffer);
161 }
162 
163 bool LinearBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
164     return copyLinear(buffer);
165 }
166 
167 LinearBlockBuffer::LinearBlockBuffer(
168         const sp<AMessage> &format,
169         C2WriteView&& writeView,
170         const std::shared_ptr<C2LinearBlock> &block)
171     : Codec2Buffer(format, new ABuffer(writeView.data(), writeView.size())),
172       mWriteView(writeView),
173       mBlock(block) {
174 }
175 
176 // ConstLinearBlockBuffer
177 
178 // static
179 sp<ConstLinearBlockBuffer> ConstLinearBlockBuffer::Allocate(
180         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer) {
181     if (!buffer
182             || buffer->data().type() != C2BufferData::LINEAR
183             || buffer->data().linearBlocks().size() != 1u) {
184         return nullptr;
185     }
186     C2ReadView readView(buffer->data().linearBlocks()[0].map().get());
187     if (readView.error() != C2_OK) {
188         return nullptr;
189     }
190     return new ConstLinearBlockBuffer(format, std::move(readView), buffer);
191 }
192 
193 ConstLinearBlockBuffer::ConstLinearBlockBuffer(
194         const sp<AMessage> &format,
195         C2ReadView&& readView,
196         const std::shared_ptr<C2Buffer> &buffer)
197     : Codec2Buffer(format, new ABuffer(
198             // NOTE: ABuffer only takes non-const pointer but this data is
199             //       supposed to be read-only.
200             const_cast<uint8_t *>(readView.data()), readView.capacity())),
201       mReadView(readView),
202       mBufferRef(buffer) {
203 }
204 
205 std::shared_ptr<C2Buffer> ConstLinearBlockBuffer::asC2Buffer() {
206     return mBufferRef;
207 }
208 
209 void ConstLinearBlockBuffer::clearC2BufferRefs() {
210     mBufferRef.reset();
211 }
212 
213 // GraphicView2MediaImageConverter
214 
215 namespace {
216 
217 class GraphicView2MediaImageConverter {
218 public:
219     /**
220      * Creates a C2GraphicView <=> MediaImage converter
221      *
222      * \param view C2GraphicView object
223      * \param format buffer format
224      * \param copy whether the converter is used for copying or not
225      */
226     GraphicView2MediaImageConverter(
227             const C2GraphicView &view, const sp<AMessage> &format, bool copy)
228         : mInitCheck(NO_INIT),
229           mView(view),
230           mWidth(view.width()),
231           mHeight(view.height()),
232           mAllocatedDepth(0),
233           mBackBufferSize(0),
234           mMediaImage(new ABuffer(sizeof(MediaImage2))) {
235         ATRACE_CALL();
236         if (!format->findInt32(KEY_COLOR_FORMAT, &mClientColorFormat)) {
237             mClientColorFormat = COLOR_FormatYUV420Flexible;
238         }
239         if (!format->findInt32("android._color-format", &mComponentColorFormat)) {
240             mComponentColorFormat = COLOR_FormatYUV420Flexible;
241         }
242         if (view.error() != C2_OK) {
243             ALOGD("Converter: view.error() = %d", view.error());
244             mInitCheck = BAD_VALUE;
245             return;
246         }
247         MediaImage2 *mediaImage = (MediaImage2 *)mMediaImage->base();
248         const C2PlanarLayout &layout = view.layout();
249         if (layout.numPlanes == 0) {
250             ALOGD("Converter: 0 planes");
251             mInitCheck = BAD_VALUE;
252             return;
253         }
254         memset(mediaImage, 0, sizeof(*mediaImage));
255         mAllocatedDepth = layout.planes[0].allocatedDepth;
256         uint32_t bitDepth = layout.planes[0].bitDepth;
257 
258         // align width and height to support subsampling cleanly
259         uint32_t stride = align(view.crop().width, 2) * divUp(layout.planes[0].allocatedDepth, 8u);
260         uint32_t vStride = align(view.crop().height, 2);
261 
262         bool tryWrapping = !copy;
263 
264         switch (layout.type) {
265             case C2PlanarLayout::TYPE_YUV: {
266                 mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
267                 if (layout.numPlanes != 3) {
268                     ALOGD("Converter: %d planes for YUV layout", layout.numPlanes);
269                     mInitCheck = BAD_VALUE;
270                     return;
271                 }
272                 std::optional<int> clientBitDepth = {};
273                 switch (mClientColorFormat) {
274                     case COLOR_FormatYUVP010:
275                         clientBitDepth = 10;
276                         break;
277                     case COLOR_FormatYUV411PackedPlanar:
278                     case COLOR_FormatYUV411Planar:
279                     case COLOR_FormatYUV420Flexible:
280                     case COLOR_FormatYUV420PackedPlanar:
281                     case COLOR_FormatYUV420PackedSemiPlanar:
282                     case COLOR_FormatYUV420Planar:
283                     case COLOR_FormatYUV420SemiPlanar:
284                     case COLOR_FormatYUV422Flexible:
285                     case COLOR_FormatYUV422PackedPlanar:
286                     case COLOR_FormatYUV422PackedSemiPlanar:
287                     case COLOR_FormatYUV422Planar:
288                     case COLOR_FormatYUV422SemiPlanar:
289                     case COLOR_FormatYUV444Flexible:
290                     case COLOR_FormatYUV444Interleaved:
291                         clientBitDepth = 8;
292                         break;
293                     default:
294                         // no-op; used with optional
295                         break;
296 
297                 }
298                 // conversion fails if the client bit depth and the component bit depth differ
299                 if ((clientBitDepth) && (bitDepth != clientBitDepth.value())) {
300                     ALOGD("Bit depth of client: %d and component: %d differ",
301                         *clientBitDepth, bitDepth);
302                     mInitCheck = BAD_VALUE;
303                     return;
304                 }
305                 C2PlaneInfo yPlane = layout.planes[C2PlanarLayout::PLANE_Y];
306                 C2PlaneInfo uPlane = layout.planes[C2PlanarLayout::PLANE_U];
307                 C2PlaneInfo vPlane = layout.planes[C2PlanarLayout::PLANE_V];
308                 if (yPlane.channel != C2PlaneInfo::CHANNEL_Y
309                         || uPlane.channel != C2PlaneInfo::CHANNEL_CB
310                         || vPlane.channel != C2PlaneInfo::CHANNEL_CR) {
311                     ALOGD("Converter: not YUV layout");
312                     mInitCheck = BAD_VALUE;
313                     return;
314                 }
315                 bool yuv420888 = yPlane.rowSampling == 1 && yPlane.colSampling == 1
316                         && uPlane.rowSampling == 2 && uPlane.colSampling == 2
317                         && vPlane.rowSampling == 2 && vPlane.colSampling == 2;
318                 if (yuv420888) {
319                     for (uint32_t i = 0; i < 3; ++i) {
320                         const C2PlaneInfo &plane = layout.planes[i];
321                         if (plane.allocatedDepth != 8 || plane.bitDepth != 8) {
322                             yuv420888 = false;
323                             break;
324                         }
325                     }
326                     yuv420888 = yuv420888 && yPlane.colInc == 1 && uPlane.rowInc == vPlane.rowInc;
327                 }
328                 int32_t copyFormat = mClientColorFormat;
329                 if (yuv420888 && mClientColorFormat == COLOR_FormatYUV420Flexible) {
330                     if (uPlane.colInc == 2 && vPlane.colInc == 2
331                             && yPlane.rowInc == uPlane.rowInc) {
332                         copyFormat = COLOR_FormatYUV420PackedSemiPlanar;
333                     } else if (uPlane.colInc == 1 && vPlane.colInc == 1
334                             && yPlane.rowInc == uPlane.rowInc * 2) {
335                         copyFormat = COLOR_FormatYUV420PackedPlanar;
336                     }
337                 }
338                 ALOGV("client_fmt=0x%x y:{colInc=%d rowInc=%d} u:{colInc=%d rowInc=%d} "
339                         "v:{colInc=%d rowInc=%d}",
340                         mClientColorFormat,
341                         yPlane.colInc, yPlane.rowInc,
342                         uPlane.colInc, uPlane.rowInc,
343                         vPlane.colInc, vPlane.rowInc);
344                 switch (copyFormat) {
345                     case COLOR_FormatYUV420Flexible:
346                     case COLOR_FormatYUV420Planar:
347                     case COLOR_FormatYUV420PackedPlanar:
348                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
349                         mediaImage->mPlane[mediaImage->Y].mColInc = 1;
350                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
351                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
352                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
353 
354                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
355                         mediaImage->mPlane[mediaImage->U].mColInc = 1;
356                         mediaImage->mPlane[mediaImage->U].mRowInc = stride / 2;
357                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
358                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
359 
360                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 5 / 4;
361                         mediaImage->mPlane[mediaImage->V].mColInc = 1;
362                         mediaImage->mPlane[mediaImage->V].mRowInc = stride / 2;
363                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
364                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
365 
366                         if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
367                             tryWrapping = yuv420888 && uPlane.colInc == 1 && vPlane.colInc == 1
368                                     && yPlane.rowInc == uPlane.rowInc * 2
369                                     && view.data()[0] < view.data()[1]
370                                     && view.data()[1] < view.data()[2];
371                         }
372                         break;
373 
374                     case COLOR_FormatYUV420SemiPlanar:
375                     case COLOR_FormatYUV420PackedSemiPlanar:
376                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
377                         mediaImage->mPlane[mediaImage->Y].mColInc = 1;
378                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
379                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
380                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
381 
382                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
383                         mediaImage->mPlane[mediaImage->U].mColInc = 2;
384                         mediaImage->mPlane[mediaImage->U].mRowInc = stride;
385                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
386                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
387 
388                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 1;
389                         mediaImage->mPlane[mediaImage->V].mColInc = 2;
390                         mediaImage->mPlane[mediaImage->V].mRowInc = stride;
391                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
392                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
393 
394                         if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
395                             tryWrapping = yuv420888 && uPlane.colInc == 2 && vPlane.colInc == 2
396                                     && yPlane.rowInc == uPlane.rowInc
397                                     && view.data()[0] < view.data()[1]
398                                     && view.data()[1] < view.data()[2];
399                         }
400                         break;
401 
402                     case COLOR_FormatYUVP010:
403                         // stride is in bytes
404                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
405                         mediaImage->mPlane[mediaImage->Y].mColInc = 2;
406                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
407                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
408                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
409 
410                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
411                         mediaImage->mPlane[mediaImage->U].mColInc = 4;
412                         mediaImage->mPlane[mediaImage->U].mRowInc = stride;
413                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
414                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
415 
416                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
417                         mediaImage->mPlane[mediaImage->V].mColInc = 4;
418                         mediaImage->mPlane[mediaImage->V].mRowInc = stride;
419                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
420                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
421                         if (tryWrapping) {
422                             tryWrapping = yPlane.allocatedDepth == 16
423                                     && uPlane.allocatedDepth == 16
424                                     && vPlane.allocatedDepth == 16
425                                     && yPlane.bitDepth == 10
426                                     && uPlane.bitDepth == 10
427                                     && vPlane.bitDepth == 10
428                                     && yPlane.rightShift == 6
429                                     && uPlane.rightShift == 6
430                                     && vPlane.rightShift == 6
431                                     && yPlane.rowSampling == 1 && yPlane.colSampling == 1
432                                     && uPlane.rowSampling == 2 && uPlane.colSampling == 2
433                                     && vPlane.rowSampling == 2 && vPlane.colSampling == 2
434                                     && yPlane.colInc == 2
435                                     && uPlane.colInc == 4
436                                     && vPlane.colInc == 4
437                                     && yPlane.rowInc == uPlane.rowInc
438                                     && yPlane.rowInc == vPlane.rowInc;
439                         }
440                         break;
441 
442                     default: {
443                         // default to fully planar format --- this will be overridden if wrapping
444                         // TODO: keep interleaved format
445                         int32_t colInc = divUp(mAllocatedDepth, 8u);
446                         int32_t rowInc = stride * colInc / yPlane.colSampling;
447                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
448                         mediaImage->mPlane[mediaImage->Y].mColInc = colInc;
449                         mediaImage->mPlane[mediaImage->Y].mRowInc = rowInc;
450                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = yPlane.colSampling;
451                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = yPlane.rowSampling;
452                         int32_t offset = rowInc * vStride / yPlane.rowSampling;
453 
454                         rowInc = stride * colInc / uPlane.colSampling;
455                         mediaImage->mPlane[mediaImage->U].mOffset = offset;
456                         mediaImage->mPlane[mediaImage->U].mColInc = colInc;
457                         mediaImage->mPlane[mediaImage->U].mRowInc = rowInc;
458                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = uPlane.colSampling;
459                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = uPlane.rowSampling;
460                         offset += rowInc * vStride / uPlane.rowSampling;
461 
462                         rowInc = stride * colInc / vPlane.colSampling;
463                         mediaImage->mPlane[mediaImage->V].mOffset = offset;
464                         mediaImage->mPlane[mediaImage->V].mColInc = colInc;
465                         mediaImage->mPlane[mediaImage->V].mRowInc = rowInc;
466                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = vPlane.colSampling;
467                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = vPlane.rowSampling;
468                         break;
469                     }
470                 }
471                 break;
472             }
473 
474             case C2PlanarLayout::TYPE_YUVA:
475                 ALOGD("Converter: unrecognized color format "
476                         "(client %d component %d) for YUVA layout",
477                         mClientColorFormat, mComponentColorFormat);
478                 mInitCheck = NO_INIT;
479                 return;
480             case C2PlanarLayout::TYPE_RGB:
481                 ALOGD("Converter: unrecognized color format "
482                         "(client %d component %d) for RGB layout",
483                         mClientColorFormat, mComponentColorFormat);
484                 mInitCheck = NO_INIT;
485                 // TODO: support MediaImage layout
486                 return;
487             case C2PlanarLayout::TYPE_RGBA:
488                 ALOGD("Converter: unrecognized color format "
489                         "(client %d component %d) for RGBA layout",
490                         mClientColorFormat, mComponentColorFormat);
491                 mInitCheck = NO_INIT;
492                 // TODO: support MediaImage layout
493                 return;
494             default:
495                 mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
496                 if (layout.numPlanes == 1) {
497                     const C2PlaneInfo &plane = layout.planes[0];
498                     if (plane.colInc < 0 || plane.rowInc < 0) {
499                         // Copy-only if we have negative colInc/rowInc
500                         tryWrapping = false;
501                     }
502                     mediaImage->mPlane[0].mOffset = 0;
503                     mediaImage->mPlane[0].mColInc = std::abs(plane.colInc);
504                     mediaImage->mPlane[0].mRowInc = std::abs(plane.rowInc);
505                     mediaImage->mPlane[0].mHorizSubsampling = plane.colSampling;
506                     mediaImage->mPlane[0].mVertSubsampling = plane.rowSampling;
507                 } else {
508                     ALOGD("Converter: unrecognized layout: color format (client %d component %d)",
509                             mClientColorFormat, mComponentColorFormat);
510                     mInitCheck = NO_INIT;
511                     return;
512                 }
513                 break;
514         }
515         if (tryWrapping) {
516             // try to map directly. check if the planes are near one another
517             const uint8_t *minPtr = mView.data()[0];
518             const uint8_t *maxPtr = mView.data()[0];
519             int32_t planeSize = 0;
520             for (uint32_t i = 0; i < layout.numPlanes; ++i) {
521                 const C2PlaneInfo &plane = layout.planes[i];
522                 int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
523                 ssize_t minOffset = plane.minOffset(
524                         mWidth / plane.colSampling, mHeight / plane.rowSampling);
525                 ssize_t maxOffset = plane.maxOffset(
526                         mWidth / plane.colSampling, mHeight / plane.rowSampling);
527                 if (minPtr > mView.data()[i] + minOffset) {
528                     minPtr = mView.data()[i] + minOffset;
529                 }
530                 if (maxPtr < mView.data()[i] + maxOffset) {
531                     maxPtr = mView.data()[i] + maxOffset;
532                 }
533                 planeSize += planeStride * divUp(mAllocatedDepth, 8u)
534                         * align(mHeight, 64) / plane.rowSampling;
535             }
536 
537             if (minPtr == mView.data()[0] && (maxPtr - minPtr) <= planeSize) {
538                 // FIXME: this is risky as reading/writing data out of bounds results
539                 //        in undefined behavior, but gralloc does assume a
540                 //        contiguous mapping
541                 for (uint32_t i = 0; i < layout.numPlanes; ++i) {
542                     const C2PlaneInfo &plane = layout.planes[i];
543                     mediaImage->mPlane[i].mOffset = mView.data()[i] - minPtr;
544                     mediaImage->mPlane[i].mColInc = plane.colInc;
545                     mediaImage->mPlane[i].mRowInc = plane.rowInc;
546                     mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
547                     mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
548                 }
549                 mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr), maxPtr - minPtr);
550                 ALOGV("Converter: wrapped (capacity=%zu)", mWrapped->capacity());
551             }
552         }
553         mediaImage->mNumPlanes = layout.numPlanes;
554         mediaImage->mWidth = view.crop().width;
555         mediaImage->mHeight = view.crop().height;
556         mediaImage->mBitDepth = bitDepth;
557         mediaImage->mBitDepthAllocated = mAllocatedDepth;
558 
559         uint32_t bufferSize = 0;
560         for (uint32_t i = 0; i < layout.numPlanes; ++i) {
561             const C2PlaneInfo &plane = layout.planes[i];
562             if (plane.allocatedDepth < plane.bitDepth
563                     || plane.rightShift != plane.allocatedDepth - plane.bitDepth) {
564                 ALOGD("rightShift value of %u unsupported", plane.rightShift);
565                 mInitCheck = BAD_VALUE;
566                 return;
567             }
568             if (plane.allocatedDepth > 8 && plane.endianness != C2PlaneInfo::NATIVE) {
569                 ALOGD("endianness value of %u unsupported", plane.endianness);
570                 mInitCheck = BAD_VALUE;
571                 return;
572             }
573             if (plane.allocatedDepth != mAllocatedDepth || plane.bitDepth != bitDepth) {
574                 ALOGD("different allocatedDepth/bitDepth per plane unsupported");
575                 mInitCheck = BAD_VALUE;
576                 return;
577             }
578             // stride is in bytes
579             bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
580         }
581 
582         mBackBufferSize = bufferSize;
583         mInitCheck = OK;
584     }
585 
586     status_t initCheck() const { return mInitCheck; }
587 
588     uint32_t backBufferSize() const { return mBackBufferSize; }
589 
590     /**
591      * Wrap C2GraphicView using a MediaImage2. Note that if not wrapped, the content is not mapped
592      * in this function --- the caller should use copyToMediaImage() to copy the
593      * data into a backing buffer explicitly.
594      *
595      * \return media buffer. This is null if wrapping failed.
596      */
597     sp<ABuffer> wrap() const {
598         if (mBackBuffer == nullptr) {
599             return mWrapped;
600         }
601         return nullptr;
602     }
603 
604     bool setBackBuffer(const sp<ABuffer> &backBuffer) {
605         if (backBuffer == nullptr) {
606             return false;
607         }
608         if (backBuffer->capacity() < mBackBufferSize) {
609             return false;
610         }
611         backBuffer->setRange(0, mBackBufferSize);
612         mBackBuffer = backBuffer;
613         return true;
614     }
615 
616     /**
617      * Copy C2GraphicView to MediaImage2.
618      */
619     status_t copyToMediaImage() {
620         ATRACE_CALL();
621         if (mInitCheck != OK) {
622             return mInitCheck;
623         }
624         return ImageCopy(mBackBuffer->base(), getMediaImage(), mView);
625     }
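
626     // Illustrative sketch (hedged, not part of the original file) of the intended
627     // wrap-or-copy flow; the concrete callers below (e.g. GraphicBlockBuffer::Allocate)
628     // follow this pattern, and 'alloc' stands for a caller-provided allocator:
629     //
630     //     GraphicView2MediaImageConverter conv(view, format, false /* copy */);
631     //     if (conv.initCheck() != OK) { /* bail out */ }
632     //     sp<ABuffer> out = conv.wrap();          // zero-copy if planes are contiguous
633     //     if (out == nullptr) {
634     //         out = alloc(conv.backBufferSize()); // otherwise allocate a back buffer,
635     //         conv.setBackBuffer(out);            // attach it, and copy explicitly
636     //         conv.copyToMediaImage();
637     //     }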
626 
627     const sp<ABuffer> &imageData() const { return mMediaImage; }
628 
629 private:
630     status_t mInitCheck;
631 
632     const C2GraphicView mView;
633     uint32_t mWidth;
634     uint32_t mHeight;
635     int32_t mClientColorFormat;  ///< SDK color format for MediaImage
636     int32_t mComponentColorFormat;  ///< SDK color format from component
637     sp<ABuffer> mWrapped;  ///< wrapped buffer (if we can map C2Buffer to an ABuffer)
638     uint32_t mAllocatedDepth;
639     uint32_t mBackBufferSize;
640     sp<ABuffer> mMediaImage;
641     std::function<sp<ABuffer>(size_t)> mAlloc;
642 
643     sp<ABuffer> mBackBuffer;    ///< backing buffer if we have to copy C2Buffer <=> ABuffer
644 
645     MediaImage2 *getMediaImage() {
646         return (MediaImage2 *)mMediaImage->base();
647     }
648 };
649 
650 }  // namespace
651 
652 // GraphicBlockBuffer
653 
654 // static
655 sp<GraphicBlockBuffer> GraphicBlockBuffer::Allocate(
656         const sp<AMessage> &format,
657         const std::shared_ptr<C2GraphicBlock> &block,
658         std::function<sp<ABuffer>(size_t)> alloc) {
659     ATRACE_BEGIN("GraphicBlockBuffer::Allocate block->map()");
660     C2GraphicView view(block->map().get());
661     ATRACE_END();
662     if (view.error() != C2_OK) {
663         ALOGD("C2GraphicBlock::map failed: %d", view.error());
664         return nullptr;
665     }
666 
667     GraphicView2MediaImageConverter converter(view, format, false /* copy */);
668     if (converter.initCheck() != OK) {
669         ALOGD("Converter init failed: %d", converter.initCheck());
670         return nullptr;
671     }
672     bool wrapped = true;
673     sp<ABuffer> buffer = converter.wrap();
674     if (buffer == nullptr) {
675         buffer = alloc(converter.backBufferSize());
676         if (!converter.setBackBuffer(buffer)) {
677             ALOGD("Converter failed to set back buffer");
678             return nullptr;
679         }
680         wrapped = false;
681     }
682     return new GraphicBlockBuffer(
683             format,
684             buffer,
685             std::move(view),
686             block,
687             converter.imageData(),
688             wrapped);
689 }
690 
691 GraphicBlockBuffer::GraphicBlockBuffer(
692         const sp<AMessage> &format,
693         const sp<ABuffer> &buffer,
694         C2GraphicView &&view,
695         const std::shared_ptr<C2GraphicBlock> &block,
696         const sp<ABuffer> &imageData,
697         bool wrapped)
698     : Codec2Buffer(format, buffer),
699       mView(view),
700       mBlock(block),
701       mWrapped(wrapped) {
702     setImageData(imageData);
703 }
704 
705 std::shared_ptr<C2Buffer> GraphicBlockBuffer::asC2Buffer() {
706     ATRACE_CALL();
707     uint32_t width = mView.width();
708     uint32_t height = mView.height();
709     if (!mWrapped) {
710         (void)ImageCopy(mView, base(), imageData());
711     }
712     return C2Buffer::CreateGraphicBuffer(
713             mBlock->share(C2Rect(width, height), C2Fence()));
714 }
715 
716 // GraphicMetadataBuffer
717 GraphicMetadataBuffer::GraphicMetadataBuffer(
718         const sp<AMessage> &format,
719         const std::shared_ptr<C2Allocator> &alloc)
720     : Codec2Buffer(format, new ABuffer(sizeof(VideoNativeMetadata))),
721       mAlloc(alloc) {
722     ((VideoNativeMetadata *)base())->pBuffer = nullptr;
723 }
724 
725 std::shared_ptr<C2Buffer> GraphicMetadataBuffer::asC2Buffer() {
726 #ifdef __LP64__
727     static std::once_flag s_checkOnce;
728     static bool s_is64bitOk {true};
729     std::call_once(s_checkOnce, [&](){
730         const std::string abi32list =
731         ::android::base::GetProperty("ro.product.cpu.abilist32", "");
732         if (!abi32list.empty()) {
733             int32_t inputSurfaceSetting =
734             ::android::base::GetIntProperty("debug.stagefright.c2inputsurface", int32_t(0));
735             s_is64bitOk = inputSurfaceSetting != 0;
736         }
737     });
738 
739     if (!s_is64bitOk) {
740         ALOGE("GraphicMetadataBuffer does not work in 32+64 system if compiled as 64-bit object "\
741               "when debug.stagefright.c2inputsurface is set to 0");
742         return nullptr;
743     }
744 #endif
745 
746     VideoNativeMetadata *meta = (VideoNativeMetadata *)base();
747     ANativeWindowBuffer *buffer = (ANativeWindowBuffer *)meta->pBuffer;
748     if (buffer == nullptr) {
749         ALOGD("VideoNativeMetadata contains null buffer");
750         return nullptr;
751     }
752 
753     ALOGV("VideoNativeMetadata: %dx%d", buffer->width, buffer->height);
754     C2Handle *handle = WrapNativeCodec2GrallocHandle(
755             buffer->handle,
756             buffer->width,
757             buffer->height,
758             buffer->format,
759             buffer->usage,
760             buffer->stride);
761     std::shared_ptr<C2GraphicAllocation> alloc;
762     c2_status_t err = mAlloc->priorGraphicAllocation(handle, &alloc);
763     if (err != C2_OK) {
764         ALOGD("Failed to wrap VideoNativeMetadata into C2GraphicAllocation");
765         native_handle_close(handle);
766         native_handle_delete(handle);
767         return nullptr;
768     }
769     std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
770 
771     meta->pBuffer = 0;
772     // TODO: wrap this in C2Fence so that the component can wait when it
773     //       actually starts processing.
774     if (meta->nFenceFd >= 0) {
775         sp<Fence> fence(new Fence(meta->nFenceFd));
776         fence->waitForever(LOG_TAG);
777     }
778     return C2Buffer::CreateGraphicBuffer(
779             block->share(C2Rect(buffer->width, buffer->height), C2Fence()));
780 }
781 
782 // ConstGraphicBlockBuffer
783 
784 // static
785 sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::Allocate(
786         const sp<AMessage> &format,
787         const std::shared_ptr<C2Buffer> &buffer,
788         std::function<sp<ABuffer>(size_t)> alloc) {
789     if (!buffer
790             || buffer->data().type() != C2BufferData::GRAPHIC
791             || buffer->data().graphicBlocks().size() != 1u) {
792         ALOGD("C2Buffer precond fail");
793         return nullptr;
794     }
795     ATRACE_BEGIN("ConstGraphicBlockBuffer::Allocate block->map()");
796     std::unique_ptr<const C2GraphicView> view(std::make_unique<const C2GraphicView>(
797             buffer->data().graphicBlocks()[0].map().get()));
798     ATRACE_END();
799     std::unique_ptr<const C2GraphicView> holder;
800 
801     GraphicView2MediaImageConverter converter(*view, format, false /* copy */);
802     if (converter.initCheck() != OK) {
803         ALOGD("Converter init failed: %d", converter.initCheck());
804         return nullptr;
805     }
806     bool wrapped = true;
807     sp<ABuffer> aBuffer = converter.wrap();
808     if (aBuffer == nullptr) {
809         aBuffer = alloc(converter.backBufferSize());
810         if (!converter.setBackBuffer(aBuffer)) {
811             ALOGD("Converter failed to set back buffer");
812             return nullptr;
813         }
814         wrapped = false;
815         converter.copyToMediaImage();
816         // We don't need the view.
817         holder = std::move(view);
818     }
819     return new ConstGraphicBlockBuffer(
820             format,
821             aBuffer,
822             std::move(view),
823             buffer,
824             converter.imageData(),
825             wrapped);
826 }
827 
828 // static
829 sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::AllocateEmpty(
830         const sp<AMessage> &format,
831         std::function<sp<ABuffer>(size_t)> alloc) {
832     int32_t width, height;
833     if (!format->findInt32("width", &width)
834             || !format->findInt32("height", &height)) {
835         ALOGD("format had no width / height");
836         return nullptr;
837     }
838     int32_t colorFormat = COLOR_FormatYUV420Flexible;
839     int32_t bpp = 12;  // 8(Y) + 2(U) + 2(V)
840     if (format->findInt32(KEY_COLOR_FORMAT, &colorFormat)) {
841         if (colorFormat == COLOR_FormatYUVP010) {
842             bpp = 24;  // 16(Y) + 4(U) + 4(V)
843         }
844     }
845     sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * bpp / 8));
846     if (aBuffer == nullptr) {
847         ALOGD("%s: failed to allocate buffer", __func__);
848         return nullptr;
849     }
850     return new ConstGraphicBlockBuffer(
851             format,
852             aBuffer,
853             nullptr,
854             nullptr,
855             nullptr,
856             false);
857 }
858 
859 ConstGraphicBlockBuffer::ConstGraphicBlockBuffer(
860         const sp<AMessage> &format,
861         const sp<ABuffer> &aBuffer,
862         std::unique_ptr<const C2GraphicView> &&view,
863         const std::shared_ptr<C2Buffer> &buffer,
864         const sp<ABuffer> &imageData,
865         bool wrapped)
866     : Codec2Buffer(format, aBuffer),
867       mView(std::move(view)),
868       mBufferRef(buffer),
869       mWrapped(wrapped) {
870     setImageData(imageData);
871 }
872 
873 std::shared_ptr<C2Buffer> ConstGraphicBlockBuffer::asC2Buffer() {
874     return mBufferRef;
875 }
876 
877 void ConstGraphicBlockBuffer::clearC2BufferRefs() {
878     mView.reset();
879     mBufferRef.reset();
880 }
881 
882 bool ConstGraphicBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
883     if (mWrapped || mBufferRef) {
884         ALOGD("ConstGraphicBlockBuffer::canCopy: %swrapped ; buffer ref %s",
885                 mWrapped ? "" : "not ", mBufferRef ? "exists" : "doesn't exist");
886         return false;
887     }
888     if (!buffer) {
889         // Nothing to copy, so we can copy by doing nothing.
890         return true;
891     }
892     if (buffer->data().type() != C2BufferData::GRAPHIC) {
893         ALOGD("ConstGraphicBlockBuffer::canCopy: buffer precondition unsatisfied");
894         return false;
895     }
896     if (buffer->data().graphicBlocks().size() == 0) {
897         return true;
898     } else if (buffer->data().graphicBlocks().size() != 1u) {
899         ALOGD("ConstGraphicBlockBuffer::canCopy: too many blocks");
900         return false;
901     }
902 
903     ATRACE_BEGIN("ConstGraphicBlockBuffer::canCopy block->map()");
904     GraphicView2MediaImageConverter converter(
905             buffer->data().graphicBlocks()[0].map().get(),
906             // FIXME: format() is not const, but we cannot change it, so do a const cast here
907             const_cast<ConstGraphicBlockBuffer *>(this)->format(),
908             true /* copy */);
909     ATRACE_END();
910     if (converter.initCheck() != OK) {
911         ALOGD("ConstGraphicBlockBuffer::canCopy: converter init failed: %d", converter.initCheck());
912         return false;
913     }
914     if (converter.backBufferSize() > capacity()) {
915         ALOGD("ConstGraphicBlockBuffer::canCopy: insufficient capacity: req %u has %zu",
916                 converter.backBufferSize(), capacity());
917         return false;
918     }
919     return true;
920 }
921 
922 bool ConstGraphicBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
923     if (!buffer || buffer->data().graphicBlocks().size() == 0) {
924         setRange(0, 0);
925         return true;
926     }
927 
928     GraphicView2MediaImageConverter converter(
929             buffer->data().graphicBlocks()[0].map().get(), format(), true /* copy */);
930     if (converter.initCheck() != OK) {
931         ALOGD("ConstGraphicBlockBuffer::copy: converter init failed: %d", converter.initCheck());
932         return false;
933     }
934     sp<ABuffer> aBuffer = new ABuffer(base(), capacity());
935     if (!converter.setBackBuffer(aBuffer)) {
936         ALOGD("ConstGraphicBlockBuffer::copy: set back buffer failed");
937         return false;
938     }
939     setRange(0, aBuffer->size());  // align size info
940     converter.copyToMediaImage();
941     setImageData(converter.imageData());
942     mBufferRef = buffer;
943     return true;
944 }
945 
946 // EncryptedLinearBlockBuffer
947 
948 EncryptedLinearBlockBuffer::EncryptedLinearBlockBuffer(
949         const sp<AMessage> &format,
950         const std::shared_ptr<C2LinearBlock> &block,
951         const sp<IMemory> &memory,
952         int32_t heapSeqNum)
953     // TODO: Using unsecurePointer() has some associated security pitfalls
954     //       (see declaration for details).
955     //       Either document why it is safe in this case or address the
956     //       issue (e.g. by copying).
957     : Codec2Buffer(format, new ABuffer(memory->unsecurePointer(), memory->size())),
958       mBlock(block),
959       mMemory(memory),
960       mHeapSeqNum(heapSeqNum) {
961 }
962 
963 std::shared_ptr<C2Buffer> EncryptedLinearBlockBuffer::asC2Buffer() {
964     return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
965 }
966 
967 void EncryptedLinearBlockBuffer::fillSourceBuffer(
968         hardware::drm::V1_0::SharedBuffer *source) {
969     BufferChannelBase::IMemoryToSharedBuffer(mMemory, mHeapSeqNum, source);
970 }
971 
972 void EncryptedLinearBlockBuffer::fillSourceBuffer(
973         hardware::cas::native::V1_0::SharedBuffer *source) {
974     ssize_t offset;
975     size_t size;
976 
977     mHidlMemory = hardware::fromHeap(mMemory->getMemory(&offset, &size));
978     source->heapBase = *mHidlMemory;
979     source->offset = offset;
980     source->size = size;
981 }
982 
983 bool EncryptedLinearBlockBuffer::copyDecryptedContent(
984         const sp<IMemory> &decrypted, size_t length) {
985     C2WriteView view = mBlock->map().get();
986     if (view.error() != C2_OK) {
987         return false;
988     }
989     if (view.size() < length) {
990         return false;
991     }
992     memcpy(view.data(), decrypted->unsecurePointer(), length);
993     return true;
994 }
995 
996 bool EncryptedLinearBlockBuffer::copyDecryptedContentFromMemory(size_t length) {
997     return copyDecryptedContent(mMemory, length);
998 }
999 
1000 native_handle_t *EncryptedLinearBlockBuffer::handle() const {
1001     return const_cast<native_handle_t *>(mBlock->handle());
1002 }
1003 
1004 using ::aidl::android::hardware::graphics::common::Cta861_3;
1005 using ::aidl::android::hardware::graphics::common::Dataspace;
1006 using ::aidl::android::hardware::graphics::common::Smpte2086;
1007 
1008 using ::android::gralloc4::MetadataType_Cta861_3;
1009 using ::android::gralloc4::MetadataType_Dataspace;
1010 using ::android::gralloc4::MetadataType_Smpte2086;
1011 using ::android::gralloc4::MetadataType_Smpte2094_40;
1012 
1013 using ::android::hardware::Return;
1014 using ::android::hardware::hidl_vec;
1015 
1016 using Error4 = ::android::hardware::graphics::mapper::V4_0::Error;
1017 using IMapper4 = ::android::hardware::graphics::mapper::V4_0::IMapper;
1018 
1019 namespace {
1020 
1021 sp<IMapper4> GetMapper4() {
1022     static ::android::base::NoDestructor<sp<IMapper4>> sMapper(IMapper4::getService());
1023     return *sMapper;
1024 }
1025 
1026 class Gralloc4Buffer {
1027 public:
1028     Gralloc4Buffer(const C2Handle *const handle) : mBuffer(nullptr) {
1029         sp<IMapper4> mapper = GetMapper4();
1030         if (!mapper) {
1031             return;
1032         }
1033         // Unwrap raw buffer handle from the C2Handle
1034         native_handle_t *nh = UnwrapNativeCodec2GrallocHandle(handle);
1035         if (!nh) {
1036             return;
1037         }
1038         // Import the raw handle so IMapper can use the buffer. The imported
1039         // handle must be freed when the client is done with the buffer.
1040         mapper->importBuffer(
1041                 hardware::hidl_handle(nh),
1042                 [&](const Error4 &error, void *buffer) {
1043                     if (error == Error4::NONE) {
1044                         mBuffer = buffer;
1045                     }
1046                 });
1047 
1048         // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
1049         //         does not clone the fds. Thus we need to delete the handle
1050         //         without closing it.
1051         native_handle_delete(nh);
1052     }
1053 
1054     ~Gralloc4Buffer() {
1055         sp<IMapper4> mapper = GetMapper4();
1056         if (mapper && mBuffer) {
1057             // Free the imported buffer handle. This does not release the
1058             // underlying buffer itself.
1059             mapper->freeBuffer(mBuffer);
1060         }
1061     }
1062 
1063     void *get() const { return mBuffer; }
1064     operator bool() const { return (mBuffer != nullptr); }
1065 private:
1066     void *mBuffer;
1067 };
1068 
1069 }  // namespace
1070 
1071 c2_status_t GetHdrMetadataFromGralloc4Handle(
1072         const C2Handle *const handle,
1073         std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
1074         std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo) {
1075     c2_status_t err = C2_OK;
1076     sp<IMapper4> mapper = GetMapper4();
1077     Gralloc4Buffer buffer(handle);
1078     if (!mapper || !buffer) {
1079         // Gralloc4 not supported; nothing to do
1080         return err;
1081     }
1082     Error4 mapperErr = Error4::NONE;
1083     if (staticInfo) {
1084         ALOGV("Grabbing static HDR info from gralloc4 metadata");
1085         staticInfo->reset(new C2StreamHdrStaticMetadataInfo::input(0u));
1086         memset(&(*staticInfo)->mastering, 0, sizeof((*staticInfo)->mastering));
1087         (*staticInfo)->maxCll = 0;
1088         (*staticInfo)->maxFall = 0;
1089         IMapper4::get_cb cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1090             mapperErr = err;
1091             if (err != Error4::NONE) {
1092                 return;
1093             }
1094 
1095             std::optional<Smpte2086> smpte2086;
1096             gralloc4::decodeSmpte2086(vec, &smpte2086);
1097             if (smpte2086) {
1098                 (*staticInfo)->mastering.red.x    = smpte2086->primaryRed.x;
1099                 (*staticInfo)->mastering.red.y    = smpte2086->primaryRed.y;
1100                 (*staticInfo)->mastering.green.x  = smpte2086->primaryGreen.x;
1101                 (*staticInfo)->mastering.green.y  = smpte2086->primaryGreen.y;
1102                 (*staticInfo)->mastering.blue.x   = smpte2086->primaryBlue.x;
1103                 (*staticInfo)->mastering.blue.y   = smpte2086->primaryBlue.y;
1104                 (*staticInfo)->mastering.white.x  = smpte2086->whitePoint.x;
1105                 (*staticInfo)->mastering.white.y  = smpte2086->whitePoint.y;
1106 
1107                 (*staticInfo)->mastering.maxLuminance = smpte2086->maxLuminance;
1108                 (*staticInfo)->mastering.minLuminance = smpte2086->minLuminance;
1109             } else {
1110                 mapperErr = Error4::BAD_VALUE;
1111             }
1112         };
1113         Return<void> ret = mapper->get(buffer.get(), MetadataType_Smpte2086, cb);
1114         if (!ret.isOk()) {
1115             err = C2_REFUSED;
1116         } else if (mapperErr != Error4::NONE) {
1117             err = C2_CORRUPTED;
1118         }
1119         cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1120             mapperErr = err;
1121             if (err != Error4::NONE) {
1122                 return;
1123             }
1124 
1125             std::optional<Cta861_3> cta861_3;
1126             gralloc4::decodeCta861_3(vec, &cta861_3);
1127             if (cta861_3) {
1128                 (*staticInfo)->maxCll   = cta861_3->maxContentLightLevel;
1129                 (*staticInfo)->maxFall  = cta861_3->maxFrameAverageLightLevel;
1130             } else {
1131                 mapperErr = Error4::BAD_VALUE;
1132             }
1133         };
1134         ret = mapper->get(buffer.get(), MetadataType_Cta861_3, cb);
1135         if (!ret.isOk()) {
1136             err = C2_REFUSED;
1137         } else if (mapperErr != Error4::NONE) {
1138             err = C2_CORRUPTED;
1139         }
1140     }
1141 
1142     if (err != C2_OK) {
1143         staticInfo->reset();
1144     }
1145 
1146     if (dynamicInfo) {
1147         ALOGV("Grabbing dynamic HDR info from gralloc4 metadata");
1148         dynamicInfo->reset();
1149         IMapper4::get_cb cb = [&mapperErr, dynamicInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1150             mapperErr = err;
1151             if (err != Error4::NONE) {
1152                 return;
1153             }
1154             if (!dynamicInfo) {
1155                 return;
1156             }
1157             *dynamicInfo = C2StreamHdrDynamicMetadataInfo::input::AllocShared(
1158                     vec.size(), 0u, C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
1159             memcpy((*dynamicInfo)->m.data, vec.data(), vec.size());
1160         };
1161         Return<void> ret = mapper->get(buffer.get(), MetadataType_Smpte2094_40, cb);
1162         if (!ret.isOk() || mapperErr != Error4::NONE) {
1163             dynamicInfo->reset();
1164         }
1165     }
1166 
1167     return err;
1168 }
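
1169 // Illustrative sketch of a hypothetical caller (not part of the original file) of
1170 // the HDR-metadata getter above; 'block' stands for an assumed C2ConstGraphicBlock:
1171 //
1172 //     std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> staticInfo;
1173 //     std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> dynamicInfo;
1174 //     c2_status_t res = GetHdrMetadataFromGralloc4Handle(
1175 //             block.handle(), &staticInfo, &dynamicInfo);
1176 //     // Either pointer may remain reset if the gralloc4 metadata was absent
1177 //     // or could not be decoded.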
1169 
1170 c2_status_t SetMetadataToGralloc4Handle(
1171         android_dataspace_t dataSpace,
1172         const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
1173         const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
1174         const C2Handle *const handle) {
1175     c2_status_t err = C2_OK;
1176     sp<IMapper4> mapper = GetMapper4();
1177     Gralloc4Buffer buffer(handle);
1178     if (!mapper || !buffer) {
1179         // Gralloc4 not supported; nothing to do
1180         return err;
1181     }
1182     {
1183         hidl_vec<uint8_t> metadata;
1184         if (gralloc4::encodeDataspace(static_cast<Dataspace>(dataSpace), &metadata) == OK) {
1185             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Dataspace, metadata);
1186             if (!ret.isOk()) {
1187                 err = C2_REFUSED;
1188             } else if (ret != Error4::NONE) {
1189                 err = C2_CORRUPTED;
1190             }
1191         }
1192     }
1193     if (staticInfo && *staticInfo) {
1194         ALOGV("Setting static HDR info as gralloc4 metadata");
1195         std::optional<Smpte2086> smpte2086 = Smpte2086{
1196             {staticInfo->mastering.red.x, staticInfo->mastering.red.y},
1197             {staticInfo->mastering.green.x, staticInfo->mastering.green.y},
1198             {staticInfo->mastering.blue.x, staticInfo->mastering.blue.y},
1199             {staticInfo->mastering.white.x, staticInfo->mastering.white.y},
1200             staticInfo->mastering.maxLuminance,
1201             staticInfo->mastering.minLuminance,
1202         };
1203         hidl_vec<uint8_t> vec;
1204         if (0.0 <= smpte2086->primaryRed.x && smpte2086->primaryRed.x <= 1.0
1205                 && 0.0 <= smpte2086->primaryRed.y && smpte2086->primaryRed.y <= 1.0
1206                 && 0.0 <= smpte2086->primaryGreen.x && smpte2086->primaryGreen.x <= 1.0
1207                 && 0.0 <= smpte2086->primaryGreen.y && smpte2086->primaryGreen.y <= 1.0
1208                 && 0.0 <= smpte2086->primaryBlue.x && smpte2086->primaryBlue.x <= 1.0
1209                 && 0.0 <= smpte2086->primaryBlue.y && smpte2086->primaryBlue.y <= 1.0
1210                 && 0.0 <= smpte2086->whitePoint.x && smpte2086->whitePoint.x <= 1.0
1211                 && 0.0 <= smpte2086->whitePoint.y && smpte2086->whitePoint.y <= 1.0
1212                 && 0.0 <= smpte2086->maxLuminance && 0.0 <= smpte2086->minLuminance
1213                 && gralloc4::encodeSmpte2086(smpte2086, &vec) == OK) {
1214             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Smpte2086, vec);
1215             if (!ret.isOk()) {
1216                 err = C2_REFUSED;
1217             } else if (ret != Error4::NONE) {
1218                 err = C2_CORRUPTED;
1219             }
1220         }
1221         std::optional<Cta861_3> cta861_3 = Cta861_3{
1222             staticInfo->maxCll,
1223             staticInfo->maxFall,
1224         };
1225         if (0.0 <= cta861_3->maxContentLightLevel && 0.0 <= cta861_3->maxFrameAverageLightLevel
1226                 && gralloc4::encodeCta861_3(cta861_3, &vec) == OK) {
1227             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Cta861_3, vec);
1228             if (!ret.isOk()) {
1229                 err = C2_REFUSED;
1230             } else if (ret != Error4::NONE) {
1231                 err = C2_CORRUPTED;
1232             }
1233         }
1234     }
1235     if (dynamicInfo && *dynamicInfo && dynamicInfo->flexCount() > 0) {
1236         ALOGV("Setting dynamic HDR info as gralloc4 metadata");
1237         std::optional<IMapper4::MetadataType> metadataType;
1238         switch (dynamicInfo->m.type_) {
1239         case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10:
1240             // TODO
1241             break;
1242         case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40:
1243             metadataType = MetadataType_Smpte2094_40;
1244             break;
1245         }
1246 
1247         if (metadataType) {
1248             std::vector<uint8_t> smpte2094_40;
1249             smpte2094_40.resize(dynamicInfo->flexCount());
1250             memcpy(smpte2094_40.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
1251 
1252             hidl_vec<uint8_t> vec;
1253             if (gralloc4::encodeSmpte2094_40({ smpte2094_40 }, &vec) == OK) {
1254                 Return<Error4> ret = mapper->set(buffer.get(), *metadataType, vec);
1255                 if (!ret.isOk()) {
1256                     err = C2_REFUSED;
1257                 } else if (ret != Error4::NONE) {
1258                     err = C2_CORRUPTED;
1259                 }
1260             }
1261         } else {
1262             err = C2_BAD_VALUE;
1263         }
1264     }
1265 
1266     return err;
1267 }
1268 
1269 }  // namespace android
1270