1 /*
2  * Copyright 2018, The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *     http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 //#define LOG_NDEBUG 0
18 #define LOG_TAG "Codec2Buffer"
19 #define ATRACE_TAG  ATRACE_TAG_VIDEO
20 #include <utils/Log.h>
21 #include <utils/Trace.h>
22 
23 #include <aidl/android/hardware/graphics/common/Cta861_3.h>
24 #include <aidl/android/hardware/graphics/common/Smpte2086.h>
25 #include <android-base/properties.h>
26 #include <android/hardware/cas/native/1.0/types.h>
27 #include <android/hardware/drm/1.0/types.h>
28 #include <android/hardware/graphics/common/1.2/types.h>
29 #include <android/hardware/graphics/mapper/4.0/IMapper.h>
30 #include <gralloctypes/Gralloc4.h>
31 #include <hidlmemory/FrameworkUtils.h>
32 #include <media/hardware/HardwareAPI.h>
33 #include <media/stagefright/CodecBase.h>
34 #include <media/stagefright/MediaCodecConstants.h>
35 #include <media/stagefright/foundation/ABuffer.h>
36 #include <media/stagefright/foundation/AMessage.h>
37 #include <media/stagefright/foundation/AUtils.h>
38 #include <mediadrm/ICrypto.h>
39 #include <nativebase/nativebase.h>
40 #include <ui/Fence.h>
41 
42 #include <C2AllocatorGralloc.h>
43 #include <C2BlockInternal.h>
44 #include <C2Debug.h>
45 
46 #include "Codec2Buffer.h"
47 
48 namespace android {
49 
50 // Codec2Buffer
51 
52 bool Codec2Buffer::canCopyLinear(const std::shared_ptr<C2Buffer> &buffer) const {
53     if (const_cast<Codec2Buffer *>(this)->base() == nullptr) {
54         return false;
55     }
56     if (!buffer) {
57         // Nothing to copy, so we can copy by doing nothing.
58         return true;
59     }
60     if (buffer->data().type() != C2BufferData::LINEAR) {
61         return false;
62     }
63     if (buffer->data().linearBlocks().size() == 0u) {
64         // Nothing to copy, so we can copy by doing nothing.
65         return true;
66     } else if (buffer->data().linearBlocks().size() > 1u) {
67         // We don't know how to copy more than one block.
68         return false;
69     }
70     if (buffer->data().linearBlocks()[0].size() > capacity()) {
71         // It won't fit.
72         return false;
73     }
74     return true;
75 }
76 
77 bool Codec2Buffer::copyLinear(const std::shared_ptr<C2Buffer> &buffer) {
78     // We assume that all canCopyLinear() checks passed.
79     if (!buffer || buffer->data().linearBlocks().size() == 0u
80             || buffer->data().linearBlocks()[0].size() == 0u) {
81         setRange(0, 0);
82         return true;
83     }
84     C2ReadView view = buffer->data().linearBlocks()[0].map().get();
85     if (view.error() != C2_OK) {
86         ALOGD("Error while mapping: %d", view.error());
87         return false;
88     }
89     if (view.capacity() > capacity()) {
90         ALOGD("C2ConstLinearBlock lied --- it actually doesn't fit: view(%u) > this(%zu)",
91                 view.capacity(), capacity());
92         return false;
93     }
94     memcpy(base(), view.data(), view.capacity());
95     setRange(0, view.capacity());
96     return true;
97 }
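// Illustrative caller-side sketch of the copy contract above (hypothetical
// names, for illustration only):
//
//   if (clientBuffer->canCopy(c2Buffer)) {
//       clientBuffer->copy(c2Buffer);   // payload now occupies [0, block size)
//   } else {
//       // fall back to a larger client buffer before retrying
//   }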
98 
99 void Codec2Buffer::setImageData(const sp<ABuffer> &imageData) {
100     mImageData = imageData;
101 }
102 
103 // LocalLinearBuffer
104 
105 bool LocalLinearBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
106     return canCopyLinear(buffer);
107 }
108 
109 bool LocalLinearBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
110     return copyLinear(buffer);
111 }
112 
113 // DummyContainerBuffer
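// A DummyContainerBuffer carries no payload of its own: it wraps the single
// placeholder byte sDummyByte and only keeps a reference to the real C2Buffer,
// which travels by reference. The range is set to [0, 1) when a buffer is
// attached (so clients see a non-empty buffer) and to [0, 0) otherwise.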
114 
115 static uint8_t sDummyByte[1] = { 0 };
116 
117 DummyContainerBuffer::DummyContainerBuffer(
118         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer)
119     : Codec2Buffer(format, new ABuffer(sDummyByte, 1)),
120       mBufferRef(buffer) {
121     setRange(0, buffer ? 1 : 0);
122 }
123 
124 std::shared_ptr<C2Buffer> DummyContainerBuffer::asC2Buffer() {
125     return mBufferRef;
126 }
127 
128 void DummyContainerBuffer::clearC2BufferRefs() {
129     mBufferRef.reset();
130 }
131 
132 bool DummyContainerBuffer::canCopy(const std::shared_ptr<C2Buffer> &) const {
133     return !mBufferRef;
134 }
135 
136 bool DummyContainerBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
137     mBufferRef = buffer;
138     setRange(0, mBufferRef ? 1 : 0);
139     return true;
140 }
141 
142 // LinearBlockBuffer
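// LinearBlockBuffer exposes a writable C2LinearBlock to the client: Allocate()
// maps the block into a C2WriteView, the constructor wraps the mapped pointer
// in an ABuffer, and asC2Buffer() later shares only the written range
// [offset, offset + size) back to the component as a read-only C2Buffer.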
143 
144 // static
145 sp<LinearBlockBuffer> LinearBlockBuffer::Allocate(
146         const sp<AMessage> &format, const std::shared_ptr<C2LinearBlock> &block) {
147     C2WriteView writeView(block->map().get());
148     if (writeView.error() != C2_OK) {
149         return nullptr;
150     }
151     return new LinearBlockBuffer(format, std::move(writeView), block);
152 }
153 
154 std::shared_ptr<C2Buffer> LinearBlockBuffer::asC2Buffer() {
155     return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
156 }
157 
158 bool LinearBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
159     return canCopyLinear(buffer);
160 }
161 
162 bool LinearBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
163     return copyLinear(buffer);
164 }
165 
166 LinearBlockBuffer::LinearBlockBuffer(
167         const sp<AMessage> &format,
168         C2WriteView&& writeView,
169         const std::shared_ptr<C2LinearBlock> &block)
170     : Codec2Buffer(format, new ABuffer(writeView.data(), writeView.size())),
171       mWriteView(writeView),
172       mBlock(block) {
173 }
174 
175 // ConstLinearBlockBuffer
176 
177 // static
178 sp<ConstLinearBlockBuffer> ConstLinearBlockBuffer::Allocate(
179         const sp<AMessage> &format, const std::shared_ptr<C2Buffer> &buffer) {
180     if (!buffer
181             || buffer->data().type() != C2BufferData::LINEAR
182             || buffer->data().linearBlocks().size() != 1u) {
183         return nullptr;
184     }
185     C2ReadView readView(buffer->data().linearBlocks()[0].map().get());
186     if (readView.error() != C2_OK) {
187         return nullptr;
188     }
189     return new ConstLinearBlockBuffer(format, std::move(readView), buffer);
190 }
191 
192 ConstLinearBlockBuffer::ConstLinearBlockBuffer(
193         const sp<AMessage> &format,
194         C2ReadView&& readView,
195         const std::shared_ptr<C2Buffer> &buffer)
196     : Codec2Buffer(format, new ABuffer(
197             // NOTE: ABuffer only takes non-const pointer but this data is
198             //       supposed to be read-only.
199             const_cast<uint8_t *>(readView.data()), readView.capacity())),
200       mReadView(readView),
201       mBufferRef(buffer) {
202 }
203 
204 std::shared_ptr<C2Buffer> ConstLinearBlockBuffer::asC2Buffer() {
205     return mBufferRef;
206 }
207 
208 void ConstLinearBlockBuffer::clearC2BufferRefs() {
209     mBufferRef.reset();
210 }
211 
212 // GraphicView2MediaImageConverter
213 
214 namespace {
215 
216 class GraphicView2MediaImageConverter {
217 public:
218     /**
219      * Creates a C2GraphicView <=> MediaImage converter
220      *
221      * \param view C2GraphicView object
222      * \param format buffer format
223      * \param copy whether the converter is used for copying (rather than wrapping)
224      */
225     GraphicView2MediaImageConverter(
226             const C2GraphicView &view, const sp<AMessage> &format, bool copy)
227         : mInitCheck(NO_INIT),
228           mView(view),
229           mWidth(view.width()),
230           mHeight(view.height()),
231           mAllocatedDepth(0),
232           mBackBufferSize(0),
233           mMediaImage(new ABuffer(sizeof(MediaImage2))) {
234         ATRACE_CALL();
235         if (!format->findInt32(KEY_COLOR_FORMAT, &mClientColorFormat)) {
236             mClientColorFormat = COLOR_FormatYUV420Flexible;
237         }
238         if (!format->findInt32("android._color-format", &mComponentColorFormat)) {
239             mComponentColorFormat = COLOR_FormatYUV420Flexible;
240         }
241         if (view.error() != C2_OK) {
242             ALOGD("Converter: view.error() = %d", view.error());
243             mInitCheck = BAD_VALUE;
244             return;
245         }
246         MediaImage2 *mediaImage = (MediaImage2 *)mMediaImage->base();
247         const C2PlanarLayout &layout = view.layout();
248         if (layout.numPlanes == 0) {
249             ALOGD("Converter: 0 planes");
250             mInitCheck = BAD_VALUE;
251             return;
252         }
253         memset(mediaImage, 0, sizeof(*mediaImage));
254         mAllocatedDepth = layout.planes[0].allocatedDepth;
255         uint32_t bitDepth = layout.planes[0].bitDepth;
256 
257         // align width and height to support subsampling cleanly
258         uint32_t stride = align(view.crop().width, 2) * divUp(layout.planes[0].allocatedDepth, 8u);
259         uint32_t vStride = align(view.crop().height, 2);
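        // For illustration (hypothetical values): an 8-bit 640x480 crop gives
        // stride = align(640, 2) * divUp(8, 8) = 640 bytes and vStride = 480 rows,
        // while a 16-bit-allocated P010 buffer of the same size gives stride = 1280.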
260 
261         bool tryWrapping = !copy;
262 
263         switch (layout.type) {
264             case C2PlanarLayout::TYPE_YUV: {
265                 mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_YUV;
266                 if (layout.numPlanes != 3) {
267                     ALOGD("Converter: %d planes for YUV layout", layout.numPlanes);
268                     mInitCheck = BAD_VALUE;
269                     return;
270                 }
271                 std::optional<int> clientBitDepth = {};
272                 switch (mClientColorFormat) {
273                     case COLOR_FormatYUVP010:
274                         clientBitDepth = 10;
275                         break;
276                     case COLOR_FormatYUV411PackedPlanar:
277                     case COLOR_FormatYUV411Planar:
278                     case COLOR_FormatYUV420Flexible:
279                     case COLOR_FormatYUV420PackedPlanar:
280                     case COLOR_FormatYUV420PackedSemiPlanar:
281                     case COLOR_FormatYUV420Planar:
282                     case COLOR_FormatYUV420SemiPlanar:
283                     case COLOR_FormatYUV422Flexible:
284                     case COLOR_FormatYUV422PackedPlanar:
285                     case COLOR_FormatYUV422PackedSemiPlanar:
286                     case COLOR_FormatYUV422Planar:
287                     case COLOR_FormatYUV422SemiPlanar:
288                     case COLOR_FormatYUV444Flexible:
289                     case COLOR_FormatYUV444Interleaved:
290                         clientBitDepth = 8;
291                         break;
292                     default:
293                         // no-op; used with optional
294                         break;
295 
296                 }
297                 // conversion fails if the client bit-depth and the component bit-depth differ
298                 if ((clientBitDepth) && (bitDepth != clientBitDepth.value())) {
299                     ALOGD("Bit depth of client: %d and component: %d differs",
300                         *clientBitDepth, bitDepth);
301                     mInitCheck = BAD_VALUE;
302                     return;
303                 }
304                 C2PlaneInfo yPlane = layout.planes[C2PlanarLayout::PLANE_Y];
305                 C2PlaneInfo uPlane = layout.planes[C2PlanarLayout::PLANE_U];
306                 C2PlaneInfo vPlane = layout.planes[C2PlanarLayout::PLANE_V];
307                 if (yPlane.channel != C2PlaneInfo::CHANNEL_Y
308                         || uPlane.channel != C2PlaneInfo::CHANNEL_CB
309                         || vPlane.channel != C2PlaneInfo::CHANNEL_CR) {
310                     ALOGD("Converter: not YUV layout");
311                     mInitCheck = BAD_VALUE;
312                     return;
313                 }
314                 bool yuv420888 = yPlane.rowSampling == 1 && yPlane.colSampling == 1
315                         && uPlane.rowSampling == 2 && uPlane.colSampling == 2
316                         && vPlane.rowSampling == 2 && vPlane.colSampling == 2;
317                 if (yuv420888) {
318                     for (uint32_t i = 0; i < 3; ++i) {
319                         const C2PlaneInfo &plane = layout.planes[i];
320                         if (plane.allocatedDepth != 8 || plane.bitDepth != 8) {
321                             yuv420888 = false;
322                             break;
323                         }
324                     }
325                     yuv420888 = yuv420888 && yPlane.colInc == 1 && uPlane.rowInc == vPlane.rowInc;
326                 }
327                 int32_t copyFormat = mClientColorFormat;
328                 if (yuv420888 && mClientColorFormat == COLOR_FormatYUV420Flexible) {
329                     if (uPlane.colInc == 2 && vPlane.colInc == 2
330                             && yPlane.rowInc == uPlane.rowInc) {
331                         copyFormat = COLOR_FormatYUV420PackedSemiPlanar;
332                     } else if (uPlane.colInc == 1 && vPlane.colInc == 1
333                             && yPlane.rowInc == uPlane.rowInc * 2) {
334                         copyFormat = COLOR_FormatYUV420PackedPlanar;
335                     }
336                 }
337                 ALOGV("client_fmt=0x%x y:{colInc=%d rowInc=%d} u:{colInc=%d rowInc=%d} "
338                         "v:{colInc=%d rowInc=%d}",
339                         mClientColorFormat,
340                         yPlane.colInc, yPlane.rowInc,
341                         uPlane.colInc, uPlane.rowInc,
342                         vPlane.colInc, vPlane.rowInc);
343                 switch (copyFormat) {
344                     case COLOR_FormatYUV420Flexible:
345                     case COLOR_FormatYUV420Planar:
346                     case COLOR_FormatYUV420PackedPlanar:
347                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
348                         mediaImage->mPlane[mediaImage->Y].mColInc = 1;
349                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
350                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
351                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
352 
353                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
354                         mediaImage->mPlane[mediaImage->U].mColInc = 1;
355                         mediaImage->mPlane[mediaImage->U].mRowInc = stride / 2;
356                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
357                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
358 
359                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride * 5 / 4;
360                         mediaImage->mPlane[mediaImage->V].mColInc = 1;
361                         mediaImage->mPlane[mediaImage->V].mRowInc = stride / 2;
362                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
363                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
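                        // Worked example (illustrative): with stride = 640 and
                        // vStride = 480, Y starts at offset 0 (rowInc 640), U at
                        // 640 * 480 = 307200 (rowInc 320), and V at
                        // 640 * 480 * 5 / 4 = 384000 (rowInc 320).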
364 
365                         if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
366                             tryWrapping = yuv420888 && uPlane.colInc == 1 && vPlane.colInc == 1
367                                     && yPlane.rowInc == uPlane.rowInc * 2
368                                     && view.data()[0] < view.data()[1]
369                                     && view.data()[1] < view.data()[2];
370                         }
371                         break;
372 
373                     case COLOR_FormatYUV420SemiPlanar:
374                     case COLOR_FormatYUV420PackedSemiPlanar:
375                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
376                         mediaImage->mPlane[mediaImage->Y].mColInc = 1;
377                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
378                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
379                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
380 
381                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
382                         mediaImage->mPlane[mediaImage->U].mColInc = 2;
383                         mediaImage->mPlane[mediaImage->U].mRowInc = stride;
384                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
385                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
386 
387                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 1;
388                         mediaImage->mPlane[mediaImage->V].mColInc = 2;
389                         mediaImage->mPlane[mediaImage->V].mRowInc = stride;
390                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
391                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
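                        // Worked example (illustrative): with stride = 640 and
                        // vStride = 480, the interleaved chroma starts at
                        // 640 * 480 = 307200; U is at 307200 and V at 307201
                        // (colInc 2 each), and both advance by rowInc 640.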
392 
393                         if (tryWrapping && mClientColorFormat != COLOR_FormatYUV420Flexible) {
394                             tryWrapping = yuv420888 && uPlane.colInc == 2 && vPlane.colInc == 2
395                                     && yPlane.rowInc == uPlane.rowInc
396                                     && view.data()[0] < view.data()[1]
397                                     && view.data()[1] < view.data()[2];
398                         }
399                         break;
400 
401                     case COLOR_FormatYUVP010:
402                         // stride is in bytes
403                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
404                         mediaImage->mPlane[mediaImage->Y].mColInc = 2;
405                         mediaImage->mPlane[mediaImage->Y].mRowInc = stride;
406                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = 1;
407                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = 1;
408 
409                         mediaImage->mPlane[mediaImage->U].mOffset = stride * vStride;
410                         mediaImage->mPlane[mediaImage->U].mColInc = 4;
411                         mediaImage->mPlane[mediaImage->U].mRowInc = stride;
412                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = 2;
413                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = 2;
414 
415                         mediaImage->mPlane[mediaImage->V].mOffset = stride * vStride + 2;
416                         mediaImage->mPlane[mediaImage->V].mColInc = 4;
417                         mediaImage->mPlane[mediaImage->V].mRowInc = stride;
418                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = 2;
419                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = 2;
420                         if (tryWrapping) {
421                             tryWrapping = yPlane.allocatedDepth == 16
422                                     && uPlane.allocatedDepth == 16
423                                     && vPlane.allocatedDepth == 16
424                                     && yPlane.bitDepth == 10
425                                     && uPlane.bitDepth == 10
426                                     && vPlane.bitDepth == 10
427                                     && yPlane.rightShift == 6
428                                     && uPlane.rightShift == 6
429                                     && vPlane.rightShift == 6
430                                     && yPlane.rowSampling == 1 && yPlane.colSampling == 1
431                                     && uPlane.rowSampling == 2 && uPlane.colSampling == 2
432                                     && vPlane.rowSampling == 2 && vPlane.colSampling == 2
433                                     && yPlane.colInc == 2
434                                     && uPlane.colInc == 4
435                                     && vPlane.colInc == 4
436                                     && yPlane.rowInc == uPlane.rowInc
437                                     && yPlane.rowInc == vPlane.rowInc;
438                         }
439                         break;
440 
441                     default: {
442                         // default to fully planar format --- this will be overridden if wrapping
443                         // TODO: keep interleaved format
444                         int32_t colInc = divUp(mAllocatedDepth, 8u);
445                         int32_t rowInc = stride * colInc / yPlane.colSampling;
446                         mediaImage->mPlane[mediaImage->Y].mOffset = 0;
447                         mediaImage->mPlane[mediaImage->Y].mColInc = colInc;
448                         mediaImage->mPlane[mediaImage->Y].mRowInc = rowInc;
449                         mediaImage->mPlane[mediaImage->Y].mHorizSubsampling = yPlane.colSampling;
450                         mediaImage->mPlane[mediaImage->Y].mVertSubsampling = yPlane.rowSampling;
451                         int32_t offset = rowInc * vStride / yPlane.rowSampling;
452 
453                         rowInc = stride * colInc / uPlane.colSampling;
454                         mediaImage->mPlane[mediaImage->U].mOffset = offset;
455                         mediaImage->mPlane[mediaImage->U].mColInc = colInc;
456                         mediaImage->mPlane[mediaImage->U].mRowInc = rowInc;
457                         mediaImage->mPlane[mediaImage->U].mHorizSubsampling = uPlane.colSampling;
458                         mediaImage->mPlane[mediaImage->U].mVertSubsampling = uPlane.rowSampling;
459                         offset += rowInc * vStride / uPlane.rowSampling;
460 
461                         rowInc = stride * colInc / vPlane.colSampling;
462                         mediaImage->mPlane[mediaImage->V].mOffset = offset;
463                         mediaImage->mPlane[mediaImage->V].mColInc = colInc;
464                         mediaImage->mPlane[mediaImage->V].mRowInc = rowInc;
465                         mediaImage->mPlane[mediaImage->V].mHorizSubsampling = vPlane.colSampling;
466                         mediaImage->mPlane[mediaImage->V].mVertSubsampling = vPlane.rowSampling;
467                         break;
468                     }
469                 }
470                 break;
471             }
472 
473             case C2PlanarLayout::TYPE_YUVA:
474                 ALOGD("Converter: unrecognized color format "
475                         "(client %d component %d) for YUVA layout",
476                         mClientColorFormat, mComponentColorFormat);
477                 mInitCheck = NO_INIT;
478                 return;
479             case C2PlanarLayout::TYPE_RGB:
480                 ALOGD("Converter: unrecognized color format "
481                         "(client %d component %d) for RGB layout",
482                         mClientColorFormat, mComponentColorFormat);
483                 mInitCheck = NO_INIT;
484                 // TODO: support MediaImage layout
485                 return;
486             case C2PlanarLayout::TYPE_RGBA:
487                 ALOGD("Converter: unrecognized color format "
488                         "(client %d component %d) for RGBA layout",
489                         mClientColorFormat, mComponentColorFormat);
490                 mInitCheck = NO_INIT;
491                 // TODO: support MediaImage layout
492                 return;
493             default:
494                 mediaImage->mType = MediaImage2::MEDIA_IMAGE_TYPE_UNKNOWN;
495                 if (layout.numPlanes == 1) {
496                     const C2PlaneInfo &plane = layout.planes[0];
497                     if (plane.colInc < 0 || plane.rowInc < 0) {
498                         // Copy-only if we have negative colInc/rowInc
499                         tryWrapping = false;
500                     }
501                     mediaImage->mPlane[0].mOffset = 0;
502                     mediaImage->mPlane[0].mColInc = std::abs(plane.colInc);
503                     mediaImage->mPlane[0].mRowInc = std::abs(plane.rowInc);
504                     mediaImage->mPlane[0].mHorizSubsampling = plane.colSampling;
505                     mediaImage->mPlane[0].mVertSubsampling = plane.rowSampling;
506                 } else {
507                     ALOGD("Converter: unrecognized layout: color format (client %d component %d)",
508                             mClientColorFormat, mComponentColorFormat);
509                     mInitCheck = NO_INIT;
510                     return;
511                 }
512                 break;
513         }
514         if (tryWrapping) {
515             // try to map directly. check if the planes are near one another
516             const uint8_t *minPtr = mView.data()[0];
517             const uint8_t *maxPtr = mView.data()[0];
518             int32_t planeSize = 0;
519             for (uint32_t i = 0; i < layout.numPlanes; ++i) {
520                 const C2PlaneInfo &plane = layout.planes[i];
521                 int64_t planeStride = std::abs(plane.rowInc / plane.colInc);
522                 ssize_t minOffset = plane.minOffset(
523                         mWidth / plane.colSampling, mHeight / plane.rowSampling);
524                 ssize_t maxOffset = plane.maxOffset(
525                         mWidth / plane.colSampling, mHeight / plane.rowSampling);
526                 if (minPtr > mView.data()[i] + minOffset) {
527                     minPtr = mView.data()[i] + minOffset;
528                 }
529                 if (maxPtr < mView.data()[i] + maxOffset) {
530                     maxPtr = mView.data()[i] + maxOffset;
531                 }
532                 planeSize += planeStride * divUp(mAllocatedDepth, 8u)
533                         * align(mHeight, 64) / plane.rowSampling;
534             }
535 
536             if (minPtr == mView.data()[0] && (maxPtr - minPtr + 1) <= planeSize) {
537                 // FIXME: this is risky as reading/writing data out of bounds results
538                 //        in undefined behavior, but gralloc does assume a
539                 //        contiguous mapping
540                 for (uint32_t i = 0; i < layout.numPlanes; ++i) {
541                     const C2PlaneInfo &plane = layout.planes[i];
542                     mediaImage->mPlane[i].mOffset = mView.data()[i] - minPtr;
543                     mediaImage->mPlane[i].mColInc = plane.colInc;
544                     mediaImage->mPlane[i].mRowInc = plane.rowInc;
545                     mediaImage->mPlane[i].mHorizSubsampling = plane.colSampling;
546                     mediaImage->mPlane[i].mVertSubsampling = plane.rowSampling;
547                 }
548                 mWrapped = new ABuffer(const_cast<uint8_t *>(minPtr),
549                                        maxPtr - minPtr + 1);
550                 ALOGV("Converter: wrapped (capacity=%zu)", mWrapped->capacity());
551             }
552         }
553         mediaImage->mNumPlanes = layout.numPlanes;
554         mediaImage->mWidth = view.crop().width;
555         mediaImage->mHeight = view.crop().height;
556         mediaImage->mBitDepth = bitDepth;
557         mediaImage->mBitDepthAllocated = mAllocatedDepth;
558 
559         uint32_t bufferSize = 0;
560         for (uint32_t i = 0; i < layout.numPlanes; ++i) {
561             const C2PlaneInfo &plane = layout.planes[i];
562             if (plane.allocatedDepth < plane.bitDepth
563                     || plane.rightShift != plane.allocatedDepth - plane.bitDepth) {
564                 ALOGD("rightShift value of %u unsupported", plane.rightShift);
565                 mInitCheck = BAD_VALUE;
566                 return;
567             }
568             if (plane.allocatedDepth > 8 && plane.endianness != C2PlaneInfo::NATIVE) {
569                 ALOGD("endianness value of %u unsupported", plane.endianness);
570                 mInitCheck = BAD_VALUE;
571                 return;
572             }
573             if (plane.allocatedDepth != mAllocatedDepth || plane.bitDepth != bitDepth) {
574                 ALOGD("different allocatedDepth/bitDepth per plane unsupported");
575                 mInitCheck = BAD_VALUE;
576                 return;
577             }
578             // stride is in bytes
579             bufferSize += stride * vStride / plane.rowSampling / plane.colSampling;
580         }
581 
582         mBackBufferSize = bufferSize;
583         mInitCheck = OK;
584     }
585 
586     status_t initCheck() const { return mInitCheck; }
587 
588     uint32_t backBufferSize() const { return mBackBufferSize; }
589 
590     /**
591      * Wrap C2GraphicView using a MediaImage2. Note that if not wrapped, the content is not mapped
592      * in this function --- the caller should use copyToMediaImage() to copy the
593      * data into a backing buffer explicitly.
594      *
595      * \return media buffer. This is null if wrapping failed.
596      */
597     sp<ABuffer> wrap() const {
598         if (mBackBuffer == nullptr) {
599             return mWrapped;
600         }
601         return nullptr;
602     }
603 
604     bool setBackBuffer(const sp<ABuffer> &backBuffer) {
605         if (backBuffer == nullptr) {
606             return false;
607         }
608         if (backBuffer->capacity() < mBackBufferSize) {
609             return false;
610         }
611         backBuffer->setRange(0, mBackBufferSize);
612         mBackBuffer = backBuffer;
613         return true;
614     }
615 
616     /**
617      * Copy C2GraphicView to MediaImage2.
618      */
619     status_t copyToMediaImage() {
620         ATRACE_CALL();
621         if (mInitCheck != OK) {
622             return mInitCheck;
623         }
624         return ImageCopy(mBackBuffer->base(), getMediaImage(), mView);
625     }
626 
627     const sp<ABuffer> &imageData() const { return mMediaImage; }
628 
629 private:
630     status_t mInitCheck;
631 
632     const C2GraphicView mView;
633     uint32_t mWidth;
634     uint32_t mHeight;
635     int32_t mClientColorFormat;  ///< SDK color format for MediaImage
636     int32_t mComponentColorFormat;  ///< SDK color format from component
637     sp<ABuffer> mWrapped;  ///< wrapped buffer (if we can map C2Buffer to an ABuffer)
638     uint32_t mAllocatedDepth;
639     uint32_t mBackBufferSize;
640     sp<ABuffer> mMediaImage;
641     std::function<sp<ABuffer>(size_t)> mAlloc;
642 
643     sp<ABuffer> mBackBuffer;    ///< backing buffer if we have to copy C2Buffer <=> ABuffer
644 
645     MediaImage2 *getMediaImage() {
646         return (MediaImage2 *)mMediaImage->base();
647     }
648 };
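// Rough usage sketch of the converter (for illustration only; the real call
// sites are the Allocate()/canCopy()/copy() functions below):
//
//   GraphicView2MediaImageConverter conv(view, format, false /* copy */);
//   sp<ABuffer> buf = conv.wrap();              // nullptr if not wrappable
//   if (buf == nullptr) {
//       buf = alloc(conv.backBufferSize());     // flat back buffer
//       conv.setBackBuffer(buf);
//       conv.copyToMediaImage();                // explicit copy path
//   }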
649 
650 }  // namespace
651 
652 // GraphicBlockBuffer
653 
654 // static
655 sp<GraphicBlockBuffer> GraphicBlockBuffer::Allocate(
656         const sp<AMessage> &format,
657         const std::shared_ptr<C2GraphicBlock> &block,
658         std::function<sp<ABuffer>(size_t)> alloc) {
659     ATRACE_BEGIN("GraphicBlockBuffer::Allocate block->map()");
660     C2GraphicView view(block->map().get());
661     ATRACE_END();
662     if (view.error() != C2_OK) {
663         ALOGD("C2GraphicBlock::map failed: %d", view.error());
664         return nullptr;
665     }
666 
667     GraphicView2MediaImageConverter converter(view, format, false /* copy */);
668     if (converter.initCheck() != OK) {
669         ALOGD("Converter init failed: %d", converter.initCheck());
670         return nullptr;
671     }
672     bool wrapped = true;
673     sp<ABuffer> buffer = converter.wrap();
674     if (buffer == nullptr) {
675         buffer = alloc(converter.backBufferSize());
676         if (!converter.setBackBuffer(buffer)) {
677             ALOGD("Converter failed to set back buffer");
678             return nullptr;
679         }
680         wrapped = false;
681     }
682     return new GraphicBlockBuffer(
683             format,
684             buffer,
685             std::move(view),
686             block,
687             converter.imageData(),
688             wrapped);
689 }
690 
691 GraphicBlockBuffer::GraphicBlockBuffer(
692         const sp<AMessage> &format,
693         const sp<ABuffer> &buffer,
694         C2GraphicView &&view,
695         const std::shared_ptr<C2GraphicBlock> &block,
696         const sp<ABuffer> &imageData,
697         bool wrapped)
698     : Codec2Buffer(format, buffer),
699       mView(view),
700       mBlock(block),
701       mWrapped(wrapped) {
702     setImageData(imageData);
703 }
704 
705 std::shared_ptr<C2Buffer> GraphicBlockBuffer::asC2Buffer() {
706     ATRACE_CALL();
707     uint32_t width = mView.width();
708     uint32_t height = mView.height();
709     if (!mWrapped) {
710         (void)ImageCopy(mView, base(), imageData());
711     }
712     return C2Buffer::CreateGraphicBuffer(
713             mBlock->share(C2Rect(width, height), C2Fence()));
714 }
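// Note: when the buffer was not wrapped (mWrapped == false) the client wrote
// into a flat back buffer laid out according to imageData(), so asC2Buffer()
// above copies that data back into the mapped graphic view before sharing the
// block with the component.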
715 
716 // GraphicMetadataBuffer
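// In metadata mode the client does not pass pixels; it fills a
// VideoNativeMetadata struct with an ANativeWindowBuffer pointer, and
// asC2Buffer() below wraps the underlying gralloc handle into a C2GraphicBlock.
// This relies on the producer and this process agreeing on pointer size, hence
// the 32/64-bit guard below.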
717 GraphicMetadataBuffer::GraphicMetadataBuffer(
718         const sp<AMessage> &format,
719         const std::shared_ptr<C2Allocator> &alloc)
720     : Codec2Buffer(format, new ABuffer(sizeof(VideoNativeMetadata))),
721       mAlloc(alloc) {
722     ((VideoNativeMetadata *)base())->pBuffer = nullptr;
723 }
724 
725 std::shared_ptr<C2Buffer> GraphicMetadataBuffer::asC2Buffer() {
726 #ifdef __LP64__
727     static std::once_flag s_checkOnce;
728     static bool s_is64bitOk {true};
729     std::call_once(s_checkOnce, [&](){
730         const std::string abi32list =
731         ::android::base::GetProperty("ro.product.cpu.abilist32", "");
732         if (!abi32list.empty()) {
733             int32_t inputSurfaceSetting =
734             ::android::base::GetIntProperty("debug.stagefright.c2inputsurface", int32_t(0));
735             s_is64bitOk = inputSurfaceSetting != 0;
736         }
737     });
738 
739     if (!s_is64bitOk) {
740         ALOGE("GraphicMetadataBuffer does not work on a 32+64 system if compiled as a 64-bit object "
741               "when debug.stagefright.c2inputsurface is set to 0");
742         return nullptr;
743     }
744 #endif
745 
746     VideoNativeMetadata *meta = (VideoNativeMetadata *)base();
747     ANativeWindowBuffer *buffer = (ANativeWindowBuffer *)meta->pBuffer;
748     if (buffer == nullptr) {
749         ALOGD("VideoNativeMetadata contains null buffer");
750         return nullptr;
751     }
752 
753     ALOGV("VideoNativeMetadata: %dx%d", buffer->width, buffer->height);
754     C2Handle *handle = WrapNativeCodec2GrallocHandle(
755             buffer->handle,
756             buffer->width,
757             buffer->height,
758             buffer->format,
759             buffer->usage,
760             buffer->stride);
761     std::shared_ptr<C2GraphicAllocation> alloc;
762     c2_status_t err = mAlloc->priorGraphicAllocation(handle, &alloc);
763     if (err != C2_OK) {
764         ALOGD("Failed to wrap VideoNativeMetadata into C2GraphicAllocation");
765         native_handle_close(handle);
766         native_handle_delete(handle);
767         return nullptr;
768     }
769     std::shared_ptr<C2GraphicBlock> block = _C2BlockFactory::CreateGraphicBlock(alloc);
770 
771     meta->pBuffer = 0;
772     // TODO: wrap this in C2Fence so that the component can wait when it
773     //       actually starts processing.
774     if (meta->nFenceFd >= 0) {
775         sp<Fence> fence(new Fence(meta->nFenceFd));
776         fence->waitForever(LOG_TAG);
777     }
778     return C2Buffer::CreateGraphicBuffer(
779             block->share(C2Rect(buffer->width, buffer->height), C2Fence()));
780 }
781 
782 // ConstGraphicBlockBuffer
783 
784 // static
785 sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::Allocate(
786         const sp<AMessage> &format,
787         const std::shared_ptr<C2Buffer> &buffer,
788         std::function<sp<ABuffer>(size_t)> alloc) {
789     if (!buffer
790             || buffer->data().type() != C2BufferData::GRAPHIC
791             || buffer->data().graphicBlocks().size() != 1u) {
792         ALOGD("C2Buffer precond fail");
793         return nullptr;
794     }
795     ATRACE_BEGIN("ConstGraphicBlockBuffer::Allocate block->map()");
796     std::unique_ptr<const C2GraphicView> view(std::make_unique<const C2GraphicView>(
797             buffer->data().graphicBlocks()[0].map().get()));
798     ATRACE_END();
799     std::unique_ptr<const C2GraphicView> holder;
800 
801     GraphicView2MediaImageConverter converter(*view, format, false /* copy */);
802     if (converter.initCheck() != OK) {
803         ALOGD("Converter init failed: %d", converter.initCheck());
804         return nullptr;
805     }
806     bool wrapped = true;
807     sp<ABuffer> aBuffer = converter.wrap();
808     if (aBuffer == nullptr) {
809         aBuffer = alloc(converter.backBufferSize());
810         if (!converter.setBackBuffer(aBuffer)) {
811             ALOGD("Converter failed to set back buffer");
812             return nullptr;
813         }
814         wrapped = false;
815         converter.copyToMediaImage();
816         // We don't need the view.
817         holder = std::move(view);
818     }
819     return new ConstGraphicBlockBuffer(
820             format,
821             aBuffer,
822             std::move(view),
823             buffer,
824             converter.imageData(),
825             wrapped);
826 }
827 
828 // static
829 sp<ConstGraphicBlockBuffer> ConstGraphicBlockBuffer::AllocateEmpty(
830         const sp<AMessage> &format,
831         std::function<sp<ABuffer>(size_t)> alloc) {
832     int32_t width, height;
833     if (!format->findInt32("width", &width)
834             || !format->findInt32("height", &height)) {
835         ALOGD("format had no width / height");
836         return nullptr;
837     }
838     int32_t colorFormat = COLOR_FormatYUV420Flexible;
839     int32_t bpp = 12;  // 8(Y) + 2(U) + 2(V)
840     if (format->findInt32(KEY_COLOR_FORMAT, &colorFormat)) {
841         if (colorFormat == COLOR_FormatYUVP010) {
842             bpp = 24;  // 16(Y) + 4(U) + 4(V)
843         }
844     }
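    // Size sketch (illustrative): for a 1920x1080 format this requests
    // align(1920, 16) * align(1080, 16) * bpp / 8 = 1920 * 1088 * 12 / 8
    // = 3133440 bytes for flexible YUV 4:2:0, or twice that (bpp 24) for P010.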
845     sp<ABuffer> aBuffer(alloc(align(width, 16) * align(height, 16) * bpp / 8));
846     return new ConstGraphicBlockBuffer(
847             format,
848             aBuffer,
849             nullptr,
850             nullptr,
851             nullptr,
852             false);
853 }
854 
855 ConstGraphicBlockBuffer::ConstGraphicBlockBuffer(
856         const sp<AMessage> &format,
857         const sp<ABuffer> &aBuffer,
858         std::unique_ptr<const C2GraphicView> &&view,
859         const std::shared_ptr<C2Buffer> &buffer,
860         const sp<ABuffer> &imageData,
861         bool wrapped)
862     : Codec2Buffer(format, aBuffer),
863       mView(std::move(view)),
864       mBufferRef(buffer),
865       mWrapped(wrapped) {
866     setImageData(imageData);
867 }
868 
869 std::shared_ptr<C2Buffer> ConstGraphicBlockBuffer::asC2Buffer() {
870     return mBufferRef;
871 }
872 
873 void ConstGraphicBlockBuffer::clearC2BufferRefs() {
874     mView.reset();
875     mBufferRef.reset();
876 }
877 
878 bool ConstGraphicBlockBuffer::canCopy(const std::shared_ptr<C2Buffer> &buffer) const {
879     if (mWrapped || mBufferRef) {
880         ALOGD("ConstGraphicBlockBuffer::canCopy: %swrapped ; buffer ref %s",
881                 mWrapped ? "" : "not ", mBufferRef ? "exists" : "doesn't exist");
882         return false;
883     }
884     if (!buffer) {
885         // Nothing to copy, so we can copy by doing nothing.
886         return true;
887     }
888     if (buffer->data().type() != C2BufferData::GRAPHIC) {
889         ALOGD("ConstGraphicBlockBuffer::canCopy: buffer precondition unsatisfied");
890         return false;
891     }
892     if (buffer->data().graphicBlocks().size() == 0) {
893         return true;
894     } else if (buffer->data().graphicBlocks().size() != 1u) {
895         ALOGD("ConstGraphicBlockBuffer::canCopy: too many blocks");
896         return false;
897     }
898 
899     ATRACE_BEGIN("ConstGraphicBlockBuffer::canCopy block->map()");
900     GraphicView2MediaImageConverter converter(
901             buffer->data().graphicBlocks()[0].map().get(),
902             // FIXME: format() is not const, but we cannot change it, so do a const cast here
903             const_cast<ConstGraphicBlockBuffer *>(this)->format(),
904             true /* copy */);
905     ATRACE_END();
906     if (converter.initCheck() != OK) {
907         ALOGD("ConstGraphicBlockBuffer::canCopy: converter init failed: %d", converter.initCheck());
908         return false;
909     }
910     if (converter.backBufferSize() > capacity()) {
911         ALOGD("ConstGraphicBlockBuffer::canCopy: insufficient capacity: req %u has %zu",
912                 converter.backBufferSize(), capacity());
913         return false;
914     }
915     return true;
916 }
917 
918 bool ConstGraphicBlockBuffer::copy(const std::shared_ptr<C2Buffer> &buffer) {
919     if (!buffer || buffer->data().graphicBlocks().size() == 0) {
920         setRange(0, 0);
921         return true;
922     }
923 
924     GraphicView2MediaImageConverter converter(
925             buffer->data().graphicBlocks()[0].map().get(), format(), true /* copy */);
926     if (converter.initCheck() != OK) {
927         ALOGD("ConstGraphicBlockBuffer::copy: converter init failed: %d", converter.initCheck());
928         return false;
929     }
930     sp<ABuffer> aBuffer = new ABuffer(base(), capacity());
931     if (!converter.setBackBuffer(aBuffer)) {
932         ALOGD("ConstGraphicBlockBuffer::copy: set back buffer failed");
933         return false;
934     }
935     setRange(0, aBuffer->size());  // align size info
936     converter.copyToMediaImage();
937     setImageData(converter.imageData());
938     mBufferRef = buffer;
939     return true;
940 }
941 
942 // EncryptedLinearBlockBuffer
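// EncryptedLinearBlockBuffer keeps the encrypted input in shared memory
// (mMemory) next to the destination C2LinearBlock (mBlock). The expected flow:
// fillSourceBuffer() exposes the encrypted bytes to the DRM/CAS HAL, the HAL
// decrypts them (in place or into another IMemory), and copyDecryptedContent()
// or copyDecryptedContentFromMemory() moves the plaintext into the block that
// asC2Buffer() finally shares with the component.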
943 
944 EncryptedLinearBlockBuffer::EncryptedLinearBlockBuffer(
945         const sp<AMessage> &format,
946         const std::shared_ptr<C2LinearBlock> &block,
947         const sp<IMemory> &memory,
948         int32_t heapSeqNum)
949     // TODO: Using unsecurePointer() has some associated security pitfalls
950     //       (see declaration for details).
951     //       Either document why it is safe in this case or address the
952     //       issue (e.g. by copying).
953     : Codec2Buffer(format, new ABuffer(memory->unsecurePointer(), memory->size())),
954       mBlock(block),
955       mMemory(memory),
956       mHeapSeqNum(heapSeqNum) {
957 }
958 
959 std::shared_ptr<C2Buffer> EncryptedLinearBlockBuffer::asC2Buffer() {
960     return C2Buffer::CreateLinearBuffer(mBlock->share(offset(), size(), C2Fence()));
961 }
962 
963 void EncryptedLinearBlockBuffer::fillSourceBuffer(
964         hardware::drm::V1_0::SharedBuffer *source) {
965     BufferChannelBase::IMemoryToSharedBuffer(mMemory, mHeapSeqNum, source);
966 }
967 
968 void EncryptedLinearBlockBuffer::fillSourceBuffer(
969         hardware::cas::native::V1_0::SharedBuffer *source) {
970     ssize_t offset;
971     size_t size;
972 
973     mHidlMemory = hardware::fromHeap(mMemory->getMemory(&offset, &size));
974     source->heapBase = *mHidlMemory;
975     source->offset = offset;
976     source->size = size;
977 }
978 
979 bool EncryptedLinearBlockBuffer::copyDecryptedContent(
980         const sp<IMemory> &decrypted, size_t length) {
981     C2WriteView view = mBlock->map().get();
982     if (view.error() != C2_OK) {
983         return false;
984     }
985     if (view.size() < length) {
986         return false;
987     }
988     memcpy(view.data(), decrypted->unsecurePointer(), length);
989     return true;
990 }
991 
992 bool EncryptedLinearBlockBuffer::copyDecryptedContentFromMemory(size_t length) {
993     return copyDecryptedContent(mMemory, length);
994 }
995 
996 native_handle_t *EncryptedLinearBlockBuffer::handle() const {
997     return const_cast<native_handle_t *>(mBlock->handle());
998 }
999 
1000 using ::aidl::android::hardware::graphics::common::Cta861_3;
1001 using ::aidl::android::hardware::graphics::common::Dataspace;
1002 using ::aidl::android::hardware::graphics::common::Smpte2086;
1003 
1004 using ::android::gralloc4::MetadataType_Cta861_3;
1005 using ::android::gralloc4::MetadataType_Dataspace;
1006 using ::android::gralloc4::MetadataType_Smpte2086;
1007 using ::android::gralloc4::MetadataType_Smpte2094_40;
1008 
1009 using ::android::hardware::Return;
1010 using ::android::hardware::hidl_vec;
1011 
1012 using Error4 = ::android::hardware::graphics::mapper::V4_0::Error;
1013 using IMapper4 = ::android::hardware::graphics::mapper::V4_0::IMapper;
1014 
1015 namespace {
1016 
1017 sp<IMapper4> GetMapper4() {
1018     static sp<IMapper4> sMapper = IMapper4::getService();
1019     return sMapper;
1020 }
1021 
1022 class Gralloc4Buffer {
1023 public:
1024     Gralloc4Buffer(const C2Handle *const handle) : mBuffer(nullptr) {
1025         sp<IMapper4> mapper = GetMapper4();
1026         if (!mapper) {
1027             return;
1028         }
1029         // Unwrap raw buffer handle from the C2Handle
1030         native_handle_t *nh = UnwrapNativeCodec2GrallocHandle(handle);
1031         if (!nh) {
1032             return;
1033         }
1034         // Import the raw handle so IMapper can use the buffer. The imported
1035         // handle must be freed when the client is done with the buffer.
1036         mapper->importBuffer(
1037                 hardware::hidl_handle(nh),
1038                 [&](const Error4 &error, void *buffer) {
1039                     if (error == Error4::NONE) {
1040                         mBuffer = buffer;
1041                     }
1042                 });
1043 
1044         // TRICKY: UnwrapNativeCodec2GrallocHandle creates a new handle but
1045         //         does not clone the fds. Thus we need to delete the handle
1046         //         without closing it.
1047         native_handle_delete(nh);
1048     }
1049 
1050     ~Gralloc4Buffer() {
1051         sp<IMapper4> mapper = GetMapper4();
1052         if (mapper && mBuffer) {
1053             // Free the imported buffer handle. This does not release the
1054             // underlying buffer itself.
1055             mapper->freeBuffer(mBuffer);
1056         }
1057     }
1058 
1059     void *get() const { return mBuffer; }
1060     operator bool() const { return (mBuffer != nullptr); }
1061 private:
1062     void *mBuffer;
1063 };
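// Gralloc4Buffer is a small RAII helper: the constructor imports the raw
// gralloc handle through IMapper 4.0 and the destructor frees that import, so
// the get()/set() metadata calls below can use buffer.get() without leaking
// the imported handle on early returns.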
1064 
1065 }  // namespace
1066 
1067 c2_status_t GetHdrMetadataFromGralloc4Handle(
1068         const C2Handle *const handle,
1069         std::shared_ptr<C2StreamHdrStaticMetadataInfo::input> *staticInfo,
1070         std::shared_ptr<C2StreamHdrDynamicMetadataInfo::input> *dynamicInfo) {
1071     c2_status_t err = C2_OK;
1072     sp<IMapper4> mapper = GetMapper4();
1073     Gralloc4Buffer buffer(handle);
1074     if (!mapper || !buffer) {
1075         // Gralloc4 not supported; nothing to do
1076         return err;
1077     }
1078     Error4 mapperErr = Error4::NONE;
1079     if (staticInfo) {
1080         ALOGV("Grabbing static HDR info from gralloc4 metadata");
1081         staticInfo->reset(new C2StreamHdrStaticMetadataInfo::input(0u));
1082         memset(&(*staticInfo)->mastering, 0, sizeof((*staticInfo)->mastering));
1083         (*staticInfo)->maxCll = 0;
1084         (*staticInfo)->maxFall = 0;
1085         IMapper4::get_cb cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1086             mapperErr = err;
1087             if (err != Error4::NONE) {
1088                 return;
1089             }
1090 
1091             std::optional<Smpte2086> smpte2086;
1092             gralloc4::decodeSmpte2086(vec, &smpte2086);
1093             if (smpte2086) {
1094                 (*staticInfo)->mastering.red.x    = smpte2086->primaryRed.x;
1095                 (*staticInfo)->mastering.red.y    = smpte2086->primaryRed.y;
1096                 (*staticInfo)->mastering.green.x  = smpte2086->primaryGreen.x;
1097                 (*staticInfo)->mastering.green.y  = smpte2086->primaryGreen.y;
1098                 (*staticInfo)->mastering.blue.x   = smpte2086->primaryBlue.x;
1099                 (*staticInfo)->mastering.blue.y   = smpte2086->primaryBlue.y;
1100                 (*staticInfo)->mastering.white.x  = smpte2086->whitePoint.x;
1101                 (*staticInfo)->mastering.white.y  = smpte2086->whitePoint.y;
1102 
1103                 (*staticInfo)->mastering.maxLuminance = smpte2086->maxLuminance;
1104                 (*staticInfo)->mastering.minLuminance = smpte2086->minLuminance;
1105             } else {
1106                 mapperErr = Error4::BAD_VALUE;
1107             }
1108         };
1109         Return<void> ret = mapper->get(buffer.get(), MetadataType_Smpte2086, cb);
1110         if (!ret.isOk()) {
1111             err = C2_REFUSED;
1112         } else if (mapperErr != Error4::NONE) {
1113             err = C2_CORRUPTED;
1114         }
1115         cb = [&mapperErr, staticInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1116             mapperErr = err;
1117             if (err != Error4::NONE) {
1118                 return;
1119             }
1120 
1121             std::optional<Cta861_3> cta861_3;
1122             gralloc4::decodeCta861_3(vec, &cta861_3);
1123             if (cta861_3) {
1124                 (*staticInfo)->maxCll   = cta861_3->maxContentLightLevel;
1125                 (*staticInfo)->maxFall  = cta861_3->maxFrameAverageLightLevel;
1126             } else {
1127                 mapperErr = Error4::BAD_VALUE;
1128             }
1129         };
1130         ret = mapper->get(buffer.get(), MetadataType_Cta861_3, cb);
1131         if (!ret.isOk()) {
1132             err = C2_REFUSED;
1133         } else if (mapperErr != Error4::NONE) {
1134             err = C2_CORRUPTED;
1135         }
1136     }
1137     if (dynamicInfo) {
1138         ALOGV("Grabbing dynamic HDR info from gralloc4 metadata");
1139         dynamicInfo->reset();
1140         IMapper4::get_cb cb = [&mapperErr, dynamicInfo](Error4 err, const hidl_vec<uint8_t> &vec) {
1141             mapperErr = err;
1142             if (err != Error4::NONE) {
1143                 return;
1144             }
1145             if (!dynamicInfo) {
1146                 return;
1147             }
1148             *dynamicInfo = C2StreamHdrDynamicMetadataInfo::input::AllocShared(
1149                     vec.size(), 0u, C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40);
1150             memcpy((*dynamicInfo)->m.data, vec.data(), vec.size());
1151         };
1152         Return<void> ret = mapper->get(buffer.get(), MetadataType_Smpte2094_40, cb);
1153         if (!ret.isOk() || mapperErr != Error4::NONE) {
1154             dynamicInfo->reset();
1155         }
1156     }
1157 
1158     return err;
1159 }
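// Error mapping used above and below: a failed HIDL transaction becomes
// C2_REFUSED and a mapper or decode failure becomes C2_CORRUPTED, while the
// static info fields keep their zero defaults in that case and a failed
// dynamic query resets *dynamicInfo.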
1160 
1161 c2_status_t SetMetadataToGralloc4Handle(
1162         android_dataspace_t dataSpace,
1163         const std::shared_ptr<const C2StreamHdrStaticMetadataInfo::output> &staticInfo,
1164         const std::shared_ptr<const C2StreamHdrDynamicMetadataInfo::output> &dynamicInfo,
1165         const C2Handle *const handle) {
1166     c2_status_t err = C2_OK;
1167     sp<IMapper4> mapper = GetMapper4();
1168     Gralloc4Buffer buffer(handle);
1169     if (!mapper || !buffer) {
1170         // Gralloc4 not supported; nothing to do
1171         return err;
1172     }
1173     {
1174         hidl_vec<uint8_t> metadata;
1175         if (gralloc4::encodeDataspace(static_cast<Dataspace>(dataSpace), &metadata) == OK) {
1176             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Dataspace, metadata);
1177             if (!ret.isOk()) {
1178                 err = C2_REFUSED;
1179             } else if (ret != Error4::NONE) {
1180                 err = C2_CORRUPTED;
1181             }
1182         }
1183     }
1184     if (staticInfo && *staticInfo) {
1185         ALOGV("Setting static HDR info as gralloc4 metadata");
1186         std::optional<Smpte2086> smpte2086 = Smpte2086{
1187             {staticInfo->mastering.red.x, staticInfo->mastering.red.y},
1188             {staticInfo->mastering.green.x, staticInfo->mastering.green.y},
1189             {staticInfo->mastering.blue.x, staticInfo->mastering.blue.y},
1190             {staticInfo->mastering.white.x, staticInfo->mastering.white.y},
1191             staticInfo->mastering.maxLuminance,
1192             staticInfo->mastering.minLuminance,
1193         };
1194         hidl_vec<uint8_t> vec;
1195         if (0.0 <= smpte2086->primaryRed.x && smpte2086->primaryRed.x <= 1.0
1196                 && 0.0 <= smpte2086->primaryRed.y && smpte2086->primaryRed.y <= 1.0
1197                 && 0.0 <= smpte2086->primaryGreen.x && smpte2086->primaryGreen.x <= 1.0
1198                 && 0.0 <= smpte2086->primaryGreen.y && smpte2086->primaryGreen.y <= 1.0
1199                 && 0.0 <= smpte2086->primaryBlue.x && smpte2086->primaryBlue.x <= 1.0
1200                 && 0.0 <= smpte2086->primaryBlue.y && smpte2086->primaryBlue.y <= 1.0
1201                 && 0.0 <= smpte2086->whitePoint.x && smpte2086->whitePoint.x <= 1.0
1202                 && 0.0 <= smpte2086->whitePoint.y && smpte2086->whitePoint.y <= 1.0
1203                 && 0.0 <= smpte2086->maxLuminance && 0.0 <= smpte2086->minLuminance
1204                 && gralloc4::encodeSmpte2086(smpte2086, &vec) == OK) {
1205             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Smpte2086, vec);
1206             if (!ret.isOk()) {
1207                 err = C2_REFUSED;
1208             } else if (ret != Error4::NONE) {
1209                 err = C2_CORRUPTED;
1210             }
1211         }
1212         std::optional<Cta861_3> cta861_3 = Cta861_3{
1213             staticInfo->maxCll,
1214             staticInfo->maxFall,
1215         };
1216         if (0.0 <= cta861_3->maxContentLightLevel && 0.0 <= cta861_3->maxFrameAverageLightLevel
1217                 && gralloc4::encodeCta861_3(cta861_3, &vec) == OK) {
1218             Return<Error4> ret = mapper->set(buffer.get(), MetadataType_Cta861_3, vec);
1219             if (!ret.isOk()) {
1220                 err = C2_REFUSED;
1221             } else if (ret != Error4::NONE) {
1222                 err = C2_CORRUPTED;
1223             }
1224         }
1225     }
1226     if (dynamicInfo && *dynamicInfo && dynamicInfo->flexCount() > 0) {
1227         ALOGV("Setting dynamic HDR info as gralloc4 metadata");
1228         std::optional<IMapper4::MetadataType> metadataType;
1229         switch (dynamicInfo->m.type_) {
1230         case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_10:
1231             // TODO
1232             break;
1233         case C2Config::HDR_DYNAMIC_METADATA_TYPE_SMPTE_2094_40:
1234             metadataType = MetadataType_Smpte2094_40;
1235             break;
1236         }
1237 
1238         if (metadataType) {
1239             std::vector<uint8_t> smpte2094_40;
1240             smpte2094_40.resize(dynamicInfo->flexCount());
1241             memcpy(smpte2094_40.data(), dynamicInfo->m.data, dynamicInfo->flexCount());
1242 
1243             hidl_vec<uint8_t> vec;
1244             if (gralloc4::encodeSmpte2094_40({ smpte2094_40 }, &vec) == OK) {
1245                 Return<Error4> ret = mapper->set(buffer.get(), *metadataType, vec);
1246                 if (!ret.isOk()) {
1247                     err = C2_REFUSED;
1248                 } else if (ret != Error4::NONE) {
1249                     err = C2_CORRUPTED;
1250                 }
1251             }
1252         } else {
1253             err = C2_BAD_VALUE;
1254         }
1255     }
1256 
1257     return err;
1258 }
1259 
1260 }  // namespace android
1261