/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "bufferCopy.h"

#include <android-base/logging.h>

#include <libyuv.h>

namespace aidl::android::hardware::automotive::evs::implementation {

// Round up to the nearest multiple of the given alignment value
template <unsigned alignment>
int align(int value) {
    static_assert((alignment && !(alignment & (alignment - 1))), "alignment must be a power of 2");

    unsigned mask = alignment - 1;
    return (value + mask) & ~mask;
}
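// For example (illustrative values only): align<16>(1280) == 1280 (already aligned),
// align<16>(1281) == 1296, and align<2>(7) == 8.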

void fillNV21FromNV21(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned) {
    // The NV21 format provides a Y array of 8-bit values, followed by a 1/2 x 1/2 interleaved U/V
    // array.  It assumes an even width and height for the overall image, and a horizontal stride
    // that is a multiple of 16 bytes for both the Y and UV arrays.

    // Target and source image layout properties (they match since the formats match!)
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    const unsigned strideLum = align<16>(pDesc->width);
    const unsigned sizeY = strideLum * pDesc->height;
    const unsigned strideColor = strideLum;  // 1/2 the samples, but two interleaved channels
    const unsigned sizeColor = strideColor * pDesc->height / 2;
    const unsigned totalBytes = sizeY + sizeColor;

    // Simply copy the data byte for byte
    memcpy(tgt, imgData[0], totalBytes);
}
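// Sizing example (illustrative numbers, not from the original file): for a 1280x720 NV21 buffer,
// strideLum = align<16>(1280) = 1280, sizeY = 1280 * 720 = 921600 bytes, and
// sizeColor = 1280 * 360 = 460800 bytes, so the memcpy above moves 1382400 bytes in total.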

void fillNV21FromYUYV(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    // The YUYV format provides an interleaved array of pixel values with U and V subsampled in
    // the horizontal direction only.  Also known as interleaved 422 format.  A 4-byte
    // "macro pixel" provides the Y values for two adjacent pixels and the U and V values shared
    // between those two pixels.  The width of the image must be an even number.
    // We need to down-sample the UV values and collect them together after all the packed Y values
    // to construct the NV21 format.
    // NV21 requires even width and height, so we assume that is the case for the incoming image
    // as well.
    uint32_t* srcDataYUYV = (uint32_t*)imgData[0];
    struct YUYVpixel {
        uint8_t Y1;
        uint8_t U;
        uint8_t Y2;
        uint8_t V;
    };

    // Target image layout properties
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    const unsigned strideLum = align<16>(pDesc->width);
    const unsigned sizeY = strideLum * pDesc->height;
    const unsigned strideColor = strideLum;  // 1/2 the samples, but two interleaved channels

    // Source image layout properties
    const unsigned srcRowPixels = imgStride / 4;  // imgStride is in bytes; 4 bytes per macro pixel
    const unsigned srcRowDoubleStep = srcRowPixels * 2;
    uint32_t* topSrcRow = srcDataYUYV;
    uint32_t* botSrcRow = srcDataYUYV + srcRowPixels;

    // We're going to work on one 2x2 cell in the output image at a time
    for (unsigned cellRow = 0; cellRow < pDesc->height / 2; cellRow++) {
        // Set up the output pointers
        uint8_t* yTopRow = tgt + (cellRow * 2) * strideLum;
        uint8_t* yBotRow = yTopRow + strideLum;
        uint8_t* uvRow = (tgt + sizeY) + cellRow * strideColor;

        for (unsigned cellCol = 0; cellCol < pDesc->width / 2; cellCol++) {
            // Collect the values from the YUYV interleaved data
            const YUYVpixel* pTopMacroPixel = (YUYVpixel*)&topSrcRow[cellCol];
            const YUYVpixel* pBotMacroPixel = (YUYVpixel*)&botSrcRow[cellCol];

            // Down-sample the U/V values by linear average between rows
            const uint8_t uValue = (pTopMacroPixel->U + pBotMacroPixel->U) >> 1;
            const uint8_t vValue = (pTopMacroPixel->V + pBotMacroPixel->V) >> 1;

            // Store the values into the NV21 layout
            yTopRow[cellCol * 2] = pTopMacroPixel->Y1;
            yTopRow[cellCol * 2 + 1] = pTopMacroPixel->Y2;
            yBotRow[cellCol * 2] = pBotMacroPixel->Y1;
            yBotRow[cellCol * 2 + 1] = pBotMacroPixel->Y2;
            uvRow[cellCol * 2] = uValue;
            uvRow[cellCol * 2 + 1] = vValue;
        }

        // Skip ahead two source rows to get to the next pair of rows
        topSrcRow += srcRowDoubleStep;
        botSrcRow += srcRowDoubleStep;
    }
}
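// Illustrative 2x2 cell (hypothetical sample values): a top macro pixel {Y1=100, U=60, Y2=110,
// V=200} over a bottom macro pixel {Y1=102, U=64, Y2=108, V=196} emits luma bytes 100,110 on the
// top output row and 102,108 on the bottom output row, plus the averaged chroma pair (62, 198)
// written into the interleaved U/V plane.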

void fillRGBAFromYUYV(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    // Convert with YUY2ToARGB.  Note that libyuv names its formats in little-endian byte order
    // while we use big-endian style names for the RGB formats.
    const auto dstStrideInBytes = pDesc->stride * 4;  // 4 bytes per pixel
    auto result = libyuv::YUY2ToARGB((const uint8_t*)imgData[0],
                                     imgStride,  // input stride in bytes
                                     tgt,
                                     dstStrideInBytes,  // output stride in bytes
                                     pDesc->width, pDesc->height);
    if (result) {
        LOG(ERROR) << "Failed to convert YUYV to BGRA.";
        return;
    }

    // Swap the R and B channels to convert BGRA to RGBA in place.
    // TODO(b/190783702): Consider allocating extra space to store the ARGB data temporarily
    //                    if the operation below turns out to be too slow.
    result = libyuv::ABGRToARGB(tgt, dstStrideInBytes, tgt, dstStrideInBytes, pDesc->width,
                                pDesc->height);
    if (result) {
        LOG(ERROR) << "Failed to convert BGRA to RGBA.";
    }
}
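// Byte-order note: libyuv's "ARGB" refers to little-endian word order, i.e. bytes B,G,R,A in
// memory, which big-endian style naming calls BGRA.  The ABGRToARGB pass above therefore swaps
// the R and B bytes in place, leaving R,G,B,A in memory, the layout Android calls RGBA_8888.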

void fillRGBAFromBGRA(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    auto result = libyuv::ABGRToARGB((const uint8_t*)imgData[0], imgStride, tgt, imgStride,
                                     pDesc->width, pDesc->height);
    if (result) {
        LOG(ERROR) << "Failed to convert BGRA to RGBA.";
    }
}

void fillRGBAFromARGB(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);

    const auto dstStrideInBytes = pDesc->stride * 4;  // 4 bytes per pixel
    auto result = libyuv::ARGBToABGR((const uint8_t*)imgData[0], imgStride, tgt, dstStrideInBytes,
                                     pDesc->width, pDesc->height);
    if (result) {
        LOG(ERROR) << "Failed to convert ARGB to RGBA.";
    }
}

void fillRGBAFromRGB3(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    const auto dstStrideInBytes = pDesc->stride * 4;  // 4 bytes per pixel
    auto result = libyuv::RGB24ToARGB((const uint8_t*)imgData[0], imgStride, tgt, dstStrideInBytes,
                                      pDesc->width, pDesc->height);
    if (result) {
        LOG(ERROR) << "Failed to convert RGB3 to RGBA.";
    }
}
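// RGB3 (the V4L2 RGB24 format) packs 3 bytes per pixel while the RGBA target uses 4, so the
// source row stride (imgStride, in bytes) and the destination row stride (dstStrideInBytes =
// pDesc->stride * 4) are expected to differ even for the same image width.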

void fillYUYVFromYUYV(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    unsigned width = pDesc->width;
    unsigned height = pDesc->height;
    uint8_t* src = (uint8_t*)imgData[0];
    uint8_t* dst = (uint8_t*)tgt;
    unsigned srcStrideBytes = imgStride;
    // pDesc->stride is in pixels and imgStride is in bytes; YUYV packs 2 bytes per pixel
    unsigned dstStrideBytes = pDesc->stride * 2;

    for (unsigned r = 0; r < height; r++) {
        // Copy one row of pixels at a time (YUYV averages 2 bytes per pixel across a macro pixel)
        memcpy(dst + r * dstStrideBytes, src + r * srcStrideBytes, width * 2);
    }
}

void fillYUYVFromUYVY(const BufferDesc& tgtBuff, uint8_t* tgt, void* imgData[VIDEO_MAX_PLANES],
                      unsigned imgStride) {
    const AHardwareBuffer_Desc* pDesc =
            reinterpret_cast<const AHardwareBuffer_Desc*>(&tgtBuff.buffer.description);
    unsigned width = pDesc->width;
    unsigned height = pDesc->height;
    uint32_t* src = (uint32_t*)imgData[0];
    uint32_t* dst = (uint32_t*)tgt;
    unsigned srcStridePixels = imgStride / 2;
    unsigned dstStridePixels = pDesc->stride;

    const int srcRowPadding32 =
            srcStridePixels / 2 - width / 2;  // 2 bytes per pixel, 4 bytes per word
    const int dstRowPadding32 =
            dstStridePixels / 2 - width / 2;  // 2 bytes per pixel, 4 bytes per word

    for (unsigned r = 0; r < height; r++) {
        for (unsigned c = 0; c < width / 2; c++) {
            // Note:  we're walking two pixels at a time here (even/odd)
            uint32_t srcPixel = *src++;

            uint8_t Y1 = (srcPixel) & 0xFF;
            uint8_t U = (srcPixel >> 8) & 0xFF;
            uint8_t Y2 = (srcPixel >> 16) & 0xFF;
            uint8_t V = (srcPixel >> 24) & 0xFF;

            // Write back the pair of pixels with the components swizzled.  The net effect is to
            // swap the two bytes of each 16-bit pair, turning UYVY byte order (U0 Y0 V0 Y1) into
            // YUYV byte order (Y0 U0 Y1 V0).
            *dst++ = (U) | (Y1 << 8) | (V << 16) | (Y2 << 24);
        }

        // Skip over any extra data or end of row alignment padding
        src += srcRowPadding32;
        dst += dstRowPadding32;
    }
}
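// Purely as an illustration of how these helpers might be selected by a caller that knows both
// the V4L2 source format and the target buffer format.  This sketch is not part of the original
// file; the FillFunc alias, the chooseFill() name, and the format pairings shown here are
// assumptions for example purposes only.
//
//     using FillFunc = void (*)(const BufferDesc&, uint8_t*, void*[VIDEO_MAX_PLANES], unsigned);
//
//     FillFunc chooseFill(uint32_t srcV4l2Format, uint32_t tgtAndroidFormat) {
//         if (tgtAndroidFormat == AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM) {
//             switch (srcV4l2Format) {
//                 case V4L2_PIX_FMT_YUYV:  return fillRGBAFromYUYV;
//                 case V4L2_PIX_FMT_RGB24: return fillRGBAFromRGB3;
//                 default:                 return nullptr;
//             }
//         }
//         return nullptr;  // Other target formats (NV21, YUYV) would dispatch similarly
//     }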

}  // namespace aidl::android::hardware::automotive::evs::implementation