/*
 * Copyright 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <cmath>
#include <vector>
#include <ultrahdr/gainmapmath.h>

namespace android::ultrahdr {

static const std::vector<float> kPqOETF = [] {
    std::vector<float> result;
    for (int idx = 0; idx < kPqOETFNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kPqOETFNumEntries - 1);
      result.push_back(pqOetf(value));
    }
    return result;
}();

static const std::vector<float> kPqInvOETF = [] {
    std::vector<float> result;
    for (int idx = 0; idx < kPqInvOETFNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kPqInvOETFNumEntries - 1);
      result.push_back(pqInvOetf(value));
    }
    return result;
}();

static const std::vector<float> kHlgOETF = [] {
    std::vector<float> result;
    for (int idx = 0; idx < kHlgOETFNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kHlgOETFNumEntries - 1);
      result.push_back(hlgOetf(value));
    }
    return result;
}();

static const std::vector<float> kHlgInvOETF = [] {
    std::vector<float> result;
    for (int idx = 0; idx < kHlgInvOETFNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kHlgInvOETFNumEntries - 1);
      result.push_back(hlgInvOetf(value));
    }
    return result;
}();

static const std::vector<float> kSrgbInvOETF = [] {
    std::vector<float> result;
    for (int idx = 0; idx < kSrgbInvOETFNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kSrgbInvOETFNumEntries - 1);
      result.push_back(srgbInvOetf(value));
    }
    return result;
}();
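
// Informal note on the tables above: entry idx holds the exact function value
// at idx / (N - 1), and the *LUT() helpers below index them with
// static_cast<uint32_t>(e * N) clamped to N - 1, so a lookup truncates the
// input to a nearby table entry rather than interpolating between entries.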

// Use Shepard's method for inverse distance weighting. For more information:
// en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method

float ShepardsIDW::euclideanDistance(float x1, float x2, float y1, float y2) {
  return sqrt(((y2 - y1) * (y2 - y1)) + (x2 - x1) * (x2 - x1));
}

void ShepardsIDW::fillShepardsIDW(float *weights, int incR, int incB) {
  for (int y = 0; y < mMapScaleFactor; y++) {
    for (int x = 0; x < mMapScaleFactor; x++) {
      float pos_x = ((float)x) / mMapScaleFactor;
      float pos_y = ((float)y) / mMapScaleFactor;
      int curr_x = floor(pos_x);
      int curr_y = floor(pos_y);
      int next_x = curr_x + incR;
      int next_y = curr_y + incB;
      float e1_distance = euclideanDistance(pos_x, curr_x, pos_y, curr_y);
      int index = y * mMapScaleFactor * 4 + x * 4;
      if (e1_distance == 0) {
        weights[index++] = 1.f;
        weights[index++] = 0.f;
        weights[index++] = 0.f;
        weights[index++] = 0.f;
      } else {
        float e1_weight = 1.f / e1_distance;

        float e2_distance = euclideanDistance(pos_x, curr_x, pos_y, next_y);
        float e2_weight = 1.f / e2_distance;

        float e3_distance = euclideanDistance(pos_x, next_x, pos_y, curr_y);
        float e3_weight = 1.f / e3_distance;

        float e4_distance = euclideanDistance(pos_x, next_x, pos_y, next_y);
        float e4_weight = 1.f / e4_distance;

        float total_weight = e1_weight + e2_weight + e3_weight + e4_weight;

        weights[index++] = e1_weight / total_weight;
        weights[index++] = e2_weight / total_weight;
        weights[index++] = e3_weight / total_weight;
        weights[index++] = e4_weight / total_weight;
      }
    }
  }
}
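
// Layout note (informal): for a scale factor S, the table holds S * S entries
// of four weights each. The entry for sub-pixel offset (x, y) starts at
// (y * S + x) * 4 and stores, in order, the normalized weights of the
// top-left, bottom-left, top-right and bottom-right map samples, matching the
// e1..e4 ordering sampleMap() uses below. For example, with S = 4 the entry
// for offset (1, 2) begins at index (2 * 4 + 1) * 4 = 36.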

////////////////////////////////////////////////////////////////////////////////
// sRGB transformations

static const float kMaxPixelFloat = 1.0f;
static float clampPixelFloat(float value) {
    return (value < 0.0f) ? 0.0f : (value > kMaxPixelFloat) ? kMaxPixelFloat : value;
}

// See IEC 61966-2-1/Amd 1:2003, Equation F.7.
static const float kSrgbR = 0.2126f, kSrgbG = 0.7152f, kSrgbB = 0.0722f;

float srgbLuminance(Color e) {
  return kSrgbR * e.r + kSrgbG * e.g + kSrgbB * e.b;
}

// See ITU-R BT.709-6, Section 3.
// Uses the same coefficients for deriving luma signal as
// IEC 61966-2-1/Amd 1:2003 states for luminance, so we reuse the luminance
// function above.
static const float kSrgbCb = 1.8556f, kSrgbCr = 1.5748f;

Color srgbRgbToYuv(Color e_gamma) {
  float y_gamma = srgbLuminance(e_gamma);
  return {{{ y_gamma,
             (e_gamma.b - y_gamma) / kSrgbCb,
             (e_gamma.r - y_gamma) / kSrgbCr }}};
}

// See ITU-R BT.709-6, Section 3.
// Same derivation as BT.2100's YUV->RGB, below. Similar to srgbRgbToYuv, we
// can reuse the luminance coefficients since they are the same.
static const float kSrgbGCb = kSrgbB * kSrgbCb / kSrgbG;
static const float kSrgbGCr = kSrgbR * kSrgbCr / kSrgbG;

Color srgbYuvToRgb(Color e_gamma) {
  return {{{ clampPixelFloat(e_gamma.y + kSrgbCr * e_gamma.v),
             clampPixelFloat(e_gamma.y - kSrgbGCb * e_gamma.u - kSrgbGCr * e_gamma.v),
             clampPixelFloat(e_gamma.y + kSrgbCb * e_gamma.u) }}};
}

// See IEC 61966-2-1/Amd 1:2003, Equations F.5 and F.6.
float srgbInvOetf(float e_gamma) {
  if (e_gamma <= 0.04045f) {
    return e_gamma / 12.92f;
  } else {
    return pow((e_gamma + 0.055f) / 1.055f, 2.4);
  }
}
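
// Worked example (informal): srgbInvOetf(0.04045f) takes the linear segment and
// returns 0.04045 / 12.92 ~= 0.00313, while srgbInvOetf(0.5f) takes the power
// branch: ((0.5 + 0.055) / 1.055)^2.4 ~= 0.5261^2.4 ~= 0.214.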

Color srgbInvOetf(Color e_gamma) {
  return {{{ srgbInvOetf(e_gamma.r),
             srgbInvOetf(e_gamma.g),
             srgbInvOetf(e_gamma.b) }}};
}

// See IEC 61966-2-1, Equations F.5 and F.6.
float srgbInvOetfLUT(float e_gamma) {
  uint32_t value = static_cast<uint32_t>(e_gamma * kSrgbInvOETFNumEntries);
  // TODO: Remove once conversion modules have appropriate clamping in place
  value = CLIP3(value, 0, kSrgbInvOETFNumEntries - 1);
  return kSrgbInvOETF[value];
}

Color srgbInvOetfLUT(Color e_gamma) {
  return {{{ srgbInvOetfLUT(e_gamma.r),
             srgbInvOetfLUT(e_gamma.g),
             srgbInvOetfLUT(e_gamma.b) }}};
}

////////////////////////////////////////////////////////////////////////////////
// Display-P3 transformations

// See SMPTE EG 432-1, Equation 7-8.
static const float kP3R = 0.20949f, kP3G = 0.72160f, kP3B = 0.06891f;

float p3Luminance(Color e) {
  return kP3R * e.r + kP3G * e.g + kP3B * e.b;
}

// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
// Unfortunately, calculation of luma signal differs from calculation of
// luminance for Display-P3, so we can't reuse p3Luminance here.
static const float kP3YR = 0.299f, kP3YG = 0.587f, kP3YB = 0.114f;
static const float kP3Cb = 1.772f, kP3Cr = 1.402f;

Color p3RgbToYuv(Color e_gamma) {
  float y_gamma = kP3YR * e_gamma.r + kP3YG * e_gamma.g + kP3YB * e_gamma.b;
  return {{{ y_gamma,
             (e_gamma.b - y_gamma) / kP3Cb,
             (e_gamma.r - y_gamma) / kP3Cr }}};
}

// See ITU-R BT.601-7, Sections 2.5.1 and 2.5.2.
// Same derivation as BT.2100's YUV->RGB, below. Similar to p3RgbToYuv, we must
// use luma signal coefficients rather than the luminance coefficients.
static const float kP3GCb = kP3YB * kP3Cb / kP3YG;
static const float kP3GCr = kP3YR * kP3Cr / kP3YG;

Color p3YuvToRgb(Color e_gamma) {
  return {{{ clampPixelFloat(e_gamma.y + kP3Cr * e_gamma.v),
             clampPixelFloat(e_gamma.y - kP3GCb * e_gamma.u - kP3GCr * e_gamma.v),
             clampPixelFloat(e_gamma.y + kP3Cb * e_gamma.u) }}};
}


////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2

// See ITU-R BT.2100-2, Table 5, HLG Reference OOTF
static const float kBt2100R = 0.2627f, kBt2100G = 0.6780f, kBt2100B = 0.0593f;

float bt2100Luminance(Color e) {
  return kBt2100R * e.r + kBt2100G * e.g + kBt2100B * e.b;
}

// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
// BT.2100 uses the same coefficients for calculating luma signal and luminance,
// so we reuse the luminance function here.
static const float kBt2100Cb = 1.8814f, kBt2100Cr = 1.4746f;

Color bt2100RgbToYuv(Color e_gamma) {
  float y_gamma = bt2100Luminance(e_gamma);
  return {{{ y_gamma,
             (e_gamma.b - y_gamma) / kBt2100Cb,
             (e_gamma.r - y_gamma) / kBt2100Cr }}};
}

// See ITU-R BT.2100-2, Table 6, Derivation of colour difference signals.
//
// Similar to bt2100RgbToYuv above, we can reuse the luminance coefficients.
//
// Derived by inverting bt2100RgbToYuv. The derivations for R and B are pretty
// straightforward; we just invert the formulas for U and V above. Deriving the
// formula for G takes a bit more work:
//
// Start with the equation for luminance:
//   Y = kBt2100R * R + kBt2100G * G + kBt2100B * B
// Solve for G:
//   G = (Y - kBt2100R * R - kBt2100B * B) / kBt2100G
// Substitute the equations for R and B in terms of YUV:
//   G = (Y - kBt2100R * (Y + kBt2100Cr * V) - kBt2100B * (Y + kBt2100Cb * U)) / kBt2100G
// Simplify:
//   G = Y * ((1 - kBt2100R - kBt2100B) / kBt2100G)
//     - U * (kBt2100B * kBt2100Cb / kBt2100G)
//     - V * (kBt2100R * kBt2100Cr / kBt2100G)
//
// This gives the following coefficients for calculating G from YUV:
//
// Coef for Y = (1 - kBt2100R - kBt2100B) / kBt2100G = 1
// Coef for U = kBt2100B * kBt2100Cb / kBt2100G = kBt2100GCb = ~0.1645 (subtracted)
// Coef for V = kBt2100R * kBt2100Cr / kBt2100G = kBt2100GCr = ~0.5713 (subtracted)

static const float kBt2100GCb = kBt2100B * kBt2100Cb / kBt2100G;
static const float kBt2100GCr = kBt2100R * kBt2100Cr / kBt2100G;
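
// Numeric check (informal): kBt2100GCb = 0.0593 * 1.8814 / 0.6780 ~= 0.1645 and
// kBt2100GCr = 0.2627 * 1.4746 / 0.6780 ~= 0.5713, matching the approximate
// values quoted in the derivation above.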

Color bt2100YuvToRgb(Color e_gamma) {
  return {{{ clampPixelFloat(e_gamma.y + kBt2100Cr * e_gamma.v),
             clampPixelFloat(e_gamma.y - kBt2100GCb * e_gamma.u - kBt2100GCr * e_gamma.v),
             clampPixelFloat(e_gamma.y + kBt2100Cb * e_gamma.u) }}};
}

// See ITU-R BT.2100-2, Table 5, HLG Reference OETF.
static const float kHlgA = 0.17883277f, kHlgB = 0.28466892f, kHlgC = 0.55991073f;

float hlgOetf(float e) {
  if (e <= 1.0f/12.0f) {
    return sqrt(3.0f * e);
  } else {
    return kHlgA * log(12.0f * e - kHlgB) + kHlgC;
  }
}

Color hlgOetf(Color e) {
  return {{{ hlgOetf(e.r), hlgOetf(e.g), hlgOetf(e.b) }}};
}
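
// Worked example (informal): at the breakpoint e = 1/12 both branches agree:
// sqrt(3 * 1/12) = 0.5 and kHlgA * ln(12 * 1/12 - kHlgB) + kHlgC
// = 0.17883277 * ln(0.71533108) + 0.55991073 ~= 0.5, so the curve is continuous
// there. hlgOetf(1.0f) evaluates the log branch and returns ~1.0.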

float hlgOetfLUT(float e) {
  uint32_t value = static_cast<uint32_t>(e * kHlgOETFNumEntries);
  // TODO: Remove once conversion modules have appropriate clamping in place
  value = CLIP3(value, 0, kHlgOETFNumEntries - 1);

  return kHlgOETF[value];
}

Color hlgOetfLUT(Color e) {
  return {{{ hlgOetfLUT(e.r), hlgOetfLUT(e.g), hlgOetfLUT(e.b) }}};
}

// See ITU-R BT.2100-2, Table 5, HLG Reference EOTF.
float hlgInvOetf(float e_gamma) {
  if (e_gamma <= 0.5f) {
    return pow(e_gamma, 2.0f) / 3.0f;
  } else {
    return (exp((e_gamma - kHlgC) / kHlgA) + kHlgB) / 12.0f;
  }
}

Color hlgInvOetf(Color e_gamma) {
  return {{{ hlgInvOetf(e_gamma.r),
             hlgInvOetf(e_gamma.g),
             hlgInvOetf(e_gamma.b) }}};
}

float hlgInvOetfLUT(float e_gamma) {
  uint32_t value = static_cast<uint32_t>(e_gamma * kHlgInvOETFNumEntries);
  // TODO: Remove once conversion modules have appropriate clamping in place
  value = CLIP3(value, 0, kHlgInvOETFNumEntries - 1);

  return kHlgInvOETF[value];
}

Color hlgInvOetfLUT(Color e_gamma) {
  return {{{ hlgInvOetfLUT(e_gamma.r),
             hlgInvOetfLUT(e_gamma.g),
             hlgInvOetfLUT(e_gamma.b) }}};
}

// See ITU-R BT.2100-2, Table 4, Reference PQ OETF.
static const float kPqM1 = 2610.0f / 16384.0f, kPqM2 = 2523.0f / 4096.0f * 128.0f;
static const float kPqC1 = 3424.0f / 4096.0f, kPqC2 = 2413.0f / 4096.0f * 32.0f,
                   kPqC3 = 2392.0f / 4096.0f * 32.0f;

float pqOetf(float e) {
  if (e <= 0.0f) return 0.0f;
  return pow((kPqC1 + kPqC2 * pow(e, kPqM1)) / (1 + kPqC3 * pow(e, kPqM1)),
             kPqM2);
}

Color pqOetf(Color e) {
  return {{{ pqOetf(e.r), pqOetf(e.g), pqOetf(e.b) }}};
}
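
// Worked example (informal): pqOetf(1.0f), i.e. the nominal 10,000 nit peak,
// gives ((kPqC1 + kPqC2) / (1 + kPqC3))^kPqM2 = (19.6875 / 19.6875)^78.84375 = 1.0,
// and pqOetf(0.0f) returns 0 via the early-out, so the curve spans [0, 1].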

float pqOetfLUT(float e) {
  uint32_t value = static_cast<uint32_t>(e * kPqOETFNumEntries);
  // TODO: Remove once conversion modules have appropriate clamping in place
  value = CLIP3(value, 0, kPqOETFNumEntries - 1);

  return kPqOETF[value];
}

Color pqOetfLUT(Color e) {
  return {{{ pqOetfLUT(e.r), pqOetfLUT(e.g), pqOetfLUT(e.b) }}};
}

// Derived from the inverse of the Reference PQ OETF.
static const float kPqInvA = 128.0f, kPqInvB = 107.0f, kPqInvC = 2413.0f, kPqInvD = 2392.0f,
                   kPqInvE = 6.2773946361f, kPqInvF = 0.0126833f;

float pqInvOetf(float e_gamma) {
  // This equation blows up if e_gamma is 0.0, and checking on <= 0.0 doesn't
  // always catch 0.0. So, check on 0.0001, since anything this small will
  // effectively be crushed to zero anyway.
  if (e_gamma <= 0.0001f) return 0.0f;
  return pow((kPqInvA * pow(e_gamma, kPqInvF) - kPqInvB)
           / (kPqInvC - kPqInvD * pow(e_gamma, kPqInvF)),
             kPqInvE);
}

Color pqInvOetf(Color e_gamma) {
  return {{{ pqInvOetf(e_gamma.r),
             pqInvOetf(e_gamma.g),
             pqInvOetf(e_gamma.b) }}};
}

float pqInvOetfLUT(float e_gamma) {
  uint32_t value = static_cast<uint32_t>(e_gamma * kPqInvOETFNumEntries);
  // TODO: Remove once conversion modules have appropriate clamping in place
  value = CLIP3(value, 0, kPqInvOETFNumEntries - 1);

  return kPqInvOETF[value];
}

Color pqInvOetfLUT(Color e_gamma) {
  return {{{ pqInvOetfLUT(e_gamma.r),
             pqInvOetfLUT(e_gamma.g),
             pqInvOetfLUT(e_gamma.b) }}};
}


////////////////////////////////////////////////////////////////////////////////
// Color conversions

Color bt709ToP3(Color e) {
 return {{{ 0.82254f * e.r + 0.17755f * e.g + 0.00006f * e.b,
            0.03312f * e.r + 0.96684f * e.g + -0.00001f * e.b,
            0.01706f * e.r + 0.07240f * e.g + 0.91049f * e.b }}};
}

Color bt709ToBt2100(Color e) {
 return {{{ 0.62740f * e.r + 0.32930f * e.g + 0.04332f * e.b,
            0.06904f * e.r + 0.91958f * e.g + 0.01138f * e.b,
            0.01636f * e.r + 0.08799f * e.g + 0.89555f * e.b }}};
}

Color p3ToBt709(Color e) {
 return {{{ 1.22482f * e.r + -0.22490f * e.g + -0.00007f * e.b,
            -0.04196f * e.r + 1.04199f * e.g + 0.00001f * e.b,
            -0.01961f * e.r + -0.07865f * e.g + 1.09831f * e.b }}};
}

Color p3ToBt2100(Color e) {
 return {{{ 0.75378f * e.r + 0.19862f * e.g + 0.04754f * e.b,
            0.04576f * e.r + 0.94177f * e.g + 0.01250f * e.b,
            -0.00121f * e.r + 0.01757f * e.g + 0.98359f * e.b }}};
}

Color bt2100ToBt709(Color e) {
 return {{{ 1.66045f * e.r + -0.58764f * e.g + -0.07286f * e.b,
            -0.12445f * e.r + 1.13282f * e.g + -0.00837f * e.b,
            -0.01811f * e.r + -0.10057f * e.g + 1.11878f * e.b }}};
}

Color bt2100ToP3(Color e) {
 return {{{ 1.34369f * e.r + -0.28223f * e.g + -0.06135f * e.b,
            -0.06533f * e.r + 1.07580f * e.g + -0.01051f * e.b,
            0.00283f * e.r + -0.01957f * e.g + 1.01679f * e.b }}};
}
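
// Sanity check (informal): each conversion matrix above maps white to white,
// so its rows should each sum to ~1.0 within rounding of the published
// coefficients. For example, the first row of bt709ToBt2100 gives
// 0.62740 + 0.32930 + 0.04332 = 1.00002.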

// TODO: confirm we always want to convert like this before calculating
// luminance.
ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut,
                                    ultrahdr_color_gamut hdr_gamut) {
  switch (sdr_gamut) {
    case ULTRAHDR_COLORGAMUT_BT709:
      switch (hdr_gamut) {
        case ULTRAHDR_COLORGAMUT_BT709:
          return identityConversion;
        case ULTRAHDR_COLORGAMUT_P3:
          return p3ToBt709;
        case ULTRAHDR_COLORGAMUT_BT2100:
          return bt2100ToBt709;
        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
          return nullptr;
      }
      break;
    case ULTRAHDR_COLORGAMUT_P3:
      switch (hdr_gamut) {
        case ULTRAHDR_COLORGAMUT_BT709:
          return bt709ToP3;
        case ULTRAHDR_COLORGAMUT_P3:
          return identityConversion;
        case ULTRAHDR_COLORGAMUT_BT2100:
          return bt2100ToP3;
        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
          return nullptr;
      }
      break;
    case ULTRAHDR_COLORGAMUT_BT2100:
      switch (hdr_gamut) {
        case ULTRAHDR_COLORGAMUT_BT709:
          return bt709ToBt2100;
        case ULTRAHDR_COLORGAMUT_P3:
          return p3ToBt2100;
        case ULTRAHDR_COLORGAMUT_BT2100:
          return identityConversion;
        case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
          return nullptr;
      }
      break;
    case ULTRAHDR_COLORGAMUT_UNSPECIFIED:
      return nullptr;
  }
}
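
// Illustrative usage (hypothetical caller sketch, not part of this file's API):
// fetch the function that maps an HDR pixel into the SDR image's gamut before
// comparing luminance.
//
//   ColorTransformFn hdrGamutConversionFn =
//       getHdrConversionFn(ULTRAHDR_COLORGAMUT_BT709, ULTRAHDR_COLORGAMUT_BT2100);
//   if (hdrGamutConversionFn != nullptr) {
//     Color hdr_rgb_in_sdr_gamut = hdrGamutConversionFn(hdr_rgb);  // bt2100ToBt709
//   }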

// All of these conversions are derived from the respective input YUV->RGB conversion followed by
// the RGB->YUV for the receiving encoding. They are consistent with the RGB<->YUV functions in this
// file, given that we use BT.709 encoding for sRGB and BT.601 encoding for Display-P3, to match
// DataSpace.

Color yuv709To601(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y +  0.101579f * e_gamma.u +  0.196076f * e_gamma.v,
             0.0f * e_gamma.y +  0.989854f * e_gamma.u + -0.110653f * e_gamma.v,
             0.0f * e_gamma.y + -0.072453f * e_gamma.u +  0.983398f * e_gamma.v }}};
}

Color yuv709To2100(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y + -0.016969f * e_gamma.u +  0.096312f * e_gamma.v,
             0.0f * e_gamma.y +  0.995306f * e_gamma.u + -0.051192f * e_gamma.v,
             0.0f * e_gamma.y +  0.011507f * e_gamma.u +  1.002637f * e_gamma.v }}};
}

Color yuv601To709(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y + -0.118188f * e_gamma.u + -0.212685f * e_gamma.v,
             0.0f * e_gamma.y +  1.018640f * e_gamma.u +  0.114618f * e_gamma.v,
             0.0f * e_gamma.y +  0.075049f * e_gamma.u +  1.025327f * e_gamma.v }}};
}

Color yuv601To2100(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y + -0.128245f * e_gamma.u + -0.115879f * e_gamma.v,
             0.0f * e_gamma.y +  1.010016f * e_gamma.u +  0.061592f * e_gamma.v,
             0.0f * e_gamma.y +  0.086969f * e_gamma.u +  1.029350f * e_gamma.v }}};
}

Color yuv2100To709(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y +  0.018149f * e_gamma.u + -0.095132f * e_gamma.v,
             0.0f * e_gamma.y +  1.004123f * e_gamma.u +  0.051267f * e_gamma.v,
             0.0f * e_gamma.y + -0.011524f * e_gamma.u +  0.996782f * e_gamma.v }}};
}

Color yuv2100To601(Color e_gamma) {
  return {{{ 1.0f * e_gamma.y +  0.117887f * e_gamma.u +  0.105521f * e_gamma.v,
             0.0f * e_gamma.y +  0.995211f * e_gamma.u + -0.059549f * e_gamma.v,
             0.0f * e_gamma.y + -0.084085f * e_gamma.u +  0.976518f * e_gamma.v }}};
}
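
// Spot check (informal): the yuv709To601 coefficients can be reproduced by
// pushing a pure V signal through the BT.709 YUV->RGB coefficients above
// (without clamping) and re-encoding with the BT.601 luma coefficients. For
// y = 0, u = 0, v = 1: R = 1.5748, G = -0.2126 * 1.5748 / 0.7152 ~= -0.4681,
// B = 0, so Y601 = 0.299 * R + 0.587 * G ~= 0.1961,
// U601 = (B - Y601) / 1.772 ~= -0.1107 and V601 = (R - Y601) / 1.402 ~= 0.9834,
// matching the third column of yuv709To601.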

void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
                     ColorTransformFn fn) {
  Color yuv1 = getYuv420Pixel(image, x_chroma * 2,     y_chroma * 2    );
  Color yuv2 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2    );
  Color yuv3 = getYuv420Pixel(image, x_chroma * 2,     y_chroma * 2 + 1);
  Color yuv4 = getYuv420Pixel(image, x_chroma * 2 + 1, y_chroma * 2 + 1);

  yuv1 = fn(yuv1);
  yuv2 = fn(yuv2);
  yuv3 = fn(yuv3);
  yuv4 = fn(yuv4);

  Color new_uv = (yuv1 + yuv2 + yuv3 + yuv4) / 4.0f;

  size_t pixel_y1_idx =  x_chroma * 2      +  y_chroma * 2      * image->width;
  size_t pixel_y2_idx = (x_chroma * 2 + 1) +  y_chroma * 2      * image->width;
  size_t pixel_y3_idx =  x_chroma * 2      + (y_chroma * 2 + 1) * image->width;
  size_t pixel_y4_idx = (x_chroma * 2 + 1) + (y_chroma * 2 + 1) * image->width;

  uint8_t& y1_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y1_idx];
  uint8_t& y2_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y2_idx];
  uint8_t& y3_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y3_idx];
  uint8_t& y4_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y4_idx];

  size_t pixel_count = image->width * image->height;
  size_t pixel_uv_idx = x_chroma + y_chroma * (image->width / 2);

  uint8_t& u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
  uint8_t& v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];

  y1_uint = static_cast<uint8_t>(floor(yuv1.y * 255.0f + 0.5f));
  y2_uint = static_cast<uint8_t>(floor(yuv2.y * 255.0f + 0.5f));
  y3_uint = static_cast<uint8_t>(floor(yuv3.y * 255.0f + 0.5f));
  y4_uint = static_cast<uint8_t>(floor(yuv4.y * 255.0f + 0.5f));

  u_uint = static_cast<uint8_t>(floor(new_uv.u * 255.0f + 128.0f + 0.5f));
  v_uint = static_cast<uint8_t>(floor(new_uv.v * 255.0f + 128.0f + 0.5f));
}
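
// Layout assumed above (informal): a fully packed planar YUV 4:2:0 buffer,
// i.e. width * height luma bytes followed by width/2 * height/2 bytes each of
// U and V. For a 16x16 image and chroma coordinate (x_chroma, y_chroma) =
// (2, 3), covering luma pixels (4..5, 6..7), the chroma samples live at byte
// offsets 256 + (2 + 3 * 8) = 282 for U and 256 * 5 / 4 + 26 = 346 for V.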

////////////////////////////////////////////////////////////////////////////////
// Gain map calculations
uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata) {
  return encodeGain(y_sdr, y_hdr, metadata,
                    log2(metadata->minContentBoost), log2(metadata->maxContentBoost));
}

uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata,
                   float log2MinContentBoost, float log2MaxContentBoost) {
  float gain = 1.0f;
  if (y_sdr > 0.0f) {
    gain = y_hdr / y_sdr;
  }

  if (gain < metadata->minContentBoost) gain = metadata->minContentBoost;
  if (gain > metadata->maxContentBoost) gain = metadata->maxContentBoost;

  return static_cast<uint8_t>((log2(gain) - log2MinContentBoost)
                            / (log2MaxContentBoost - log2MinContentBoost)
                            * 255.0f);
}
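
// Worked example (informal): with minContentBoost = 1.0 and maxContentBoost =
// 4.0, a pixel with y_sdr = 0.25 and y_hdr = 0.5 has gain = 2.0, so the encoded
// value is (log2(2) - log2(1)) / (log2(4) - log2(1)) * 255 = 0.5 * 255 = 127
// after truncation.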

Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata) {
  float logBoost = log2(metadata->minContentBoost) * (1.0f - gain)
                 + log2(metadata->maxContentBoost) * gain;
  float gainFactor = exp2(logBoost);
  return e * gainFactor;
}

Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost) {
  float logBoost = log2(metadata->minContentBoost) * (1.0f - gain)
                 + log2(metadata->maxContentBoost) * gain;
  float gainFactor = exp2(logBoost * displayBoost / metadata->maxContentBoost);
  return e * gainFactor;
}
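
// Worked example (informal): continuing the encodeGain() example, a recovered
// map value of 127 / 255 ~= 0.498 with minContentBoost = 1.0 and
// maxContentBoost = 4.0 gives logBoost = 0 * 0.502 + 2 * 0.498 ~= 0.996, so the
// pixel is scaled by exp2(0.996) ~= 1.99, close to the original 2x boost.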

Color applyGainLUT(Color e, float gain, GainLUT& gainLUT) {
  float gainFactor = gainLUT.getGainFactor(gain);
  return e * gainFactor;
}

Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
  size_t pixel_count = image->width * image->height;

  size_t pixel_y_idx = x + y * image->width;
  size_t pixel_uv_idx = x / 2 + (y / 2) * (image->width / 2);

  uint8_t y_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_y_idx];
  uint8_t u_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count + pixel_uv_idx];
  uint8_t v_uint = reinterpret_cast<uint8_t*>(image->data)[pixel_count * 5 / 4 + pixel_uv_idx];

  // 128 bias for UV given we are using jpeglib; see:
  // https://github.com/kornelski/libjpeg/blob/master/structure.doc
  return {{{ static_cast<float>(y_uint) / 255.0f,
             (static_cast<float>(u_uint) - 128.0f) / 255.0f,
             (static_cast<float>(v_uint) - 128.0f) / 255.0f }}};
}

Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y) {
  size_t luma_stride = image->luma_stride;
  size_t chroma_stride = image->chroma_stride;
  uint16_t* luma_data = reinterpret_cast<uint16_t*>(image->data);
  uint16_t* chroma_data = reinterpret_cast<uint16_t*>(image->chroma_data);

  if (luma_stride == 0) {
    luma_stride = image->width;
  }
  if (chroma_stride == 0) {
    chroma_stride = luma_stride;
  }
  if (chroma_data == nullptr) {
    chroma_data = &reinterpret_cast<uint16_t*>(image->data)[luma_stride * image->height];
  }

  size_t pixel_y_idx = y * luma_stride + x;
  size_t pixel_u_idx = (y >> 1) * chroma_stride + (x & ~0x1);
  size_t pixel_v_idx = pixel_u_idx + 1;

  uint16_t y_uint = luma_data[pixel_y_idx] >> 6;
  uint16_t u_uint = chroma_data[pixel_u_idx] >> 6;
  uint16_t v_uint = chroma_data[pixel_v_idx] >> 6;

  // Conversions include taking narrow-range into account.
  return {{{ (static_cast<float>(y_uint) - 64.0f) / 876.0f,
             (static_cast<float>(u_uint) - 64.0f) / 896.0f - 0.5f,
             (static_cast<float>(v_uint) - 64.0f) / 896.0f - 0.5f }}};
}
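
// Range note (informal): P010 stores 10-bit narrow-range samples in the upper
// bits of each 16-bit word, hence the >> 6 above. Luma covers code values
// 64..940 (a span of 876) and chroma 64..960 (a span of 896) with 512 as the
// neutral point, so e.g. a luma code of 940 maps to (940 - 64) / 876 = 1.0 and
// a chroma code of 512 maps to (512 - 64) / 896 - 0.5 = 0.0.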

typedef Color (*getPixelFn)(jr_uncompressed_ptr, size_t, size_t);

static Color samplePixels(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y,
                          getPixelFn get_pixel_fn) {
  Color e = {{{ 0.0f, 0.0f, 0.0f }}};
  for (size_t dy = 0; dy < map_scale_factor; ++dy) {
    for (size_t dx = 0; dx < map_scale_factor; ++dx) {
      e += get_pixel_fn(image, x * map_scale_factor + dx, y * map_scale_factor + dy);
    }
  }

  return e / static_cast<float>(map_scale_factor * map_scale_factor);
}

Color sampleYuv420(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) {
  return samplePixels(image, map_scale_factor, x, y, getYuv420Pixel);
}

Color sampleP010(jr_uncompressed_ptr image, size_t map_scale_factor, size_t x, size_t y) {
  return samplePixels(image, map_scale_factor, x, y, getP010Pixel);
}

// TODO: do we need something more clever for filtering either the map or images
// to generate the map?

static size_t clamp(const size_t& val, const size_t& low, const size_t& high) {
  return val < low ? low : (high < val ? high : val);
}

static float mapUintToFloat(uint8_t map_uint) {
  return static_cast<float>(map_uint) / 255.0f;
}

static float pythDistance(float x_diff, float y_diff) {
  return sqrt(pow(x_diff, 2.0f) + pow(y_diff, 2.0f));
}

// TODO: If map_scale_factor is guaranteed to be an integer, then remove the following.
float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y) {
  float x_map = static_cast<float>(x) / map_scale_factor;
  float y_map = static_cast<float>(y) / map_scale_factor;

  size_t x_lower = static_cast<size_t>(floor(x_map));
  size_t x_upper = x_lower + 1;
  size_t y_lower = static_cast<size_t>(floor(y_map));
  size_t y_upper = y_lower + 1;

  x_lower = clamp(x_lower, 0, map->width - 1);
  x_upper = clamp(x_upper, 0, map->width - 1);
  y_lower = clamp(y_lower, 0, map->height - 1);
  y_upper = clamp(y_upper, 0, map->height - 1);

  // Use Shepard's method for inverse distance weighting. For more information:
  // en.wikipedia.org/wiki/Inverse_distance_weighting#Shepard's_method

  float e1 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_lower * map->width]);
  float e1_dist = pythDistance(x_map - static_cast<float>(x_lower),
                               y_map - static_cast<float>(y_lower));
  if (e1_dist == 0.0f) return e1;

  float e2 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_upper * map->width]);
  float e2_dist = pythDistance(x_map - static_cast<float>(x_lower),
                               y_map - static_cast<float>(y_upper));
  if (e2_dist == 0.0f) return e2;

  float e3 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_lower * map->width]);
  float e3_dist = pythDistance(x_map - static_cast<float>(x_upper),
                               y_map - static_cast<float>(y_lower));
  if (e3_dist == 0.0f) return e3;

  float e4 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_upper * map->width]);
  float e4_dist = pythDistance(x_map - static_cast<float>(x_upper),
                               y_map - static_cast<float>(y_upper));
  if (e4_dist == 0.0f) return e4;

  float e1_weight = 1.0f / e1_dist;
  float e2_weight = 1.0f / e2_dist;
  float e3_weight = 1.0f / e3_dist;
  float e4_weight = 1.0f / e4_dist;
  float total_weight = e1_weight + e2_weight + e3_weight + e4_weight;

  return e1 * (e1_weight / total_weight)
       + e2 * (e2_weight / total_weight)
       + e3 * (e3_weight / total_weight)
       + e4 * (e4_weight / total_weight);
}

float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y,
                ShepardsIDW& weightTables) {
  // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the
  // following by computing log2(map_scale_factor) once and then using >> log2(map_scale_factor)
  int x_lower = x / map_scale_factor;
  int x_upper = x_lower + 1;
  int y_lower = y / map_scale_factor;
  int y_upper = y_lower + 1;

  x_lower = std::min(x_lower, map->width - 1);
  x_upper = std::min(x_upper, map->width - 1);
  y_lower = std::min(y_lower, map->height - 1);
  y_upper = std::min(y_upper, map->height - 1);

  float e1 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_lower * map->width]);
  float e2 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_lower + y_upper * map->width]);
  float e3 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_lower * map->width]);
  float e4 = mapUintToFloat(reinterpret_cast<uint8_t*>(map->data)[x_upper + y_upper * map->width]);

  // TODO: If map_scale_factor is guaranteed to be an integer power of 2, then optimize the
  // following by using & (map_scale_factor - 1)
  int offset_x = x % map_scale_factor;
  int offset_y = y % map_scale_factor;

  float* weights = weightTables.mWeights;
  if (x_lower == x_upper && y_lower == y_upper) weights = weightTables.mWeightsC;
  else if (x_lower == x_upper) weights = weightTables.mWeightsNR;
  else if (y_lower == y_upper) weights = weightTables.mWeightsNB;
  weights += offset_y * map_scale_factor * 4 + offset_x * 4;

  return e1 * weights[0] + e2 * weights[1] + e3 * weights[2] + e4 * weights[3];
}

uint32_t colorToRgba1010102(Color e_gamma) {
  return (0x3ff & static_cast<uint32_t>(e_gamma.r * 1023.0f))
       | ((0x3ff & static_cast<uint32_t>(e_gamma.g * 1023.0f)) << 10)
       | ((0x3ff & static_cast<uint32_t>(e_gamma.b * 1023.0f)) << 20)
       | (0x3 << 30);  // Set alpha to 1.0
}

uint64_t colorToRgbaF16(Color e_gamma) {
  return (uint64_t) floatToHalf(e_gamma.r)
       | (((uint64_t) floatToHalf(e_gamma.g)) << 16)
       | (((uint64_t) floatToHalf(e_gamma.b)) << 32)
       | (((uint64_t) floatToHalf(1.0f)) << 48);
}

} // namespace android::ultrahdr