/*
 * Copyright 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef ANDROID_ULTRAHDR_RECOVERYMAPMATH_H
#define ANDROID_ULTRAHDR_RECOVERYMAPMATH_H

#include <cmath>
#include <stdint.h>

#include <ultrahdr/jpegr.h>

namespace android::ultrahdr {

#define CLIP3(x, min, max) (((x) < (min)) ? (min) : (((x) > (max)) ? (max) : (x)))

////////////////////////////////////////////////////////////////////////////////
// Framework

const float kSdrWhiteNits = 100.0f;
const float kHlgMaxNits = 1000.0f;
const float kPqMaxNits = 10000.0f;

struct Color {
  union {
    struct {
      float r;
      float g;
      float b;
    };
    struct {
      float y;
      float u;
      float v;
    };
  };
};

typedef Color (*ColorTransformFn)(Color);
typedef float (*ColorCalculationFn)(Color);

inline Color operator+=(Color& lhs, const Color& rhs) {
  lhs.r += rhs.r;
  lhs.g += rhs.g;
  lhs.b += rhs.b;
  return lhs;
}
inline Color operator-=(Color& lhs, const Color& rhs) {
  lhs.r -= rhs.r;
  lhs.g -= rhs.g;
  lhs.b -= rhs.b;
  return lhs;
}

inline Color operator+(const Color& lhs, const Color& rhs) {
  Color temp = lhs;
  return temp += rhs;
}
inline Color operator-(const Color& lhs, const Color& rhs) {
  Color temp = lhs;
  return temp -= rhs;
}

inline Color operator+=(Color& lhs, const float rhs) {
  lhs.r += rhs;
  lhs.g += rhs;
  lhs.b += rhs;
  return lhs;
}
inline Color operator-=(Color& lhs, const float rhs) {
  lhs.r -= rhs;
  lhs.g -= rhs;
  lhs.b -= rhs;
  return lhs;
}
inline Color operator*=(Color& lhs, const float rhs) {
  lhs.r *= rhs;
  lhs.g *= rhs;
  lhs.b *= rhs;
  return lhs;
}
inline Color operator/=(Color& lhs, const float rhs) {
  lhs.r /= rhs;
  lhs.g /= rhs;
  lhs.b /= rhs;
  return lhs;
}

inline Color operator+(const Color& lhs, const float rhs) {
  Color temp = lhs;
  return temp += rhs;
}
inline Color operator-(const Color& lhs, const float rhs) {
  Color temp = lhs;
  return temp -= rhs;
}
inline Color operator*(const Color& lhs, const float rhs) {
  Color temp = lhs;
  return temp *= rhs;
}
inline Color operator/(const Color& lhs, const float rhs) {
  Color temp = lhs;
  return temp /= rhs;
}

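/*
 * Convert a 32-bit IEEE 754 float to the bit pattern of a 16-bit half float.
 * The magic constants below follow from the format difference: the exponent
 * bias changes from 127 to 15 (a difference of 112) and the mantissa narrows
 * from 23 to 10 bits (a shift of 13). Exponents below the denormal range flush
 * to a signed zero, and exponents too large for a half saturate.
 */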
inline uint16_t floatToHalf(float f) {
  // round-to-nearest-even: add last bit after truncated mantissa
  const uint32_t b = *((uint32_t*)&f) + 0x00001000;

  const uint32_t e = (b & 0x7F800000) >> 23;  // exponent
  const uint32_t m = b & 0x007FFFFF;  // mantissa

  // sign : normalized : denormalized : saturate
  return (b & 0x80000000) >> 16
      | (e > 112) * ((((e - 112) << 10) & 0x7C00) | m >> 13)
      | ((e < 113) & (e > 101)) * ((((0x007FF000 + m) >> (125 - e)) + 1) >> 1)
      | (e > 143) * 0x7FFF;
}

constexpr size_t kGainFactorPrecision = 10;
constexpr size_t kGainFactorNumEntries = 1 << kGainFactorPrecision;
struct GainLUT {
  GainLUT(ultrahdr_metadata_ptr metadata) {
    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
      float logBoost = log2(metadata->minContentBoost) * (1.0f - value)
          + log2(metadata->maxContentBoost) * value;
      mGainTable[idx] = exp2(logBoost);
    }
  }

  GainLUT(ultrahdr_metadata_ptr metadata, float displayBoost) {
    float boostFactor = displayBoost > 0 ? displayBoost / metadata->maxContentBoost : 1.0f;
    for (int idx = 0; idx < kGainFactorNumEntries; idx++) {
      float value = static_cast<float>(idx) / static_cast<float>(kGainFactorNumEntries - 1);
      float logBoost = log2(metadata->minContentBoost) * (1.0f - value)
          + log2(metadata->maxContentBoost) * value;
      mGainTable[idx] = exp2(logBoost * boostFactor);
    }
  }

  ~GainLUT() {
  }

  float getGainFactor(float gain) {
    uint32_t idx = static_cast<uint32_t>(gain * (kGainFactorNumEntries - 1));
    // TODO: Remove once conversion modules have appropriate clamping in place.
    idx = CLIP3(idx, 0, kGainFactorNumEntries - 1);
    return mGainTable[idx];
  }

private:
  float mGainTable[kGainFactorNumEntries];
};
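
/*
 * Usage sketch (illustrative only, not part of the API): given gain map
 * metadata and a per-pixel gain sample normalized to [0.0, 1.0], the table is
 * intended to be built once and queried per pixel, e.g. together with
 * applyGainLUT() declared further below:
 *
 *   GainLUT gainLUT(metadata, displayBoost);
 *   Color rgb_hdr = applyGainLUT(rgb_sdr, gain, gainLUT);
 */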

struct ShepardsIDW {
  ShepardsIDW(int mapScaleFactor) : mMapScaleFactor{mapScaleFactor} {
    const int size = mMapScaleFactor * mMapScaleFactor * 4;
    mWeights = new float[size];
    mWeightsNR = new float[size];
    mWeightsNB = new float[size];
    mWeightsC = new float[size];
    fillShepardsIDW(mWeights, 1, 1);
    fillShepardsIDW(mWeightsNR, 0, 1);
    fillShepardsIDW(mWeightsNB, 1, 0);
    fillShepardsIDW(mWeightsC, 0, 0);
  }
  ~ShepardsIDW() {
    delete[] mWeights;
    delete[] mWeightsNR;
    delete[] mWeightsNB;
    delete[] mWeightsC;
  }

  int mMapScaleFactor;
  // Image:
  // p00 p01 p02 p03 p04 p05 p06 p07
  // p10 p11 p12 p13 p14 p15 p16 p17
  // p20 p21 p22 p23 p24 p25 p26 p27
  // p30 p31 p32 p33 p34 p35 p36 p37
  // p40 p41 p42 p43 p44 p45 p46 p47
  // p50 p51 p52 p53 p54 p55 p56 p57
  // p60 p61 p62 p63 p64 p65 p66 p67
  // p70 p71 p72 p73 p74 p75 p76 p77

  // Gain map (for scale factor 4):
  // m00 m01
  // m10 m11

  // Gain samples from the current 4x4 block, the block to its right, the block
  // below it, and the block to its bottom-right are used during reconstruction;
  // hence each table stores 4 weights per position.
  float* mWeights;
  // TODO: Check if it's ok to use mWeights in these cases as well.
  float* mWeightsNR;  // no right
  float* mWeightsNB;  // no bottom
  float* mWeightsC;   // no right & no bottom

  float euclideanDistance(float x1, float x2, float y1, float y2);
  void fillShepardsIDW(float *weights, int incR, int incB);
};
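
/*
 * Illustrative sketch (an assumption, not necessarily the exact scheme used by
 * fillShepardsIDW): Shepard's inverse distance weighting with a power
 * parameter of 2. Each of the four neighbouring gain-map samples is weighted
 * by the inverse square of its distance to the pixel being reconstructed, and
 * the weights are normalized to sum to 1.
 */
inline void shepardsIdwWeightsSketch(const float distances[4], float weights[4]) {
  float sum = 0.0f;
  for (int i = 0; i < 4; i++) {
    // Guard against a zero distance (the pixel coincides with a gain sample).
    const float d2 = distances[i] * distances[i];
    weights[i] = 1.0f / (d2 > 0.0f ? d2 : 1e-12f);
    sum += weights[i];
  }
  for (int i = 0; i < 4; i++) {
    weights[i] /= sum;
  }
}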

////////////////////////////////////////////////////////////////////////////////
// sRGB transformations
// NOTE: sRGB has the same color primaries as BT.709, but a different transfer
// function. For this reason, all sRGB transformations here apply to BT.709,
// except for those concerning transfer functions.

/*
 * Calculate the luminance of a linear sRGB pixel, according to
 * IEC 61966-2-1/Amd 1:2003.
 *
 * [0.0, 1.0] range in and out.
 */
float srgbLuminance(Color e);
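
/*
 * Illustrative sketch (not the library implementation): luminance of linear
 * BT.709/sRGB primaries uses the standard coefficients below, which
 * srgbLuminance() is expected to follow.
 */
inline float srgbLuminanceSketch(Color e) {
  return 0.2126f * e.r + 0.7152f * e.g + 0.0722f * e.b;
}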

/*
 * Convert from OETF'd sRGB RGB to YUV, according to ITU-R BT.709-6.
 *
 * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color srgbRgbToYuv(Color e_gamma);


/*
 * Convert from OETF'd sRGB YUV to RGB, according to ITU-R BT.709-6.
 *
 * BT.709 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color srgbYuvToRgb(Color e_gamma);

/*
 * Convert from sRGB to linear, according to IEC 61966-2-1/Amd 1:2003.
 *
 * [0.0, 1.0] range in and out.
 */
float srgbInvOetf(float e_gamma);
Color srgbInvOetf(Color e_gamma);
float srgbInvOetfLUT(float e_gamma);
Color srgbInvOetfLUT(Color e_gamma);

constexpr size_t kSrgbInvOETFPrecision = 10;
constexpr size_t kSrgbInvOETFNumEntries = 1 << kSrgbInvOETFPrecision;
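
/*
 * Illustrative sketch (not the library implementation): the piecewise inverse
 * sRGB OETF from IEC 61966-2-1, which srgbInvOetf() is expected to follow and
 * srgbInvOetfLUT() to approximate with a table of kSrgbInvOETFNumEntries
 * entries.
 */
inline float srgbInvOetfSketch(float e_gamma) {
  return e_gamma <= 0.04045f ? e_gamma / 12.92f
                             : std::pow((e_gamma + 0.055f) / 1.055f, 2.4f);
}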

////////////////////////////////////////////////////////////////////////////////
// Display-P3 transformations

/*
 * Calculate the luminance of a linear RGB P3 pixel, according to SMPTE EG 432-1.
 *
 * [0.0, 1.0] range in and out.
 */
float p3Luminance(Color e);

/*
 * Convert from OETF'd P3 RGB to YUV, according to ITU-R BT.601-7.
 *
 * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color p3RgbToYuv(Color e_gamma);

/*
 * Convert from OETF'd P3 YUV to RGB, according to ITU-R BT.601-7.
 *
 * BT.601 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color p3YuvToRgb(Color e_gamma);


////////////////////////////////////////////////////////////////////////////////
// BT.2100 transformations - according to ITU-R BT.2100-2

/*
 * Calculate the luminance of a linear RGB BT.2100 pixel.
 *
 * [0.0, 1.0] range in and out.
 */
float bt2100Luminance(Color e);
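
/*
 * Illustrative sketch (not the library implementation): BT.2100 defines the
 * luminance of linear RGB with the coefficients below, which bt2100Luminance()
 * is expected to follow.
 */
inline float bt2100LuminanceSketch(Color e) {
  return 0.2627f * e.r + 0.6780f * e.g + 0.0593f * e.b;
}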

/*
 * Convert from OETF'd BT.2100 RGB to YUV, according to ITU-R BT.2100-2.
 *
 * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color bt2100RgbToYuv(Color e_gamma);

/*
 * Convert from OETF'd BT.2100 YUV to RGB, according to ITU-R BT.2100-2.
 *
 * BT.2100 YUV<->RGB matrix is used to match expectations for DataSpace.
 */
Color bt2100YuvToRgb(Color e_gamma);

/*
 * Convert from scene luminance to HLG.
 *
 * [0.0, 1.0] range in and out.
 */
float hlgOetf(float e);
Color hlgOetf(Color e);
float hlgOetfLUT(float e);
Color hlgOetfLUT(Color e);

constexpr size_t kHlgOETFPrecision = 10;
constexpr size_t kHlgOETFNumEntries = 1 << kHlgOETFPrecision;
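
/*
 * Illustrative sketch (not the library implementation): the BT.2100-2 HLG OETF,
 * which hlgOetf() is expected to follow and hlgOetfLUT() to approximate with a
 * table of kHlgOETFNumEntries entries.
 */
inline float hlgOetfSketch(float e) {
  constexpr float a = 0.17883277f;
  constexpr float b = 0.28466892f;  // 1 - 4a
  constexpr float c = 0.55991073f;  // 0.5 - a * ln(4a)
  return e <= 1.0f / 12.0f ? std::sqrt(3.0f * e)
                           : a * std::log(12.0f * e - b) + c;
}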

/*
 * Convert from HLG to scene luminance.
 *
 * [0.0, 1.0] range in and out.
 */
float hlgInvOetf(float e_gamma);
Color hlgInvOetf(Color e_gamma);
float hlgInvOetfLUT(float e_gamma);
Color hlgInvOetfLUT(Color e_gamma);

constexpr size_t kHlgInvOETFPrecision = 10;
constexpr size_t kHlgInvOETFNumEntries = 1 << kHlgInvOETFPrecision;
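
/*
 * Illustrative sketch (not the library implementation): the inverse of the HLG
 * OETF sketched above, which hlgInvOetf() is expected to follow.
 */
inline float hlgInvOetfSketch(float e_gamma) {
  constexpr float a = 0.17883277f;
  constexpr float b = 0.28466892f;
  constexpr float c = 0.55991073f;
  return e_gamma <= 0.5f ? (e_gamma * e_gamma) / 3.0f
                         : (std::exp((e_gamma - c) / a) + b) / 12.0f;
}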

/*
 * Convert from scene luminance to PQ.
 *
 * [0.0, 1.0] range in and out.
 */
float pqOetf(float e);
Color pqOetf(Color e);
float pqOetfLUT(float e);
Color pqOetfLUT(Color e);

constexpr size_t kPqOETFPrecision = 10;
constexpr size_t kPqOETFNumEntries = 1 << kPqOETFPrecision;
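
/*
 * Illustrative sketch (not the library implementation): the SMPTE ST 2084 /
 * BT.2100-2 PQ curve for normalized scene luminance, where an input of 1.0
 * corresponds to kPqMaxNits. pqOetf() is expected to follow this curve.
 */
inline float pqOetfSketch(float e) {
  constexpr float m1 = 2610.0f / 16384.0f;
  constexpr float m2 = 2523.0f / 4096.0f * 128.0f;
  constexpr float c1 = 3424.0f / 4096.0f;
  constexpr float c2 = 2413.0f / 4096.0f * 32.0f;
  constexpr float c3 = 2392.0f / 4096.0f * 32.0f;
  const float p = std::pow(e, m1);
  return std::pow((c1 + c2 * p) / (1.0f + c3 * p), m2);
}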

/*
 * Convert from PQ to normalized scene luminance.
 *
 * [0.0, 1.0] range in and out, where an output of 1.0 corresponds to kPqMaxNits.
 */
float pqInvOetf(float e_gamma);
Color pqInvOetf(Color e_gamma);
float pqInvOetfLUT(float e_gamma);
Color pqInvOetfLUT(Color e_gamma);

constexpr size_t kPqInvOETFPrecision = 10;
constexpr size_t kPqInvOETFNumEntries = 1 << kPqInvOETFPrecision;
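
/*
 * Illustrative sketch (not the library implementation): the inverse of the PQ
 * curve sketched above, which pqInvOetf() is expected to follow.
 */
inline float pqInvOetfSketch(float e_gamma) {
  constexpr float m1 = 2610.0f / 16384.0f;
  constexpr float m2 = 2523.0f / 4096.0f * 128.0f;
  constexpr float c1 = 3424.0f / 4096.0f;
  constexpr float c2 = 2413.0f / 4096.0f * 32.0f;
  constexpr float c3 = 2392.0f / 4096.0f * 32.0f;
  const float p = std::pow(e_gamma, 1.0f / m2);
  return std::pow(std::fmax(p - c1, 0.0f) / (c2 - c3 * p), 1.0f / m1);
}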


////////////////////////////////////////////////////////////////////////////////
// Color space conversions

/*
 * Convert between color spaces with linear RGB data, according to ITU-R BT.2407 and EG 432-1.
 *
 * All conversions are derived from multiplying the matrix for XYZ to output RGB color gamut by the
 * matrix for input RGB color gamut to XYZ. The matrix for converting from XYZ to an RGB gamut is
 * always the inverse of the RGB gamut to XYZ matrix.
 */
Color bt709ToP3(Color e);
Color bt709ToBt2100(Color e);
Color p3ToBt709(Color e);
Color p3ToBt2100(Color e);
Color bt2100ToBt709(Color e);
Color bt2100ToP3(Color e);

/*
 * Identity conversion.
 */
inline Color identityConversion(Color e) { return e; }

/*
 * Get the conversion to apply to the HDR image for gain map generation.
 */
ColorTransformFn getHdrConversionFn(ultrahdr_color_gamut sdr_gamut, ultrahdr_color_gamut hdr_gamut);

/*
 * Convert between YUV encodings, according to ITU-R BT.709-6, ITU-R BT.601-7, and ITU-R BT.2100-2.
 *
 * BT.709 and BT.2100 have well-defined YUV encodings; Display-P3's is less well defined, but is
 * treated as BT.601 by DataSpace, hence we do the same.
 */
Color yuv709To601(Color e_gamma);
Color yuv709To2100(Color e_gamma);
Color yuv601To709(Color e_gamma);
Color yuv601To2100(Color e_gamma);
Color yuv2100To709(Color e_gamma);
Color yuv2100To601(Color e_gamma);

/*
 * Performs a transformation at the chroma x and y coordinates provided on a YUV420 image.
 *
 * Apply the transformation by determining the transformed YUV for each of the 4 Y + 1 UV; each Y
 * gets this result, and UV gets the averaged result.
 *
 * x_chroma and y_chroma should be less than or equal to half the image's width and height
 * respectively, since the input is 4:2:0 subsampled.
 */
void transformYuv420(jr_uncompressed_ptr image, size_t x_chroma, size_t y_chroma,
                     ColorTransformFn fn);


////////////////////////////////////////////////////////////////////////////////
// Gain map calculations

/*
 * Calculate the 8-bit unsigned integer gain value for the given SDR and HDR
 * luminances in linear space, and the hdr ratio to encode against.
 *
 * Note: since this library always uses gamma of 1.0, offsetSdr of 0.0, and
 * offsetHdr of 0.0, this function doesn't handle different metadata values for
 * these fields.
 */
uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata);
uint8_t encodeGain(float y_sdr, float y_hdr, ultrahdr_metadata_ptr metadata,
                   float log2MinContentBoost, float log2MaxContentBoost);
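
/*
 * Illustrative sketch (an assumption about the encoding, not the library
 * implementation): the log2 of the HDR/SDR luminance ratio is clamped to
 * [log2MinContentBoost, log2MaxContentBoost] and mapped linearly onto
 * [0, 255], the inverse of the decode mapping used by GainLUT above.
 */
inline uint8_t encodeGainSketch(float y_sdr, float y_hdr, float log2MinContentBoost,
                                float log2MaxContentBoost) {
  // Metadata is expected to satisfy maxContentBoost > minContentBoost, so the
  // denominator below is non-zero.
  float gain = 1.0f;
  if (y_sdr > 0.0f) {
    gain = y_hdr / y_sdr;
  }
  float logGain = CLIP3(std::log2(gain), log2MinContentBoost, log2MaxContentBoost);
  float normalized = (logGain - log2MinContentBoost) / (log2MaxContentBoost - log2MinContentBoost);
  return static_cast<uint8_t>(normalized * 255.0f + 0.5f);
}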

/*
 * Calculates the linear luminance in nits after applying the given gain
 * value, with the given hdr ratio, to the given sdr input in the range [0, 1].
 *
 * Note: similar to encodeGain(), this function only supports gamma 1.0,
 * offsetSdr 0.0, offsetHdr 0.0, hdrCapacityMin 1.0, and hdrCapacityMax equal to
 * gainMapMax, as this library encodes.
 */
Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata);
Color applyGain(Color e, float gain, ultrahdr_metadata_ptr metadata, float displayBoost);
Color applyGainLUT(Color e, float gain, GainLUT& gainLUT);
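
/*
 * Illustrative sketch (not the library implementation), mirroring the GainLUT
 * construction above: the normalized gain is decoded back to a log2 boost by
 * interpolating between log2(minContentBoost) and log2(maxContentBoost), and
 * the linear SDR color is scaled by the resulting factor.
 */
inline Color applyGainSketch(Color e, float gain, ultrahdr_metadata_ptr metadata) {
  float logBoost = log2(metadata->minContentBoost) * (1.0f - gain)
      + log2(metadata->maxContentBoost) * gain;
  return e * exp2(logBoost);
}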

/*
 * Helper for sampling from YUV 420 images.
 */
Color getYuv420Pixel(jr_uncompressed_ptr image, size_t x, size_t y);

/*
 * Helper for sampling from P010 images.
 *
 * Expects narrow-range image data for P010.
 */
Color getP010Pixel(jr_uncompressed_ptr image, size_t x, size_t y);

/*
 * Sample the image at the provided location, with a weighting based on nearby
 * pixels and the map scale factor.
 */
Color sampleYuv420(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);

/*
 * Sample the image at the provided location, with a weighting based on nearby
 * pixels and the map scale factor.
 *
 * Expects narrow-range image data for P010.
 */
Color sampleP010(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y);

/*
 * Sample the gain value from the map at the given x,y coordinate, which is on a
 * scale that is map_scale_factor times larger than the map's dimensions.
 */
float sampleMap(jr_uncompressed_ptr map, float map_scale_factor, size_t x, size_t y);
float sampleMap(jr_uncompressed_ptr map, size_t map_scale_factor, size_t x, size_t y,
                ShepardsIDW& weightTables);

/*
 * Convert from Color to RGBA1010102.
 *
 * Alpha is always set to 1.0.
 */
uint32_t colorToRgba1010102(Color e_gamma);

/*
 * Convert from Color to F16.
 *
 * Alpha is always set to 1.0.
 */
uint64_t colorToRgbaF16(Color e_gamma);

}  // namespace android::ultrahdr

#endif  // ANDROID_ULTRAHDR_RECOVERYMAPMATH_H