/*
 * jccolext-neon.c - colorspace conversion (32-bit Arm Neon)
 *
 * Copyright (C) 2020, Arm Limited. All Rights Reserved.
 * Copyright (C) 2020, D. R. Commander. All Rights Reserved.
 *
 * This software is provided 'as-is', without any express or implied
 * warranty. In no event will the authors be held liable for any damages
 * arising from the use of this software.
 *
 * Permission is granted to anyone to use this software for any purpose,
 * including commercial applications, and to alter it and redistribute it
 * freely, subject to the following restrictions:
 *
 * 1. The origin of this software must not be misrepresented; you must not
 *    claim that you wrote the original software. If you use this software
 *    in a product, an acknowledgment in the product documentation would be
 *    appreciated but is not required.
 * 2. Altered source versions must be plainly marked as such, and must not be
 *    misrepresented as being the original software.
 * 3. This notice may not be removed or altered from any source distribution.
 */

/* This file is included by jccolor-neon.c */


/* RGB -> YCbCr conversion is defined by the following equations:
 *    Y  =  0.29900 * R + 0.58700 * G + 0.11400 * B
 *    Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128
 *    Cr =  0.50000 * R - 0.41869 * G - 0.08131 * B + 128
 *
 * Avoid floating point arithmetic by using shifted integer constants:
 *    0.29899597 = 19595 * 2^-16
 *    0.58700561 = 38470 * 2^-16
 *    0.11399841 =  7471 * 2^-16
 *    0.16874695 = 11059 * 2^-16
 *    0.33125305 = 21709 * 2^-16
 *    0.50000000 = 32768 * 2^-16
 *    0.41868592 = 27439 * 2^-16
 *    0.08131409 =  5329 * 2^-16
 * These constants are defined in jccolor-neon.c
 *
 * We add the fixed-point equivalent of 0.5 to Cb and Cr, which effectively
 * rounds up or down the result via integer truncation.
 */
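
/* For reference, a scalar sketch of the per-pixel arithmetic that the vector
 * code below implements, assuming jsimd_rgb_ycc_neon_consts holds the eight
 * constants in the order listed above:
 *    Y  = (19595 * R + 38470 * G +  7471 * B + 32768) >> 16
 *    Cb = ((128 << 16) + 32767 - 11059 * R - 21709 * G + 32768 * B) >> 16
 *    Cr = ((128 << 16) + 32767 + 32768 * R - 27439 * G -  5329 * B) >> 16
 * The rounding right shift used for Y supplies the 32768 (0.5) bias itself;
 * for Cb and Cr, the 32767 bias plus a truncating shift has the same effect.
 */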

void jsimd_rgb_ycc_convert_neon(JDIMENSION image_width, JSAMPARRAY input_buf,
                                JSAMPIMAGE output_buf, JDIMENSION output_row,
                                int num_rows)
{
  /* Pointer to RGB(X/A) input data */
  JSAMPROW inptr;
  /* Pointers to Y, Cb, and Cr output data */
  JSAMPROW outptr0, outptr1, outptr2;
  /* Allocate temporary buffer for final (image_width % 8) pixels in row. */
  ALIGN(16) uint8_t tmp_buf[8 * RGB_PIXELSIZE];

  /* Set up conversion constants. */
#ifdef HAVE_VLD1_U16_X2
  const uint16x4x2_t consts = vld1_u16_x2(jsimd_rgb_ycc_neon_consts);
#else
  /* GCC does not currently support the intrinsic vld1_<type>_x2(). */
  const uint16x4_t consts1 = vld1_u16(jsimd_rgb_ycc_neon_consts);
  const uint16x4_t consts2 = vld1_u16(jsimd_rgb_ycc_neon_consts + 4);
  const uint16x4x2_t consts = { { consts1, consts2 } };
#endif
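  /* (128 << 16) is the 128 chroma bias in the 2^-16 fixed-point
   * representation; the extra 32767 is just under 0.5, so that the truncating
   * right shift used when descaling Cb and Cr effectively rounds the result,
   * as described in the header comment above.
   */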
  const uint32x4_t scaled_128_5 = vdupq_n_u32((128 << 16) + 32767);

  while (--num_rows >= 0) {
    inptr = *input_buf++;
    outptr0 = output_buf[0][output_row];
    outptr1 = output_buf[1][output_row];
    outptr2 = output_buf[2][output_row];
    output_row++;

    int cols_remaining = image_width;
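    /* Process 8 pixels per iteration; the final partial group of pixels (if
     * any) is handled via tmp_buf below.
     */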
    for (; cols_remaining > 0; cols_remaining -= 8) {

      /* To prevent buffer overread by the vector load instructions, the last
       * (image_width % 8) columns of data are first memcopied to a temporary
       * buffer large enough to accommodate the vector load.
       */
      if (cols_remaining < 8) {
        memcpy(tmp_buf, inptr, cols_remaining * RGB_PIXELSIZE);
        inptr = tmp_buf;
      }

#if RGB_PIXELSIZE == 4
      uint8x8x4_t input_pixels = vld4_u8(inptr);
#else
      uint8x8x3_t input_pixels = vld3_u8(inptr);
#endif
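      /* Widen the 8-bit R, G, and B samples to 16 bits so that they can feed
       * the 16 x 16 -> 32-bit multiply-accumulate intrinsics below.
       */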
      uint16x8_t r = vmovl_u8(input_pixels.val[RGB_RED]);
      uint16x8_t g = vmovl_u8(input_pixels.val[RGB_GREEN]);
      uint16x8_t b = vmovl_u8(input_pixels.val[RGB_BLUE]);

      /* Compute Y = 0.29900 * R + 0.58700 * G + 0.11400 * B */
      uint32x4_t y_low = vmull_lane_u16(vget_low_u16(r), consts.val[0], 0);
      y_low = vmlal_lane_u16(y_low, vget_low_u16(g), consts.val[0], 1);
      y_low = vmlal_lane_u16(y_low, vget_low_u16(b), consts.val[0], 2);
      uint32x4_t y_high = vmull_lane_u16(vget_high_u16(r), consts.val[0], 0);
      y_high = vmlal_lane_u16(y_high, vget_high_u16(g), consts.val[0], 1);
      y_high = vmlal_lane_u16(y_high, vget_high_u16(b), consts.val[0], 2);

      /* Compute Cb = -0.16874 * R - 0.33126 * G + 0.50000 * B + 128 */
      uint32x4_t cb_low = scaled_128_5;
      cb_low = vmlsl_lane_u16(cb_low, vget_low_u16(r), consts.val[0], 3);
      cb_low = vmlsl_lane_u16(cb_low, vget_low_u16(g), consts.val[1], 0);
      cb_low = vmlal_lane_u16(cb_low, vget_low_u16(b), consts.val[1], 1);
      uint32x4_t cb_high = scaled_128_5;
      cb_high = vmlsl_lane_u16(cb_high, vget_high_u16(r), consts.val[0], 3);
      cb_high = vmlsl_lane_u16(cb_high, vget_high_u16(g), consts.val[1], 0);
      cb_high = vmlal_lane_u16(cb_high, vget_high_u16(b), consts.val[1], 1);

      /* Compute Cr = 0.50000 * R - 0.41869 * G - 0.08131 * B + 128 */
      uint32x4_t cr_low = scaled_128_5;
      cr_low = vmlal_lane_u16(cr_low, vget_low_u16(r), consts.val[1], 1);
      cr_low = vmlsl_lane_u16(cr_low, vget_low_u16(g), consts.val[1], 2);
      cr_low = vmlsl_lane_u16(cr_low, vget_low_u16(b), consts.val[1], 3);
      uint32x4_t cr_high = scaled_128_5;
      cr_high = vmlal_lane_u16(cr_high, vget_high_u16(r), consts.val[1], 1);
      cr_high = vmlsl_lane_u16(cr_high, vget_high_u16(g), consts.val[1], 2);
      cr_high = vmlsl_lane_u16(cr_high, vget_high_u16(b), consts.val[1], 3);

      /* Descale Y values (rounding right shift) and narrow to 16-bit. */
      uint16x8_t y_u16 = vcombine_u16(vrshrn_n_u32(y_low, 16),
                                      vrshrn_n_u32(y_high, 16));
      /* Descale Cb values (right shift) and narrow to 16-bit. */
      uint16x8_t cb_u16 = vcombine_u16(vshrn_n_u32(cb_low, 16),
                                       vshrn_n_u32(cb_high, 16));
      /* Descale Cr values (right shift) and narrow to 16-bit. */
      uint16x8_t cr_u16 = vcombine_u16(vshrn_n_u32(cr_low, 16),
                                       vshrn_n_u32(cr_high, 16));
      /* Narrow Y, Cb, and Cr values to 8-bit and store to memory. Buffer
       * overwrite is permitted up to the next multiple of ALIGN_SIZE bytes.
       */
      vst1_u8(outptr0, vmovn_u16(y_u16));
      vst1_u8(outptr1, vmovn_u16(cb_u16));
      vst1_u8(outptr2, vmovn_u16(cr_u16));

      /* Increment pointers. */
      inptr += (8 * RGB_PIXELSIZE);
      outptr0 += 8;
      outptr1 += 8;
      outptr2 += 8;
    }
  }
}