// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE2 Rescaling functions
//
// Author: Skal (pascal.massimino@gmail.com)

#include "./dsp.h"

#if defined(WEBP_USE_SSE2)
#include <emmintrin.h>

#include <assert.h>
#include "../utils/rescaler.h"

//------------------------------------------------------------------------------
// Implementations of critical functions ImportRow / ExportRow

#define ROUNDER (WEBP_RESCALER_ONE >> 1)
#define MULT_FIX(x, y) (((uint64_t)(x) * (y) + ROUNDER) >> WEBP_RESCALER_RFIX)
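// MULT_FIX(x, y) is the rounded fixed-point product: it returns
// (x * y) / 2^WEBP_RESCALER_RFIX, rounded to the nearest integer.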

// input: 8 bytes ABCDEFGH -> output: A0E0B0F0C0G0D0H0
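// The samples are interleaved pairwise so that _mm_madd_epi16 can blend two
// neighbouring source samples using the 16-bit weights packed in each 32-bit
// lane of the multiplier.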
static void LoadTwoPixels(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  const __m128i B = _mm_unpacklo_epi8(A, zero);              // A0B0C0D0E0F0G0H0
  const __m128i C = _mm_srli_si128(B, 8);                    // E0F0G0H0
  *out = _mm_unpacklo_epi16(B, C);
}

// input: 8 bytes ABCDEFGH -> output: A0B0C0D0E0F0G0H0
static void LoadEightPixels(const uint8_t* const src, __m128i* out) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i A = _mm_loadl_epi64((const __m128i*)(src));  // ABCDEFGH
  *out = _mm_unpacklo_epi8(A, zero);
}

static void RescalerImportRowExpandSSE2(WebPRescaler* const wrk,
                                        const uint8_t* src) {
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = frow + wrk->dst_width * wrk->num_channels;
  const int x_add = wrk->x_add;
  int accum = x_add;
  __m128i cur_pixels;

  assert(!WebPRescalerInputDone(wrk));
  assert(wrk->x_expand);
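  // Each output value is a fixed-point blend of two neighbouring source
  // samples: the 16-bit weights (accum, x_add - accum) are packed into each
  // 32-bit lane of 'mult' so that a single _mm_madd_epi16 produces the
  // interpolated result.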
  if (wrk->num_channels == 4) {
    if (wrk->src_width < 2) {
      WebPRescalerImportRowExpandC(wrk, src);
      return;
    }
    LoadTwoPixels(src, &cur_pixels);
    src += 4;
    while (1) {
      const __m128i mult = _mm_set1_epi32(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      _mm_storeu_si128((__m128i*)frow, out);
      frow += 4;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        LoadTwoPixels(src, &cur_pixels);
        src += 4;
        accum += x_add;
      }
    }
  } else {
    int left;
    const uint8_t* const src_limit = src + wrk->src_width - 8;
    if (wrk->src_width < 8) {
      WebPRescalerImportRowExpandC(wrk, src);
      return;
    }
    LoadEightPixels(src, &cur_pixels);
    src += 7;
    left = 7;
    while (1) {
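      // 'left' counts how many of the loaded samples are still usable in
      // cur_pixels; we advance by shifting one 16-bit sample out, and reload
      // (or fetch a single tail byte) once it runs out.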
      const __m128i mult = _mm_cvtsi32_si128(((x_add - accum) << 16) | accum);
      const __m128i out = _mm_madd_epi16(cur_pixels, mult);
      assert(sizeof(*frow) == sizeof(uint32_t));
      WebPUint32ToMem((uint8_t*)frow, _mm_cvtsi128_si32(out));
      frow += 1;
      if (frow >= frow_end) break;
      accum -= wrk->x_sub;
      if (accum < 0) {
        if (--left) {
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
        } else if (src <= src_limit) {
          LoadEightPixels(src, &cur_pixels);
          src += 7;
          left = 7;
        } else {  // tail
          cur_pixels = _mm_srli_si128(cur_pixels, 2);
          cur_pixels = _mm_insert_epi16(cur_pixels, src[1], 1);
          src += 1;
          left = 1;
        }
        accum += x_add;
      }
    }
  }
  assert(accum == 0);
}

static void RescalerImportRowShrinkSSE2(WebPRescaler* const wrk,
                                        const uint8_t* src) {
  const int x_sub = wrk->x_sub;
  int accum = 0;
  const __m128i zero = _mm_setzero_si128();
  const __m128i mult0 = _mm_set1_epi16(x_sub);
  const __m128i mult1 = _mm_set1_epi32(wrk->fx_scale);
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  __m128i sum = zero;
  rescaler_t* frow = wrk->frow;
  const rescaler_t* const frow_end = wrk->frow + 4 * wrk->dst_width;

  if (wrk->num_channels != 4 || wrk->x_add > (x_sub << 7)) {
    WebPRescalerImportRowShrinkC(wrk, src);
    return;
  }
  assert(!WebPRescalerInputDone(wrk));
  assert(!wrk->x_expand);

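  // For every output pixel, whole input pixels are accumulated into 'sum'
  // (16 bits per channel); the last, partially covered input pixel is split
  // between this output and the next one via the carried fraction below.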
  for (; frow < frow_end; frow += 4) {
    __m128i base = zero;
    accum += wrk->x_add;
    while (accum > 0) {
      const __m128i A = _mm_cvtsi32_si128(WebPMemToUint32(src));
      src += 4;
      base = _mm_unpacklo_epi8(A, zero);
      // To avoid overflow, we need: base * x_add / x_sub < 32768
      // => x_add < x_sub << 7. That's a 1/128 reduction ratio limit.
      sum = _mm_add_epi16(sum, base);
      accum -= x_sub;
    }
    {  // Emit next horizontal pixel.
      const __m128i mult = _mm_set1_epi16(-accum);
      const __m128i frac0 = _mm_mullo_epi16(base, mult);  // 16b x 16b -> 32b
      const __m128i frac1 = _mm_mulhi_epu16(base, mult);
      const __m128i frac = _mm_unpacklo_epi16(frac0, frac1);  // frac is 32b
      const __m128i A0 = _mm_mullo_epi16(sum, mult0);
      const __m128i A1 = _mm_mulhi_epu16(sum, mult0);
      const __m128i B0 = _mm_unpacklo_epi16(A0, A1);     // sum * x_sub
      const __m128i frow_out = _mm_sub_epi32(B0, frac);  // sum * x_sub - frac
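      // The rest computes (frac * fx_scale + ROUNDER) >> 32 per channel:
      // the fraction of the last input pixel that is carried over as the
      // starting 'sum' of the next output pixel.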
      const __m128i D0 = _mm_srli_epi64(frac, 32);
      const __m128i D1 = _mm_mul_epu32(frac, mult1);     // 32b x 16b -> 64b
      const __m128i D2 = _mm_mul_epu32(D0, mult1);
      const __m128i E1 = _mm_add_epi64(D1, rounder);
      const __m128i E2 = _mm_add_epi64(D2, rounder);
      const __m128i F1 = _mm_shuffle_epi32(E1, 1 | (3 << 2));
      const __m128i F2 = _mm_shuffle_epi32(E2, 1 | (3 << 2));
      const __m128i G = _mm_unpacklo_epi32(F1, F2);
      sum = _mm_packs_epi32(G, zero);
      _mm_storeu_si128((__m128i*)frow, frow_out);
    }
  }
  assert(accum == 0);
}

//------------------------------------------------------------------------------
// Row export

// Load eight 32-bit values from src, optionally multiply each one by *mult
// (32b x 32b -> 64b), and return them split over [out0 .. out3]
// (even elements in out0/out1, odd elements in out2/out3).
static WEBP_INLINE void LoadDispatchAndMult(const rescaler_t* const src,
                                            const __m128i* const mult,
                                            __m128i* const out0,
                                            __m128i* const out1,
                                            __m128i* const out2,
                                            __m128i* const out3) {
  const __m128i A0 = _mm_loadu_si128((const __m128i*)(src + 0));
  const __m128i A1 = _mm_loadu_si128((const __m128i*)(src + 4));
  const __m128i A2 = _mm_srli_epi64(A0, 32);
  const __m128i A3 = _mm_srli_epi64(A1, 32);
  if (mult != NULL) {
    *out0 = _mm_mul_epu32(A0, *mult);
    *out1 = _mm_mul_epu32(A1, *mult);
    *out2 = _mm_mul_epu32(A2, *mult);
    *out3 = _mm_mul_epu32(A3, *mult);
  } else {
    *out0 = A0;
    *out1 = A1;
    *out2 = A2;
    *out3 = A3;
  }
}

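// Compute eight output bytes as clamp(MULT_FIX(value, *mult)) from the values
// spread over A0..A3 (even elements in A0/A1, odd in A2/A3) and store them
// at dst.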
static WEBP_INLINE void ProcessRow(const __m128i* const A0,
                                   const __m128i* const A1,
                                   const __m128i* const A2,
                                   const __m128i* const A3,
                                   const __m128i* const mult,
                                   uint8_t* const dst) {
  const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
  const __m128i mask = _mm_set_epi32(0xffffffffu, 0, 0xffffffffu, 0);
  const __m128i B0 = _mm_mul_epu32(*A0, *mult);
  const __m128i B1 = _mm_mul_epu32(*A1, *mult);
  const __m128i B2 = _mm_mul_epu32(*A2, *mult);
  const __m128i B3 = _mm_mul_epu32(*A3, *mult);
  const __m128i C0 = _mm_add_epi64(B0, rounder);
  const __m128i C1 = _mm_add_epi64(B1, rounder);
  const __m128i C2 = _mm_add_epi64(B2, rounder);
  const __m128i C3 = _mm_add_epi64(B3, rounder);
  const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);
  const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
#if (WEBP_RESCALER_RFIX < 32)
  const __m128i D2 =
      _mm_and_si128(_mm_slli_epi64(C2, 32 - WEBP_RESCALER_RFIX), mask);
  const __m128i D3 =
      _mm_and_si128(_mm_slli_epi64(C3, 32 - WEBP_RESCALER_RFIX), mask);
#else
  const __m128i D2 = _mm_and_si128(C2, mask);
  const __m128i D3 = _mm_and_si128(C3, mask);
#endif
  const __m128i E0 = _mm_or_si128(D0, D2);
  const __m128i E1 = _mm_or_si128(D1, D3);
  const __m128i F = _mm_packs_epi32(E0, E1);
  const __m128i G = _mm_packus_epi16(F, F);
  _mm_storel_epi64((__m128i*)dst, G);
}

static void RescalerExportRowExpandSSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const __m128i mult = _mm_set_epi32(0, wrk->fy_scale, 0, wrk->fy_scale);

  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0 && wrk->y_sub + wrk->y_accum >= 0);
  assert(wrk->y_expand);
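  // If no vertical interpolation is pending (y_accum == 0), the output row is
  // just frow scaled by fy_scale; otherwise it is the weighted blend
  // A * frow + B * irow, with the weights derived from the current y_accum
  // position within y_sub.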
  if (wrk->y_accum == 0) {
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult(frow + x_out, NULL, &A0, &A1, &A2, &A3);
      ProcessRow(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t J = frow[x_out];
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  } else {
    const uint32_t B = WEBP_RESCALER_FRAC(-wrk->y_accum, wrk->y_sub);
    const uint32_t A = (uint32_t)(WEBP_RESCALER_ONE - B);
    const __m128i mA = _mm_set_epi32(0, A, 0, A);
    const __m128i mB = _mm_set_epi32(0, B, 0, B);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult(frow + x_out, &mA, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult(irow + x_out, &mB, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(A0, B0);
        const __m128i C1 = _mm_add_epi64(A1, B1);
        const __m128i C2 = _mm_add_epi64(A2, B2);
        const __m128i C3 = _mm_add_epi64(A3, B3);
        const __m128i D0 = _mm_add_epi64(C0, rounder);
        const __m128i D1 = _mm_add_epi64(C1, rounder);
        const __m128i D2 = _mm_add_epi64(C2, rounder);
        const __m128i D3 = _mm_add_epi64(C3, rounder);
        const __m128i E0 = _mm_srli_epi64(D0, WEBP_RESCALER_RFIX);
        const __m128i E1 = _mm_srli_epi64(D1, WEBP_RESCALER_RFIX);
        const __m128i E2 = _mm_srli_epi64(D2, WEBP_RESCALER_RFIX);
        const __m128i E3 = _mm_srli_epi64(D3, WEBP_RESCALER_RFIX);
        ProcessRow(&E0, &E1, &E2, &E3, &mult, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint64_t I = (uint64_t)A * frow[x_out]
                       + (uint64_t)B * irow[x_out];
      const uint32_t J = (uint32_t)((I + ROUNDER) >> WEBP_RESCALER_RFIX);
      const int v = (int)MULT_FIX(J, wrk->fy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
    }
  }
}

static void RescalerExportRowShrinkSSE2(WebPRescaler* const wrk) {
  int x_out;
  uint8_t* const dst = wrk->dst;
  rescaler_t* const irow = wrk->irow;
  const int x_out_max = wrk->dst_width * wrk->num_channels;
  const rescaler_t* const frow = wrk->frow;
  const uint32_t yscale = wrk->fy_scale * (-wrk->y_accum);
  assert(!WebPRescalerOutputDone(wrk));
  assert(wrk->y_accum <= 0);
  assert(!wrk->y_expand);
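  // If a fraction of the next row has already been accumulated (yscale != 0),
  // emit MULT_FIX(irow - frac, fxy_scale) and keep 'frac' in irow as the new
  // fractional start; otherwise emit MULT_FIX(irow, fxy_scale) and clear irow.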
  if (yscale) {
    const int scale_xy = wrk->fxy_scale;
    const __m128i mult_xy = _mm_set_epi32(0, scale_xy, 0, scale_xy);
    const __m128i mult_y = _mm_set_epi32(0, yscale, 0, yscale);
    const __m128i rounder = _mm_set_epi32(0, ROUNDER, 0, ROUNDER);
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3, B0, B1, B2, B3;
      LoadDispatchAndMult(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      LoadDispatchAndMult(frow + x_out, &mult_y, &B0, &B1, &B2, &B3);
      {
        const __m128i C0 = _mm_add_epi64(B0, rounder);
        const __m128i C1 = _mm_add_epi64(B1, rounder);
        const __m128i C2 = _mm_add_epi64(B2, rounder);
        const __m128i C3 = _mm_add_epi64(B3, rounder);
        const __m128i D0 = _mm_srli_epi64(C0, WEBP_RESCALER_RFIX);  // = frac
        const __m128i D1 = _mm_srli_epi64(C1, WEBP_RESCALER_RFIX);
        const __m128i D2 = _mm_srli_epi64(C2, WEBP_RESCALER_RFIX);
        const __m128i D3 = _mm_srli_epi64(C3, WEBP_RESCALER_RFIX);
        const __m128i E0 = _mm_sub_epi64(A0, D0);   // irow[x] - frac
        const __m128i E1 = _mm_sub_epi64(A1, D1);
        const __m128i E2 = _mm_sub_epi64(A2, D2);
        const __m128i E3 = _mm_sub_epi64(A3, D3);
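        // Re-interleave the even/odd fractions (D0..D3) back into 32-bit
        // order before storing them as the new irow content.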
        const __m128i F2 = _mm_slli_epi64(D2, 32);
        const __m128i F3 = _mm_slli_epi64(D3, 32);
        const __m128i G0 = _mm_or_si128(D0, F2);
        const __m128i G1 = _mm_or_si128(D1, F3);
        _mm_storeu_si128((__m128i*)(irow + x_out + 0), G0);
        _mm_storeu_si128((__m128i*)(irow + x_out + 4), G1);
        ProcessRow(&E0, &E1, &E2, &E3, &mult_xy, dst + x_out);
      }
    }
    for (; x_out < x_out_max; ++x_out) {
      const uint32_t frac = (int)MULT_FIX(frow[x_out], yscale);
      const int v = (int)MULT_FIX(irow[x_out] - frac, wrk->fxy_scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = frac;   // new fractional start
    }
  } else {
    const uint32_t scale = wrk->fxy_scale;
    const __m128i mult = _mm_set_epi32(0, scale, 0, scale);
    const __m128i zero = _mm_setzero_si128();
    for (x_out = 0; x_out + 8 <= x_out_max; x_out += 8) {
      __m128i A0, A1, A2, A3;
      LoadDispatchAndMult(irow + x_out, NULL, &A0, &A1, &A2, &A3);
      _mm_storeu_si128((__m128i*)(irow + x_out + 0), zero);
      _mm_storeu_si128((__m128i*)(irow + x_out + 4), zero);
      ProcessRow(&A0, &A1, &A2, &A3, &mult, dst + x_out);
    }
    for (; x_out < x_out_max; ++x_out) {
      const int v = (int)MULT_FIX(irow[x_out], scale);
      assert(v >= 0 && v <= 255);
      dst[x_out] = v;
      irow[x_out] = 0;
    }
  }
}

#undef MULT_FIX
#undef ROUNDER

//------------------------------------------------------------------------------

extern void WebPRescalerDspInitSSE2(void);

WEBP_TSAN_IGNORE_FUNCTION void WebPRescalerDspInitSSE2(void) {
  WebPRescalerImportRowExpand = RescalerImportRowExpandSSE2;
  WebPRescalerImportRowShrink = RescalerImportRowShrinkSSE2;
  WebPRescalerExportRowExpand = RescalerExportRowExpandSSE2;
  WebPRescalerExportRowShrink = RescalerExportRowShrinkSSE2;
}

#else  // !WEBP_USE_SSE2

WEBP_DSP_INIT_STUB(WebPRescalerDspInitSSE2)

#endif  // WEBP_USE_SSE2