/*
 * Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <assert.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/ppc/types_vsx.h"

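// Dither table (defined in vpx_dsp/deblock.c); the down filter indexes it as
// vpx_rv[(r & 127) + (c & 7)] to vary the rounding bias per pixel.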
extern const int16_t vpx_rv[];

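// Permutation that packs the eight filtered 16-bit lanes back down to bytes
// and fills the remaining eight bytes from the second operand, so the upper
// half of the destination vector is stored back unchanged.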
static const uint8x16_t load_merge = { 0x00, 0x02, 0x04, 0x06, 0x08, 0x0A,
                                       0x0C, 0x0E, 0x18, 0x19, 0x1A, 0x1B,
                                       0x1C, 0x1D, 0x1E, 0x1F };

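// Permutation that keeps the first eight (filtered) bytes and takes the last
// eight from the second operand; used when only eight output pixels are valid.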
static const uint8x16_t st8_perm = { 0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
                                     0x06, 0x07, 0x18, 0x19, 0x1A, 0x1B,
                                     0x1C, 0x1D, 0x1E, 0x1F };

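// Vector form of the scalar kernel in vpx_post_proc_down_and_across_mb_row_c:
// where all four neighbor differences stay below the filter limit,
//   k1 = (ctx[0] + ctx[1] + 1) >> 1
//   k2 = (ctx[3] + ctx[2] + 1) >> 1
//   k3 = (k1 + k2 + 1) >> 1
//   v  = (k3 + v + 1) >> 1
// vec_avg supplies the rounded averages and vec_sel keeps the original pixel
// in lanes where the mask is false.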
static INLINE uint8x16_t apply_filter(uint8x16_t ctx[4], uint8x16_t v,
                                      uint8x16_t filter) {
  const uint8x16_t k1 = vec_avg(ctx[0], ctx[1]);
  const uint8x16_t k2 = vec_avg(ctx[3], ctx[2]);
  const uint8x16_t k3 = vec_avg(k1, k2);
  const uint8x16_t f_a = vec_max(vec_absd(v, ctx[0]), vec_absd(v, ctx[1]));
  const uint8x16_t f_b = vec_max(vec_absd(v, ctx[2]), vec_absd(v, ctx[3]));
  const bool8x16_t mask = vec_cmplt(vec_max(f_a, f_b), filter);
  return vec_sel(v, vec_avg(k3, v), mask);
}

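// Loads the vertical context: the two rows above and the two rows below the
// current position.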
static INLINE void vert_ctx(uint8x16_t ctx[4], int col, uint8_t *src,
                            int stride) {
  ctx[0] = vec_vsx_ld(col - 2 * stride, src);
  ctx[1] = vec_vsx_ld(col - stride, src);
  ctx[2] = vec_vsx_ld(col + stride, src);
  ctx[3] = vec_vsx_ld(col + 2 * stride, src);
}

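// Builds the horizontal context (pixels at offsets -2, -1, +1 and +2) by
// shifting bytes across the left, current and right vectors with vec_perm.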
static INLINE void horz_ctx(uint8x16_t ctx[4], uint8x16_t left_ctx,
                            uint8x16_t v, uint8x16_t right_ctx) {
  static const uint8x16_t l2_perm = { 0x0E, 0x0F, 0x10, 0x11, 0x12, 0x13,
                                      0x14, 0x15, 0x16, 0x17, 0x18, 0x19,
                                      0x1A, 0x1B, 0x1C, 0x1D };

  static const uint8x16_t l1_perm = { 0x0F, 0x10, 0x11, 0x12, 0x13, 0x14,
                                      0x15, 0x16, 0x17, 0x18, 0x19, 0x1A,
                                      0x1B, 0x1C, 0x1D, 0x1E };

  static const uint8x16_t r1_perm = { 0x01, 0x02, 0x03, 0x04, 0x05, 0x06,
                                      0x07, 0x08, 0x09, 0x0A, 0x0B, 0x0C,
                                      0x0D, 0x0E, 0x0F, 0x10 };

  static const uint8x16_t r2_perm = { 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                      0x08, 0x09, 0x0A, 0x0B, 0x0C, 0x0D,
                                      0x0E, 0x0F, 0x10, 0x11 };
  ctx[0] = vec_perm(left_ctx, v, l2_perm);
  ctx[1] = vec_perm(left_ctx, v, l1_perm);
  ctx[2] = vec_perm(v, right_ctx, r1_perm);
  ctx[3] = vec_perm(v, right_ctx, r2_perm);
}

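// Runs the down filter (vertical context) and then the across filter
// (horizontal context) over one macroblock row. The main loops handle 16
// pixels per iteration; the tail blocks filter the final 8 columns and store
// only the valid half of the vector.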
void vpx_post_proc_down_and_across_mb_row_vsx(unsigned char *src_ptr,
                                              unsigned char *dst_ptr,
                                              int src_pixels_per_line,
                                              int dst_pixels_per_line, int cols,
                                              unsigned char *f, int size) {
  int row, col;
  uint8x16_t ctx[4], out, v, left_ctx;

  for (row = 0; row < size; row++) {
    for (col = 0; col < cols - 8; col += 16) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      v = vec_vsx_ld(col, src_ptr);
      vert_ctx(ctx, col, src_ptr, src_pixels_per_line);
      vec_vsx_st(apply_filter(ctx, v, filter), col, dst_ptr);
    }

    if (col != cols) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      v = vec_vsx_ld(col, src_ptr);
      vert_ctx(ctx, col, src_ptr, src_pixels_per_line);
      out = apply_filter(ctx, v, filter);
      vec_vsx_st(vec_perm(out, v, st8_perm), col, dst_ptr);
    }

    /* now post_proc_across */
    left_ctx = vec_splats(dst_ptr[0]);
    v = vec_vsx_ld(0, dst_ptr);
    for (col = 0; col < cols - 8; col += 16) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      const uint8x16_t right_ctx = (col + 16 == cols)
                                       ? vec_splats(dst_ptr[cols - 1])
                                       : vec_vsx_ld(col, dst_ptr + 16);
      horz_ctx(ctx, left_ctx, v, right_ctx);
      vec_vsx_st(apply_filter(ctx, v, filter), col, dst_ptr);
      left_ctx = v;
      v = right_ctx;
    }

    if (col != cols) {
      const uint8x16_t filter = vec_vsx_ld(col, f);
      const uint8x16_t right_ctx = vec_splats(dst_ptr[cols - 1]);
      horz_ctx(ctx, left_ctx, v, right_ctx);
      out = apply_filter(ctx, v, filter);
      vec_vsx_st(vec_perm(out, v, st8_perm), col, dst_ptr);
    }

    src_ptr += src_pixels_per_line;
    dst_ptr += dst_pixels_per_line;
  }
}

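// The helpers below implement the 15-tap sliding-window sums used by the
// vpx_mbpost_proc_* filters: per-lane increments are widened, squared and
// prefix-summed so that eight window totals are updated at once.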
// C: s[c + 7]
// Widens bytes 7..14 of c to signed 16-bit lanes, giving each of the eight
// pixels being processed its right-hand context value.
static INLINE int16x8_t next7l_s16(uint8x16_t c) {
  static const uint8x16_t next7_perm = {
    0x07, 0x10, 0x08, 0x11, 0x09, 0x12, 0x0A, 0x13,
    0x0B, 0x14, 0x0C, 0x15, 0x0D, 0x16, 0x0E, 0x17,
  };
  return (int16x8_t)vec_perm(c, vec_zeros_u8, next7_perm);
}

// Slide across the window and add: an inclusive prefix sum across lanes, so
// lane i ends up holding x[0] + ... + x[i].
static INLINE int16x8_t slide_sum_s16(int16x8_t x) {
  // x = A B C D E F G H
  //
  // 0 A B C D E F G
  const int16x8_t sum1 = vec_add(x, vec_slo(x, vec_splats((int8_t)(2 << 3))));
  // 0 0 A B C D E F
  const int16x8_t sum2 = vec_add(vec_slo(x, vec_splats((int8_t)(4 << 3))),
                                 // 0 0 0 A B C D E
                                 vec_slo(x, vec_splats((int8_t)(6 << 3))));
  // 0 0 0 0 A B C D
  const int16x8_t sum3 = vec_add(vec_slo(x, vec_splats((int8_t)(8 << 3))),
                                 // 0 0 0 0 0 A B C
                                 vec_slo(x, vec_splats((int8_t)(10 << 3))));
  // 0 0 0 0 0 0 A B
  const int16x8_t sum4 = vec_add(vec_slo(x, vec_splats((int8_t)(12 << 3))),
                                 // 0 0 0 0 0 0 0 A
                                 vec_slo(x, vec_splats((int8_t)(14 << 3))));
  return vec_add(vec_add(sum1, sum2), vec_add(sum3, sum4));
}

// Slide across the window and add: the same prefix-sum idea for the squared
// increments, split across even and odd 32-bit lanes.
static INLINE int32x4_t slide_sumsq_s32(int32x4_t xsq_even, int32x4_t xsq_odd) {
  // 0 A C E
  // + 0 B D F
  int32x4_t sumsq_1 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(4 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(4 << 3))));
  // 0 0 A C
  // + 0 0 B D
  int32x4_t sumsq_2 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(8 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(8 << 3))));
  // 0 0 0 A
  // + 0 0 0 B
  int32x4_t sumsq_3 = vec_add(vec_slo(xsq_even, vec_splats((int8_t)(12 << 3))),
                              vec_slo(xsq_odd, vec_splats((int8_t)(12 << 3))));
  sumsq_1 = vec_add(sumsq_1, xsq_even);
  sumsq_2 = vec_add(sumsq_2, sumsq_3);
  return vec_add(sumsq_1, sumsq_2);
}

// C: (b + sum + val) >> 4
static INLINE int16x8_t filter_s16(int16x8_t b, int16x8_t sum, int16x8_t val) {
  return vec_sra(vec_add(vec_add(b, sum), val), vec_splats((uint16_t)4));
}

// C: sumsq * 15 - sum * sum
// True for lanes where the windowed variance measure falls below the limit;
// mask_merge interleaves the even and odd 32-bit compare results back into
// 16-bit lane order.
static INLINE bool16x8_t mask_s16(int32x4_t sumsq_even, int32x4_t sumsq_odd,
                                  int16x8_t sum, int32x4_t lim) {
  static const uint8x16_t mask_merge = { 0x00, 0x01, 0x10, 0x11, 0x04, 0x05,
                                         0x14, 0x15, 0x08, 0x09, 0x18, 0x19,
                                         0x0C, 0x0D, 0x1C, 0x1D };
  const int32x4_t sumsq_odd_scaled =
      vec_mul(sumsq_odd, vec_splats((int32_t)15));
  const int32x4_t sumsq_even_scaled =
      vec_mul(sumsq_even, vec_splats((int32_t)15));
  const int32x4_t thres_odd = vec_sub(sumsq_odd_scaled, vec_mulo(sum, sum));
  const int32x4_t thres_even = vec_sub(sumsq_even_scaled, vec_mule(sum, sum));

  const bool32x4_t mask_odd = vec_cmplt(thres_odd, lim);
  const bool32x4_t mask_even = vec_cmplt(thres_even, lim);
  return vec_perm((bool16x8_t)mask_even, (bool16x8_t)mask_odd, mask_merge);
}

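// Horizontal 15-tap mbpost filter. A scalar sketch of the per-pixel rule this
// vectorizes (cf. vpx_mbpost_proc_across_ip_c in vpx_dsp/deblock.c), with the
// window sums maintained incrementally:
//
//   sum   += s[c + 7] - s[c - 8];
//   sumsq += s[c + 7] * s[c + 7] - s[c - 8] * s[c - 8];
//   if (sumsq * 15 - sum * sum < flimit) s[c] = (8 + sum + s[c]) >> 4;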
void vpx_mbpost_proc_across_ip_vsx(unsigned char *src, int pitch, int rows,
                                   int cols, int flimit) {
  int row, col;
  const int32x4_t lim = vec_splats(flimit);

  // 8 columns are processed at a time.
  assert(cols % 8 == 0);

  for (row = 0; row < rows; row++) {
    // The sum is signed and requires at most 13 bits.
    // (8 bits + sign) * 15 (4 bits)
    int16x8_t sum;
    // The sum of squares requires at most 20 bits.
    // (16 bits + sign) * 15 (4 bits)
    int32x4_t sumsq_even, sumsq_odd;

    // Fill left context with first col.
    int16x8_t left_ctx = vec_splats((int16_t)src[0]);
    int16_t s = src[0] * 9;
    int32_t ssq = src[0] * src[0] * 9 + 16;

    // Fill the next 6 columns of the sliding window with cols 2 to 7.
    for (col = 1; col <= 6; ++col) {
      s += src[col];
      ssq += src[col] * src[col];
    }
    // Set this sum to every element in the window.
    sum = vec_splats(s);
    sumsq_even = vec_splats(ssq);
    sumsq_odd = vec_splats(ssq);

    for (col = 0; col < cols; col += 8) {
      bool16x8_t mask;
      int16x8_t filtered, masked;
      uint8x16_t out;

      const uint8x16_t val = vec_vsx_ld(0, src + col);
      const int16x8_t val_high = unpack_to_s16_h(val);

      // C: s[c + 7]
      const int16x8_t right_ctx = (col + 8 == cols)
                                      ? vec_splats((int16_t)src[col + 7])
                                      : next7l_s16(val);

      // C: x = s[c + 7] - s[c - 8];
      const int16x8_t x = vec_sub(right_ctx, left_ctx);
      const int32x4_t xsq_even =
          vec_sub(vec_mule(right_ctx, right_ctx), vec_mule(left_ctx, left_ctx));
      const int32x4_t xsq_odd =
          vec_sub(vec_mulo(right_ctx, right_ctx), vec_mulo(left_ctx, left_ctx));

      const int32x4_t sumsq_tmp = slide_sumsq_s32(xsq_even, xsq_odd);
      // A C E G
      // 0 B D F
      // 0 A C E
      // 0 0 B D
      // 0 0 A C
      // 0 0 0 B
      // 0 0 0 A
      sumsq_even = vec_add(sumsq_even, sumsq_tmp);
      // B D F G
      // A C E G
      // 0 B D F
      // 0 A C E
      // 0 0 B D
      // 0 0 A C
      // 0 0 0 B
      // 0 0 0 A
      sumsq_odd = vec_add(sumsq_odd, vec_add(sumsq_tmp, xsq_odd));

      sum = vec_add(sum, slide_sum_s16(x));

      // C: (8 + sum + s[c]) >> 4
      filtered = filter_s16(vec_splats((int16_t)8), sum, val_high);
      // C: sumsq * 15 - sum * sum
      mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
      masked = vec_sel(val_high, filtered, mask);

      out = vec_perm((uint8x16_t)masked, vec_vsx_ld(0, src + col), load_merge);
      vec_vsx_st(out, 0, src + col);

      // Update window sum and square sum: carry the last lane's running
      // totals into the next group of eight columns.
      sum = vec_splat(sum, 7);
      sumsq_even = vec_splat(sumsq_odd, 3);
      sumsq_odd = vec_splat(sumsq_odd, 3);

      // C: s[c - 8] (for next iteration)
      left_ctx = val_high;
    }
    src += pitch;
  }
}

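// Vertical counterpart: the same 15-tap window runs down each column, with the
// fixed rounding bias 8 replaced by a per-pixel value from the vpx_rv dither
// table (cf. vpx_mbpost_proc_down_c):
//
//   if (sumsq * 15 - sum * sum < flimit)
//     s[0] = (vpx_rv[(r & 127) + (c & 7)] + sum + s[0]) >> 4;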
void vpx_mbpost_proc_down_vsx(uint8_t *dst, int pitch, int rows, int cols,
                              int flimit) {
  int col, row, i;
  int16x8_t window[16];
  const int32x4_t lim = vec_splats(flimit);

  // 8 columns are processed at a time.
  assert(cols % 8 == 0);
  // If rows is less than 8 the bottom border extension fails.
  assert(rows >= 8);

  for (col = 0; col < cols; col += 8) {
    // The sum is signed and requires at most 13 bits.
    // (8 bits + sign) * 15 (4 bits)
    int16x8_t r1, sum;
    // The sum of squares requires at most 20 bits.
    // (16 bits + sign) * 15 (4 bits)
    int32x4_t sumsq_even, sumsq_odd;

    r1 = unpack_to_s16_h(vec_vsx_ld(0, dst));
    // Fill sliding window with first row.
    for (i = 0; i <= 8; i++) {
      window[i] = r1;
    }
    // First 9 rows of the sliding window are the same.
    // sum = r1 * 9
    sum = vec_mladd(r1, vec_splats((int16_t)9), vec_zeros_s16);

    // sumsq = r1 * r1 * 9
    sumsq_even = vec_mule(sum, r1);
    sumsq_odd = vec_mulo(sum, r1);

    // Fill the next 6 rows of the sliding window with rows 2 to 7.
    for (i = 1; i <= 6; ++i) {
      const int16x8_t next_row = unpack_to_s16_h(vec_vsx_ld(i * pitch, dst));
      window[i + 8] = next_row;
      sum = vec_add(sum, next_row);
      sumsq_odd = vec_add(sumsq_odd, vec_mulo(next_row, next_row));
      sumsq_even = vec_add(sumsq_even, vec_mule(next_row, next_row));
    }

    for (row = 0; row < rows; row++) {
      int32x4_t d15_even, d15_odd, d0_even, d0_odd;
      bool16x8_t mask;
      int16x8_t filtered, masked;
      uint8x16_t out;

      const int16x8_t rv = vec_vsx_ld(0, vpx_rv + (row & 127));

      // Move the sliding window
      if (row + 7 < rows) {
        window[15] = unpack_to_s16_h(vec_vsx_ld((row + 7) * pitch, dst));
      } else {
        window[15] = window[14];
      }

      // C: sum += s[7 * pitch] - s[-8 * pitch];
      sum = vec_add(sum, vec_sub(window[15], window[0]));

      // C: sumsq += s[7 * pitch] * s[7 * pitch] - s[-8 * pitch] * s[-8 *
      // pitch];
      // Optimization Note: Caching a squared-window for odd and even is
      // slower than just repeating the multiplies.
      d15_odd = vec_mulo(window[15], window[15]);
      d15_even = vec_mule(window[15], window[15]);
      d0_odd = vec_mulo(window[0], window[0]);
      d0_even = vec_mule(window[0], window[0]);
      sumsq_odd = vec_add(sumsq_odd, vec_sub(d15_odd, d0_odd));
      sumsq_even = vec_add(sumsq_even, vec_sub(d15_even, d0_even));

      // C: (vpx_rv[(r & 127) + (c & 7)] + sum + s[0]) >> 4
      filtered = filter_s16(rv, sum, window[8]);

      // C: sumsq * 15 - sum * sum
      mask = mask_s16(sumsq_even, sumsq_odd, sum, lim);
      masked = vec_sel(window[8], filtered, mask);

      // TODO(ltrudeau) If cols % 16 == 0, we could just process 16 per
      // iteration
      out = vec_perm((uint8x16_t)masked, vec_vsx_ld(0, dst + row * pitch),
                     load_merge);
      vec_vsx_st(out, 0, dst + row * pitch);

      // Optimization Note: Turns out that the following loop is faster than
      // using pointers to manage the sliding window.
      for (i = 1; i < 16; i++) {
        window[i - 1] = window[i];
      }
    }
    dst += 8;
  }
}