/*
 *  Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>
#include <stdlib.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"

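// Computes the rounded average of a 4x4 block of 8-bit pixels. Each
// load_unaligned_u8 call packs two rows into one 8-lane vector, so a single
// widening add covers all 16 pixels; (sum + 8) >> 4 rounds to nearest.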
unsigned int aom_avg_4x4_neon(const uint8_t *p, int stride) {
  const uint8x8_t s0 = load_unaligned_u8(p, stride);
  const uint8x8_t s1 = load_unaligned_u8(p + 2 * stride, stride);

  const uint32_t sum = horizontal_add_u16x8(vaddl_u8(s0, s1));
  return (sum + (1 << 3)) >> 4;
}

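// Computes the rounded average of an 8x8 block of 8-bit pixels. The first two
// rows seed a 16-bit accumulator via a widening add; the remaining six rows
// are folded in with widening accumulates, then (sum + 32) >> 6 rounds.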
unsigned int aom_avg_8x8_neon(const uint8_t *p, int stride) {
  uint8x8_t s0 = vld1_u8(p);
  p += stride;
  uint8x8_t s1 = vld1_u8(p);
  p += stride;
  uint16x8_t acc = vaddl_u8(s0, s1);

  int i = 0;
  do {
    const uint8x8_t si = vld1_u8(p);
    p += stride;
    acc = vaddw_u8(acc, si);
  } while (++i < 6);

  const uint32_t sum = horizontal_add_u16x8(acc);
  return (sum + (1 << 5)) >> 6;
}

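// Computes the averages of the four 8x8 sub-blocks of the 16x16 block at
// offset (x16_idx, y16_idx) in the source, written to avg[] in raster order.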
void aom_avg_8x8_quad_neon(const uint8_t *s, int p, int x16_idx, int y16_idx,
                           int *avg) {
  avg[0] = aom_avg_8x8_neon(s + y16_idx * p + x16_idx, p);
  avg[1] = aom_avg_8x8_neon(s + y16_idx * p + (x16_idx + 8), p);
  avg[2] = aom_avg_8x8_neon(s + (y16_idx + 8) * p + x16_idx, p);
  avg[3] = aom_avg_8x8_neon(s + (y16_idx + 8) * p + (x16_idx + 8), p);
}

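// Low-precision SATD: sums |coeff[i]| over the block, 16 coefficients per
// iteration. length must be a non-zero multiple of 16 or the loop does not
// terminate. vpadalq_s16 widens to 32 bits while accumulating, so the 16-bit
// absolute values cannot overflow the accumulators.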
int aom_satd_lp_neon(const int16_t *coeff, int length) {
  int16x8_t s0 = vld1q_s16(coeff);
  int16x8_t s1 = vld1q_s16(coeff + 8);

  int16x8_t abs0 = vabsq_s16(s0);
  int16x8_t abs1 = vabsq_s16(s1);

  int32x4_t acc0 = vpaddlq_s16(abs0);
  int32x4_t acc1 = vpaddlq_s16(abs1);

  length -= 16;
  coeff += 16;

  while (length != 0) {
    s0 = vld1q_s16(coeff);
    s1 = vld1q_s16(coeff + 8);

    abs0 = vabsq_s16(s0);
    abs1 = vabsq_s16(s1);

    acc0 = vpadalq_s16(acc0, abs0);
    acc1 = vpadalq_s16(acc1, abs1);

    length -= 16;
    coeff += 16;
  }

  int32x4_t accum = vaddq_s32(acc0, acc1);
  return horizontal_add_s32x4(accum);
}

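// For each of the `width` columns, sums the column over `height` rows and
// scales the total by 2^-norm_factor into hbuf. vshlq_s16 with a negative
// per-lane shift count performs the right shift.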
void aom_int_pro_row_neon(int16_t *hbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  assert(width % 16 == 0);
  assert(height % 4 == 0);

  const int16x8_t neg_norm_factor = vdupq_n_s16(-norm_factor);
  uint16x8_t sum_lo[2], sum_hi[2];

  int w = 0;
  do {
    const uint8_t *r = ref + w;
    uint8x16_t r0 = vld1q_u8(r + 0 * ref_stride);
    uint8x16_t r1 = vld1q_u8(r + 1 * ref_stride);
    uint8x16_t r2 = vld1q_u8(r + 2 * ref_stride);
    uint8x16_t r3 = vld1q_u8(r + 3 * ref_stride);

    sum_lo[0] = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
    sum_hi[0] = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
    sum_lo[1] = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
    sum_hi[1] = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));

    r += 4 * ref_stride;

    for (int h = height - 4; h != 0; h -= 4) {
      r0 = vld1q_u8(r + 0 * ref_stride);
      r1 = vld1q_u8(r + 1 * ref_stride);
      r2 = vld1q_u8(r + 2 * ref_stride);
      r3 = vld1q_u8(r + 3 * ref_stride);

      uint16x8_t tmp0_lo = vaddl_u8(vget_low_u8(r0), vget_low_u8(r1));
      uint16x8_t tmp0_hi = vaddl_u8(vget_high_u8(r0), vget_high_u8(r1));
      uint16x8_t tmp1_lo = vaddl_u8(vget_low_u8(r2), vget_low_u8(r3));
      uint16x8_t tmp1_hi = vaddl_u8(vget_high_u8(r2), vget_high_u8(r3));

      sum_lo[0] = vaddq_u16(sum_lo[0], tmp0_lo);
      sum_hi[0] = vaddq_u16(sum_hi[0], tmp0_hi);
      sum_lo[1] = vaddq_u16(sum_lo[1], tmp1_lo);
      sum_hi[1] = vaddq_u16(sum_hi[1], tmp1_hi);

      r += 4 * ref_stride;
    }

    sum_lo[0] = vaddq_u16(sum_lo[0], sum_lo[1]);
    sum_hi[0] = vaddq_u16(sum_hi[0], sum_hi[1]);

    const int16x8_t avg0 =
        vshlq_s16(vreinterpretq_s16_u16(sum_lo[0]), neg_norm_factor);
    const int16x8_t avg1 =
        vshlq_s16(vreinterpretq_s16_u16(sum_hi[0]), neg_norm_factor);

    vst1q_s16(hbuf + w, avg0);
    vst1q_s16(hbuf + w + 8, avg1);
    w += 16;
  } while (w < width);
}

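// For each row, sums the row's `width` pixels with pairwise widening
// accumulates, then scales the four row totals per iteration by
// 2^-norm_factor into vbuf.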
void aom_int_pro_col_neon(int16_t *vbuf, const uint8_t *ref,
                          const int ref_stride, const int width,
                          const int height, int norm_factor) {
  assert(width % 16 == 0);
  assert(height % 4 == 0);

  const int16x4_t neg_norm_factor = vdup_n_s16(-norm_factor);
  uint16x8_t sum[4];

  int h = 0;
  do {
    sum[0] = vpaddlq_u8(vld1q_u8(ref + 0 * ref_stride));
    sum[1] = vpaddlq_u8(vld1q_u8(ref + 1 * ref_stride));
    sum[2] = vpaddlq_u8(vld1q_u8(ref + 2 * ref_stride));
    sum[3] = vpaddlq_u8(vld1q_u8(ref + 3 * ref_stride));

    for (int w = 16; w < width; w += 16) {
      sum[0] = vpadalq_u8(sum[0], vld1q_u8(ref + 0 * ref_stride + w));
      sum[1] = vpadalq_u8(sum[1], vld1q_u8(ref + 1 * ref_stride + w));
      sum[2] = vpadalq_u8(sum[2], vld1q_u8(ref + 2 * ref_stride + w));
      sum[3] = vpadalq_u8(sum[3], vld1q_u8(ref + 3 * ref_stride + w));
    }

    uint16x4_t sum_4d = vmovn_u32(horizontal_add_4d_u16x8(sum));
    int16x4_t avg = vshl_s16(vreinterpret_s16_u16(sum_4d), neg_norm_factor);
    vst1_s16(vbuf + h, avg);

    ref += 4 * ref_stride;
    h += 4;
  } while (h < height);
}

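// Full-precision SATD: sums |coeff[i]| over the block. vabaq_s32 against a
// zero vector folds the absolute value and the accumulation into one
// instruction.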
// coeff: 20 bits, dynamic range [-524287, 524287].
// length: value range {16, 32, 64, 128, 256, 512, 1024}.
int aom_satd_neon(const tran_low_t *coeff, int length) {
  const int32x4_t zero = vdupq_n_s32(0);

  int32x4_t s0 = vld1q_s32(&coeff[0]);
  int32x4_t s1 = vld1q_s32(&coeff[4]);
  int32x4_t s2 = vld1q_s32(&coeff[8]);
  int32x4_t s3 = vld1q_s32(&coeff[12]);

  int32x4_t accum0 = vabsq_s32(s0);
  int32x4_t accum1 = vabsq_s32(s2);
  accum0 = vabaq_s32(accum0, s1, zero);
  accum1 = vabaq_s32(accum1, s3, zero);

  length -= 16;
  coeff += 16;

  while (length != 0) {
    s0 = vld1q_s32(&coeff[0]);
    s1 = vld1q_s32(&coeff[4]);
    s2 = vld1q_s32(&coeff[8]);
    s3 = vld1q_s32(&coeff[12]);

    accum0 = vabaq_s32(accum0, s0, zero);
    accum1 = vabaq_s32(accum1, s1, zero);
    accum0 = vabaq_s32(accum0, s2, zero);
    accum1 = vabaq_s32(accum1, s3, zero);

    length -= 16;
    coeff += 16;
  }

  // satd: 30 bits, dynamic range [-524287 * 1024, 524287 * 1024]
  return horizontal_add_s32x4(vaddq_s32(accum0, accum1));
}

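// Computes a variance measure of the difference between two vectors of
// 4 << bwl 16-bit samples: the sum of squared differences minus the squared
// sum of differences divided by the element count (sse - sum^2 / n).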
int aom_vector_var_neon(const int16_t *ref, const int16_t *src, int bwl) {
  assert(bwl >= 2 && bwl <= 5);
  int width = 4 << bwl;

  int16x8_t r = vld1q_s16(ref);
  int16x8_t s = vld1q_s16(src);

  // diff: dynamic range [-510, 510], 10 (signed) bits.
  int16x8_t diff = vsubq_s16(r, s);
  // v_mean: dynamic range 16 * diff -> [-8160, 8160], 14 (signed) bits.
  int16x8_t v_mean = diff;
  // v_sse: dynamic range 2 * 16 * diff^2 -> [0, 8,323,200], 24 (signed) bits.
  int32x4_t v_sse[2];
  v_sse[0] = vmull_s16(vget_low_s16(diff), vget_low_s16(diff));
  v_sse[1] = vmull_s16(vget_high_s16(diff), vget_high_s16(diff));

  ref += 8;
  src += 8;
  width -= 8;

  do {
    r = vld1q_s16(ref);
    s = vld1q_s16(src);

    diff = vsubq_s16(r, s);
    v_mean = vaddq_s16(v_mean, diff);

    v_sse[0] = vmlal_s16(v_sse[0], vget_low_s16(diff), vget_low_s16(diff));
    v_sse[1] = vmlal_s16(v_sse[1], vget_high_s16(diff), vget_high_s16(diff));

    ref += 8;
    src += 8;
    width -= 8;
  } while (width != 0);

  // Dynamic range [0, 65280], 16 (unsigned) bits.
  const uint32_t mean_abs = abs(horizontal_add_s16x8(v_mean));
  const int32_t sse = horizontal_add_s32x4(vaddq_s32(v_sse[0], v_sse[1]));

  // (mean_abs * mean_abs): dynamic range 32 (unsigned) bits.
  return sse - ((mean_abs * mean_abs) >> (bwl + 2));
}

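// Computes the minimum and maximum absolute difference between two 8x8
// blocks of 8-bit pixels, reduced across all 64 element-wise differences.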
void aom_minmax_8x8_neon(const uint8_t *a, int a_stride, const uint8_t *b,
                         int b_stride, int *min, int *max) {
  // Load and concatenate.
  const uint8x16_t a01 = load_u8_8x2(a + 0 * a_stride, a_stride);
  const uint8x16_t a23 = load_u8_8x2(a + 2 * a_stride, a_stride);
  const uint8x16_t a45 = load_u8_8x2(a + 4 * a_stride, a_stride);
  const uint8x16_t a67 = load_u8_8x2(a + 6 * a_stride, a_stride);

  const uint8x16_t b01 = load_u8_8x2(b + 0 * b_stride, b_stride);
  const uint8x16_t b23 = load_u8_8x2(b + 2 * b_stride, b_stride);
  const uint8x16_t b45 = load_u8_8x2(b + 4 * b_stride, b_stride);
  const uint8x16_t b67 = load_u8_8x2(b + 6 * b_stride, b_stride);

  // Absolute difference.
  const uint8x16_t ab01_diff = vabdq_u8(a01, b01);
  const uint8x16_t ab23_diff = vabdq_u8(a23, b23);
  const uint8x16_t ab45_diff = vabdq_u8(a45, b45);
  const uint8x16_t ab67_diff = vabdq_u8(a67, b67);

  // Max values between the Q vectors.
  const uint8x16_t ab0123_max = vmaxq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_max = vmaxq_u8(ab45_diff, ab67_diff);
  const uint8x16_t ab0123_min = vminq_u8(ab01_diff, ab23_diff);
  const uint8x16_t ab4567_min = vminq_u8(ab45_diff, ab67_diff);

  const uint8x16_t ab07_max = vmaxq_u8(ab0123_max, ab4567_max);
  const uint8x16_t ab07_min = vminq_u8(ab0123_min, ab4567_min);

#if AOM_ARCH_AARCH64
  *min = *max = 0;  // Clear high bits.
  *((uint8_t *)max) = vmaxvq_u8(ab07_max);
  *((uint8_t *)min) = vminvq_u8(ab07_min);
#else
  // Split into 64-bit vectors and execute pairwise min/max.
  uint8x8_t ab_max = vmax_u8(vget_high_u8(ab07_max), vget_low_u8(ab07_max));
  uint8x8_t ab_min = vmin_u8(vget_high_u8(ab07_min), vget_low_u8(ab07_min));

  // Enough runs of vpmax/vpmin propagate the max/min values to every position.
  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  ab_max = vpmax_u8(ab_max, ab_max);
  ab_min = vpmin_u8(ab_min, ab_min);

  *min = *max = 0;  // Clear high bits.
  // Store directly to avoid a costly NEON-to-GPR transfer.
  vst1_lane_u8((uint8_t *)max, ab_max, 0);
  vst1_lane_u8((uint8_t *)min, ab_min, 0);
#endif
}