/*
 *  Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "config/aom_dsp_rtcd.h"
#include "aom/aom_integer.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/transpose_neon.h"

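// One in-place butterfly pass over the four rows (or, after a transpose, the
// four columns) of a 4x4 block. The halving adds/subtracts in the first stage
// implement the (a +/- b) >> 1 butterflies of the scalar reference and keep
// the intermediate values within int16_t range.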
static INLINE void hadamard_4x4_one_pass(int16x4_t *a0, int16x4_t *a1,
                                         int16x4_t *a2, int16x4_t *a3) {
  const int16x4_t b0 = vhadd_s16(*a0, *a1);
  const int16x4_t b1 = vhsub_s16(*a0, *a1);
  const int16x4_t b2 = vhadd_s16(*a2, *a3);
  const int16x4_t b3 = vhsub_s16(*a2, *a3);

  *a0 = vadd_s16(b0, b2);
  *a1 = vadd_s16(b1, b3);
  *a2 = vsub_s16(b0, b2);
  *a3 = vsub_s16(b1, b3);
}

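// 4x4 Hadamard transform: one pass over the rows, a 4x4 transpose, then one
// pass over the columns.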
void aom_hadamard_4x4_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  int16x4_t a0 = vld1_s16(src_diff);
  int16x4_t a1 = vld1_s16(src_diff + src_stride);
  int16x4_t a2 = vld1_s16(src_diff + 2 * src_stride);
  int16x4_t a3 = vld1_s16(src_diff + 3 * src_stride);

  hadamard_4x4_one_pass(&a0, &a1, &a2, &a3);

  transpose_elems_inplace_s16_4x4(&a0, &a1, &a2, &a3);

  hadamard_4x4_one_pass(&a0, &a1, &a2, &a3);

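  // store_s16_to_tran_low widens the int16_t results to 32-bit tran_low_t on
  // store.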
  store_s16_to_tran_low(coeff, a0);
  store_s16_to_tran_low(coeff + 4, a1);
  store_s16_to_tran_low(coeff + 8, a2);
  store_s16_to_tran_low(coeff + 12, a3);
}

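// One butterfly pass over the eight rows (or, after a transpose, columns) of
// an 8x8 block, without any halving. The final stage writes its results back
// in permuted order, which is what lets callers skip the transpose after the
// second pass.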
static void hadamard8x8_one_pass(int16x8_t *a0, int16x8_t *a1, int16x8_t *a2,
                                 int16x8_t *a3, int16x8_t *a4, int16x8_t *a5,
                                 int16x8_t *a6, int16x8_t *a7) {
  const int16x8_t b0 = vaddq_s16(*a0, *a1);
  const int16x8_t b1 = vsubq_s16(*a0, *a1);
  const int16x8_t b2 = vaddq_s16(*a2, *a3);
  const int16x8_t b3 = vsubq_s16(*a2, *a3);
  const int16x8_t b4 = vaddq_s16(*a4, *a5);
  const int16x8_t b5 = vsubq_s16(*a4, *a5);
  const int16x8_t b6 = vaddq_s16(*a6, *a7);
  const int16x8_t b7 = vsubq_s16(*a6, *a7);

  const int16x8_t c0 = vaddq_s16(b0, b2);
  const int16x8_t c1 = vaddq_s16(b1, b3);
  const int16x8_t c2 = vsubq_s16(b0, b2);
  const int16x8_t c3 = vsubq_s16(b1, b3);
  const int16x8_t c4 = vaddq_s16(b4, b6);
  const int16x8_t c5 = vaddq_s16(b5, b7);
  const int16x8_t c6 = vsubq_s16(b4, b6);
  const int16x8_t c7 = vsubq_s16(b5, b7);

  *a0 = vaddq_s16(c0, c4);
  *a1 = vsubq_s16(c2, c6);
  *a2 = vsubq_s16(c0, c4);
  *a3 = vaddq_s16(c2, c6);
  *a4 = vaddq_s16(c3, c7);
  *a5 = vsubq_s16(c3, c7);
  *a6 = vsubq_s16(c1, c5);
  *a7 = vaddq_s16(c1, c5);
}

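// 8x8 Hadamard transform: one pass over the rows, an 8x8 transpose, then one
// pass over the columns, with the results widened to tran_low_t on store.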
void aom_hadamard_8x8_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                           tran_low_t *coeff) {
  int16x8_t a0 = vld1q_s16(src_diff);
  int16x8_t a1 = vld1q_s16(src_diff + src_stride);
  int16x8_t a2 = vld1q_s16(src_diff + 2 * src_stride);
  int16x8_t a3 = vld1q_s16(src_diff + 3 * src_stride);
  int16x8_t a4 = vld1q_s16(src_diff + 4 * src_stride);
  int16x8_t a5 = vld1q_s16(src_diff + 5 * src_stride);
  int16x8_t a6 = vld1q_s16(src_diff + 6 * src_stride);
  int16x8_t a7 = vld1q_s16(src_diff + 7 * src_stride);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  transpose_elems_inplace_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  // Skip the second transpose because it is not required.

  store_s16q_to_tran_low(coeff + 0, a0);
  store_s16q_to_tran_low(coeff + 8, a1);
  store_s16q_to_tran_low(coeff + 16, a2);
  store_s16q_to_tran_low(coeff + 24, a3);
  store_s16q_to_tran_low(coeff + 32, a4);
  store_s16q_to_tran_low(coeff + 40, a5);
  store_s16q_to_tran_low(coeff + 48, a6);
  store_s16q_to_tran_low(coeff + 56, a7);
}

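// Low-precision ("lp") 8x8 Hadamard: the same transform as
// aom_hadamard_8x8_neon, but the coefficients are stored as int16_t rather
// than being widened to tran_low_t.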
void aom_hadamard_lp_8x8_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                              int16_t *coeff) {
  int16x8_t a0 = vld1q_s16(src_diff);
  int16x8_t a1 = vld1q_s16(src_diff + src_stride);
  int16x8_t a2 = vld1q_s16(src_diff + 2 * src_stride);
  int16x8_t a3 = vld1q_s16(src_diff + 3 * src_stride);
  int16x8_t a4 = vld1q_s16(src_diff + 4 * src_stride);
  int16x8_t a5 = vld1q_s16(src_diff + 5 * src_stride);
  int16x8_t a6 = vld1q_s16(src_diff + 6 * src_stride);
  int16x8_t a7 = vld1q_s16(src_diff + 7 * src_stride);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  transpose_elems_inplace_s16_8x8(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  hadamard8x8_one_pass(&a0, &a1, &a2, &a3, &a4, &a5, &a6, &a7);

  // Skip the second transpose because it is not required.

  vst1q_s16(coeff + 0, a0);
  vst1q_s16(coeff + 8, a1);
  vst1q_s16(coeff + 16, a2);
  vst1q_s16(coeff + 24, a3);
  vst1q_s16(coeff + 32, a4);
  vst1q_s16(coeff + 40, a5);
  vst1q_s16(coeff + 48, a6);
  vst1q_s16(coeff + 56, a7);
}

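// Applies the low-precision 8x8 transform to two horizontally adjacent 8x8
// blocks, producing two consecutive 64-coefficient blocks.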
void aom_hadamard_lp_8x8_dual_neon(const int16_t *src_diff,
                                   ptrdiff_t src_stride, int16_t *coeff) {
  for (int i = 0; i < 2; i++) {
    aom_hadamard_lp_8x8_neon(src_diff + (i * 8), src_stride, coeff + (i * 64));
  }
}

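// Low-precision 16x16 Hadamard built from four 8x8 sub-transforms followed by
// a combining pass across the four sub-blocks.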
void aom_hadamard_lp_16x16_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                                int16_t *coeff) {
  /* Rearrange 16x16 to 8x32 and remove stride.
   * Top left first. */
  aom_hadamard_lp_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride,
                           coeff + 0);
  /* Top right. */
  aom_hadamard_lp_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride,
                           coeff + 64);
  /* Bottom left. */
  aom_hadamard_lp_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride,
                           coeff + 128);
  /* Bottom right. */
  aom_hadamard_lp_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride,
                           coeff + 192);

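  // Combine the four 8x8 results. The halving adds/subtracts implement the
  // >> 1 normalization of the first combining stage and keep the results
  // within int16_t range.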
  for (int i = 0; i < 64; i += 8) {
    const int16x8_t a0 = vld1q_s16(coeff + 0);
    const int16x8_t a1 = vld1q_s16(coeff + 64);
    const int16x8_t a2 = vld1q_s16(coeff + 128);
    const int16x8_t a3 = vld1q_s16(coeff + 192);

    const int16x8_t b0 = vhaddq_s16(a0, a1);
    const int16x8_t b1 = vhsubq_s16(a0, a1);
    const int16x8_t b2 = vhaddq_s16(a2, a3);
    const int16x8_t b3 = vhsubq_s16(a2, a3);

    const int16x8_t c0 = vaddq_s16(b0, b2);
    const int16x8_t c1 = vaddq_s16(b1, b3);
    const int16x8_t c2 = vsubq_s16(b0, b2);
    const int16x8_t c3 = vsubq_s16(b1, b3);

    vst1q_s16(coeff + 0, c0);
    vst1q_s16(coeff + 64, c1);
    vst1q_s16(coeff + 128, c2);
    vst1q_s16(coeff + 192, c3);

    coeff += 8;
  }
}

void aom_hadamard_16x16_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  /* Rearrange 16x16 to 8x32 and remove stride.
   * Top left first. */
  aom_hadamard_8x8_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
  /* Top right. */
  aom_hadamard_8x8_neon(src_diff + 8 + 0 * src_stride, src_stride, coeff + 64);
  /* Bottom left. */
  aom_hadamard_8x8_neon(src_diff + 0 + 8 * src_stride, src_stride, coeff + 128);
  /* Bottom right. */
  aom_hadamard_8x8_neon(src_diff + 8 + 8 * src_stride, src_stride, coeff + 192);

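  // The 8x8 sub-transforms above stored their outputs as 32-bit tran_low_t
  // coefficients, so the combining stage below operates on int32x4_t vectors.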
  // Each iteration of the loop operates on entire rows (16 samples each)
  // because we need to swap the second and third quarters of every row in the
  // output to match AVX2 output (i.e., aom_hadamard_16x16_avx2). See the for
  // loop at the end of aom_hadamard_16x16_c.
  for (int i = 0; i < 64; i += 16) {
    const int32x4_t a00 = vld1q_s32(coeff + 0);
    const int32x4_t a01 = vld1q_s32(coeff + 64);
    const int32x4_t a02 = vld1q_s32(coeff + 128);
    const int32x4_t a03 = vld1q_s32(coeff + 192);

    const int32x4_t b00 = vhaddq_s32(a00, a01);
    const int32x4_t b01 = vhsubq_s32(a00, a01);
    const int32x4_t b02 = vhaddq_s32(a02, a03);
    const int32x4_t b03 = vhsubq_s32(a02, a03);

    const int32x4_t c00 = vaddq_s32(b00, b02);
    const int32x4_t c01 = vaddq_s32(b01, b03);
    const int32x4_t c02 = vsubq_s32(b00, b02);
    const int32x4_t c03 = vsubq_s32(b01, b03);

    const int32x4_t a10 = vld1q_s32(coeff + 4 + 0);
    const int32x4_t a11 = vld1q_s32(coeff + 4 + 64);
    const int32x4_t a12 = vld1q_s32(coeff + 4 + 128);
    const int32x4_t a13 = vld1q_s32(coeff + 4 + 192);

    const int32x4_t b10 = vhaddq_s32(a10, a11);
    const int32x4_t b11 = vhsubq_s32(a10, a11);
    const int32x4_t b12 = vhaddq_s32(a12, a13);
    const int32x4_t b13 = vhsubq_s32(a12, a13);

    const int32x4_t c10 = vaddq_s32(b10, b12);
    const int32x4_t c11 = vaddq_s32(b11, b13);
    const int32x4_t c12 = vsubq_s32(b10, b12);
    const int32x4_t c13 = vsubq_s32(b11, b13);

    const int32x4_t a20 = vld1q_s32(coeff + 8 + 0);
    const int32x4_t a21 = vld1q_s32(coeff + 8 + 64);
    const int32x4_t a22 = vld1q_s32(coeff + 8 + 128);
    const int32x4_t a23 = vld1q_s32(coeff + 8 + 192);

    const int32x4_t b20 = vhaddq_s32(a20, a21);
    const int32x4_t b21 = vhsubq_s32(a20, a21);
    const int32x4_t b22 = vhaddq_s32(a22, a23);
    const int32x4_t b23 = vhsubq_s32(a22, a23);

    const int32x4_t c20 = vaddq_s32(b20, b22);
    const int32x4_t c21 = vaddq_s32(b21, b23);
    const int32x4_t c22 = vsubq_s32(b20, b22);
    const int32x4_t c23 = vsubq_s32(b21, b23);

    const int32x4_t a30 = vld1q_s32(coeff + 12 + 0);
    const int32x4_t a31 = vld1q_s32(coeff + 12 + 64);
    const int32x4_t a32 = vld1q_s32(coeff + 12 + 128);
    const int32x4_t a33 = vld1q_s32(coeff + 12 + 192);

    const int32x4_t b30 = vhaddq_s32(a30, a31);
    const int32x4_t b31 = vhsubq_s32(a30, a31);
    const int32x4_t b32 = vhaddq_s32(a32, a33);
    const int32x4_t b33 = vhsubq_s32(a32, a33);

    const int32x4_t c30 = vaddq_s32(b30, b32);
    const int32x4_t c31 = vaddq_s32(b31, b33);
    const int32x4_t c32 = vsubq_s32(b30, b32);
    const int32x4_t c33 = vsubq_s32(b31, b33);

    // Write back with the second and third quarters of each row swapped.
    vst1q_s32(coeff + 0 + 0, c00);
    vst1q_s32(coeff + 0 + 4, c20);
    vst1q_s32(coeff + 0 + 8, c10);
    vst1q_s32(coeff + 0 + 12, c30);

    vst1q_s32(coeff + 64 + 0, c01);
    vst1q_s32(coeff + 64 + 4, c21);
    vst1q_s32(coeff + 64 + 8, c11);
    vst1q_s32(coeff + 64 + 12, c31);

    vst1q_s32(coeff + 128 + 0, c02);
    vst1q_s32(coeff + 128 + 4, c22);
    vst1q_s32(coeff + 128 + 8, c12);
    vst1q_s32(coeff + 128 + 12, c32);

    vst1q_s32(coeff + 192 + 0, c03);
    vst1q_s32(coeff + 192 + 4, c23);
    vst1q_s32(coeff + 192 + 8, c13);
    vst1q_s32(coeff + 192 + 12, c33);

    coeff += 16;
  }
}

void aom_hadamard_32x32_neon(const int16_t *src_diff, ptrdiff_t src_stride,
                             tran_low_t *coeff) {
  /* Top left first. */
  aom_hadamard_16x16_neon(src_diff + 0 + 0 * src_stride, src_stride, coeff + 0);
  /* Top right. */
  aom_hadamard_16x16_neon(src_diff + 16 + 0 * src_stride, src_stride,
                          coeff + 256);
  /* Bottom left. */
  aom_hadamard_16x16_neon(src_diff + 0 + 16 * src_stride, src_stride,
                          coeff + 512);
  /* Bottom right. */
  aom_hadamard_16x16_neon(src_diff + 16 + 16 * src_stride, src_stride,
                          coeff + 768);

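  // Combine the four 16x16 results. The >> 2 in the first combining stage
  // normalizes the coefficients in the same way as the scalar reference
  // (aom_hadamard_32x32_c).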
  for (int i = 0; i < 256; i += 4) {
    const int32x4_t a0 = vld1q_s32(coeff);
    const int32x4_t a1 = vld1q_s32(coeff + 256);
    const int32x4_t a2 = vld1q_s32(coeff + 512);
    const int32x4_t a3 = vld1q_s32(coeff + 768);

    const int32x4_t b0 = vshrq_n_s32(vaddq_s32(a0, a1), 2);
    const int32x4_t b1 = vshrq_n_s32(vsubq_s32(a0, a1), 2);
    const int32x4_t b2 = vshrq_n_s32(vaddq_s32(a2, a3), 2);
    const int32x4_t b3 = vshrq_n_s32(vsubq_s32(a2, a3), 2);

    const int32x4_t c0 = vaddq_s32(b0, b2);
    const int32x4_t c1 = vaddq_s32(b1, b3);
    const int32x4_t c2 = vsubq_s32(b0, b2);
    const int32x4_t c3 = vsubq_s32(b1, b3);

    vst1q_s32(coeff + 0, c0);
    vst1q_s32(coeff + 256, c1);
    vst1q_s32(coeff + 512, c2);
    vst1q_s32(coeff + 768, c3);

    coeff += 4;
  }
}