/*
 *  Copyright (c) 2020, Alliance for Open Media. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "config/aom_dsp_rtcd.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"

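// Accumulate the sum of squared differences for one row of 16 pixels: take
// the absolute differences, square them (widening to 16 bits), and
// pairwise-add the products into the 32-bit accumulator.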
static INLINE void sse_16x1_neon(const uint8_t *src, const uint8_t *ref,
                                 uint32x4_t *sse) {
  uint8x16_t s = vld1q_u8(src);
  uint8x16_t r = vld1q_u8(ref);

  uint8x16_t abs_diff = vabdq_u8(s, r);
  uint8x8_t abs_diff_lo = vget_low_u8(abs_diff);
  uint8x8_t abs_diff_hi = vget_high_u8(abs_diff);

  *sse = vpadalq_u16(*sse, vmull_u8(abs_diff_lo, abs_diff_lo));
  *sse = vpadalq_u16(*sse, vmull_u8(abs_diff_hi, abs_diff_hi));
}

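// Same as above, but for a single row of 8 pixels.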
static INLINE void sse_8x1_neon(const uint8_t *src, const uint8_t *ref,
                                uint32x4_t *sse) {
  uint8x8_t s = vld1_u8(src);
  uint8x8_t r = vld1_u8(ref);

  uint8x8_t abs_diff = vabd_u8(s, r);

  *sse = vpadalq_u16(*sse, vmull_u8(abs_diff, abs_diff));
}

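// Accumulate the SSE of two rows of 4 pixels; both rows are loaded into a
// single 8-lane vector and processed like one 8-pixel row.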
static INLINE void sse_4x2_neon(const uint8_t *src, int src_stride,
                                const uint8_t *ref, int ref_stride,
                                uint32x4_t *sse) {
  uint8x8_t s = load_unaligned_u8(src, src_stride);
  uint8x8_t r = load_unaligned_u8(ref, ref_stride);

  uint8x8_t abs_diff = vabd_u8(s, r);

  *sse = vpadalq_u16(*sse, vmull_u8(abs_diff, abs_diff));
}

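// Generic fallback for widths without a dedicated implementation. Widths that
// leave a sub-8 tail (width % 8 in 1..4) are processed two rows per iteration
// so the tail can be covered 4 pixels wide with sse_4x2_neon; all other widths
// are processed one row at a time, 8 pixels per step.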
static INLINE uint32_t sse_wxh_neon(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int width, int height) {
  uint32x4_t sse = vdupq_n_u32(0);

  if ((width & 0x07) && ((width & 0x07) < 5)) {
    int i = height;
    do {
      int j = 0;
      do {
        sse_8x1_neon(src + j, ref + j, &sse);
        sse_8x1_neon(src + j + src_stride, ref + j + ref_stride, &sse);
        j += 8;
      } while (j + 4 < width);

      sse_4x2_neon(src + j, src_stride, ref + j, ref_stride, &sse);
      src += 2 * src_stride;
      ref += 2 * ref_stride;
      i -= 2;
    } while (i != 0);
  } else {
    int i = height;
    do {
      int j = 0;
      do {
        sse_8x1_neon(src + j, ref + j, &sse);
        j += 8;
      } while (j < width);

      src += src_stride;
      ref += ref_stride;
    } while (--i != 0);
  }
  return horizontal_add_u32x4(sse);
}

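// 128 pixels wide: eight 16-pixel chunks per row. Two accumulators are
// alternated so the widening pairwise-add chains are independent of each
// other.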
static INLINE uint32_t sse_128xh_neon(const uint8_t *src, int src_stride,
                                      const uint8_t *ref, int ref_stride,
                                      int height) {
  uint32x4_t sse[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = height;
  do {
    sse_16x1_neon(src, ref, &sse[0]);
    sse_16x1_neon(src + 16, ref + 16, &sse[1]);
    sse_16x1_neon(src + 32, ref + 32, &sse[0]);
    sse_16x1_neon(src + 48, ref + 48, &sse[1]);
    sse_16x1_neon(src + 64, ref + 64, &sse[0]);
    sse_16x1_neon(src + 80, ref + 80, &sse[1]);
    sse_16x1_neon(src + 96, ref + 96, &sse[0]);
    sse_16x1_neon(src + 112, ref + 112, &sse[1]);

    src += src_stride;
    ref += ref_stride;
  } while (--i != 0);

  return horizontal_add_u32x4(vaddq_u32(sse[0], sse[1]));
}

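// 64 pixels wide: four 16-pixel chunks per row, alternating accumulators as
// above.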
static INLINE uint32_t sse_64xh_neon(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     int height) {
  uint32x4_t sse[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = height;
  do {
    sse_16x1_neon(src, ref, &sse[0]);
    sse_16x1_neon(src + 16, ref + 16, &sse[1]);
    sse_16x1_neon(src + 32, ref + 32, &sse[0]);
    sse_16x1_neon(src + 48, ref + 48, &sse[1]);

    src += src_stride;
    ref += ref_stride;
  } while (--i != 0);

  return horizontal_add_u32x4(vaddq_u32(sse[0], sse[1]));
}

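// 32 pixels wide: two 16-pixel chunks per row.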
static INLINE uint32_t sse_32xh_neon(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     int height) {
  uint32x4_t sse[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = height;
  do {
    sse_16x1_neon(src, ref, &sse[0]);
    sse_16x1_neon(src + 16, ref + 16, &sse[1]);

    src += src_stride;
    ref += ref_stride;
  } while (--i != 0);

  return horizontal_add_u32x4(vaddq_u32(sse[0], sse[1]));
}

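// 16 pixels wide: one 16-pixel chunk per row, processing two rows per loop
// iteration and alternating accumulators between them.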
static INLINE uint32_t sse_16xh_neon(const uint8_t *src, int src_stride,
                                     const uint8_t *ref, int ref_stride,
                                     int height) {
  uint32x4_t sse[2] = { vdupq_n_u32(0), vdupq_n_u32(0) };

  int i = height;
  do {
    sse_16x1_neon(src, ref, &sse[0]);
    src += src_stride;
    ref += ref_stride;
    sse_16x1_neon(src, ref, &sse[1]);
    src += src_stride;
    ref += ref_stride;
    i -= 2;
  } while (i != 0);

  return horizontal_add_u32x4(vaddq_u32(sse[0], sse[1]));
}

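// 8 pixels wide: one 8-pixel chunk per row.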
static INLINE uint32_t sse_8xh_neon(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int height) {
  uint32x4_t sse = vdupq_n_u32(0);

  int i = height;
  do {
    sse_8x1_neon(src, ref, &sse);

    src += src_stride;
    ref += ref_stride;
  } while (--i != 0);

  return horizontal_add_u32x4(sse);
}

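// 4 pixels wide: two rows are loaded and accumulated together per iteration.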
static INLINE uint32_t sse_4xh_neon(const uint8_t *src, int src_stride,
                                    const uint8_t *ref, int ref_stride,
                                    int height) {
  uint32x4_t sse = vdupq_n_u32(0);

  int i = height;
  do {
    sse_4x2_neon(src, src_stride, ref, ref_stride, &sse);

    src += 2 * src_stride;
    ref += 2 * ref_stride;
    i -= 2;
  } while (i != 0);

  return horizontal_add_u32x4(sse);
}

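// Compute the sum of squared differences between two blocks of 8-bit pixels,
// dispatching on block width to the specialized implementations above.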
int64_t aom_sse_neon(const uint8_t *src, int src_stride, const uint8_t *ref,
                     int ref_stride, int width, int height) {
  switch (width) {
    case 4: return sse_4xh_neon(src, src_stride, ref, ref_stride, height);
    case 8: return sse_8xh_neon(src, src_stride, ref, ref_stride, height);
    case 16: return sse_16xh_neon(src, src_stride, ref, ref_stride, height);
    case 32: return sse_32xh_neon(src, src_stride, ref, ref_stride, height);
    case 64: return sse_64xh_neon(src, src_stride, ref, ref_stride, height);
    case 128: return sse_128xh_neon(src, src_stride, ref, ref_stride, height);
    default:
      return sse_wxh_neon(src, src_stride, ref, ref_stride, width, height);
  }
}