/*
 * Copyright (c) 2016 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"

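// Averages the source block into the destination in place: each output pixel
// becomes the rounding average (src + dst + 1) >> 1, computed with
// vrhaddq_u16. The filter, phase, and step arguments are unused; this is the
// unfiltered copy-with-averaging path of the high-bitdepth convolve family.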
void vpx_highbd_convolve_avg_neon(const uint16_t *src, ptrdiff_t src_stride,
                                  uint16_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h, int bd) {
  (void)filter;
  (void)x0_q4;
  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;
  (void)bd;

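  // The average is applied width class by width class below; every path
  // except the 64-wide one processes two rows per loop iteration.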
  if (w < 8) {  // avg4
    uint16x4_t s0, s1, d0, d1;
    uint16x8_t s01, d01;
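    // Pack two 4-pixel rows into a single 8-lane vector so one vrhaddq_u16
    // averages both rows at once.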
    do {
      s0 = vld1_u16(src);
      d0 = vld1_u16(dst);
      src += src_stride;
      s1 = vld1_u16(src);
      d1 = vld1_u16(dst + dst_stride);
      src += src_stride;
      s01 = vcombine_u16(s0, s1);
      d01 = vcombine_u16(d0, d1);
      d01 = vrhaddq_u16(s01, d01);
      vst1_u16(dst, vget_low_u16(d01));
      dst += dst_stride;
      vst1_u16(dst, vget_high_u16(d01));
      dst += dst_stride;
      h -= 2;
    } while (h > 0);
  } else if (w == 8) {  // avg8
    uint16x8_t s0, s1, d0, d1;
    do {
      s0 = vld1q_u16(src);
      d0 = vld1q_u16(dst);
      src += src_stride;
      s1 = vld1q_u16(src);
      d1 = vld1q_u16(dst + dst_stride);
      src += src_stride;

      d0 = vrhaddq_u16(s0, d0);
      d1 = vrhaddq_u16(s1, d1);

      vst1q_u16(dst, d0);
      dst += dst_stride;
      vst1q_u16(dst, d1);
      dst += dst_stride;
      h -= 2;
    } while (h > 0);
  } else if (w < 32) {  // avg16
    uint16x8_t s0l, s0h, s1l, s1h, d0l, d0h, d1l, d1h;
    do {
      s0l = vld1q_u16(src);
      s0h = vld1q_u16(src + 8);
      d0l = vld1q_u16(dst);
      d0h = vld1q_u16(dst + 8);
      src += src_stride;
      s1l = vld1q_u16(src);
      s1h = vld1q_u16(src + 8);
      d1l = vld1q_u16(dst + dst_stride);
      d1h = vld1q_u16(dst + dst_stride + 8);
      src += src_stride;

      d0l = vrhaddq_u16(s0l, d0l);
      d0h = vrhaddq_u16(s0h, d0h);
      d1l = vrhaddq_u16(s1l, d1l);
      d1h = vrhaddq_u16(s1h, d1h);

      vst1q_u16(dst, d0l);
      vst1q_u16(dst + 8, d0h);
      dst += dst_stride;
      vst1q_u16(dst, d1l);
      vst1q_u16(dst + 8, d1h);
      dst += dst_stride;
      h -= 2;
    } while (h > 0);
  } else if (w == 32) {  // avg32
    uint16x8_t s0, s1, s2, s3, d0, d1, d2, d3;
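    // Each of the two rows handled per iteration spans four 8-lane vectors.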
    do {
      s0 = vld1q_u16(src);
      s1 = vld1q_u16(src + 8);
      s2 = vld1q_u16(src + 16);
      s3 = vld1q_u16(src + 24);
      d0 = vld1q_u16(dst);
      d1 = vld1q_u16(dst + 8);
      d2 = vld1q_u16(dst + 16);
      d3 = vld1q_u16(dst + 24);
      src += src_stride;

      d0 = vrhaddq_u16(s0, d0);
      d1 = vrhaddq_u16(s1, d1);
      d2 = vrhaddq_u16(s2, d2);
      d3 = vrhaddq_u16(s3, d3);

      vst1q_u16(dst, d0);
      vst1q_u16(dst + 8, d1);
      vst1q_u16(dst + 16, d2);
      vst1q_u16(dst + 24, d3);
      dst += dst_stride;

      s0 = vld1q_u16(src);
      s1 = vld1q_u16(src + 8);
      s2 = vld1q_u16(src + 16);
      s3 = vld1q_u16(src + 24);
      d0 = vld1q_u16(dst);
      d1 = vld1q_u16(dst + 8);
      d2 = vld1q_u16(dst + 16);
      d3 = vld1q_u16(dst + 24);
      src += src_stride;

      d0 = vrhaddq_u16(s0, d0);
      d1 = vrhaddq_u16(s1, d1);
      d2 = vrhaddq_u16(s2, d2);
      d3 = vrhaddq_u16(s3, d3);

      vst1q_u16(dst, d0);
      vst1q_u16(dst + 8, d1);
      vst1q_u16(dst + 16, d2);
      vst1q_u16(dst + 24, d3);
      dst += dst_stride;
      h -= 2;
    } while (h > 0);
  } else {  // avg64
    uint16x8_t s0, s1, s2, s3, d0, d1, d2, d3;
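    // One full 64-pixel row is averaged per iteration, in two 32-pixel halves.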
    do {
      s0 = vld1q_u16(src);
      s1 = vld1q_u16(src + 8);
      s2 = vld1q_u16(src + 16);
      s3 = vld1q_u16(src + 24);
      d0 = vld1q_u16(dst);
      d1 = vld1q_u16(dst + 8);
      d2 = vld1q_u16(dst + 16);
      d3 = vld1q_u16(dst + 24);

      d0 = vrhaddq_u16(s0, d0);
      d1 = vrhaddq_u16(s1, d1);
      d2 = vrhaddq_u16(s2, d2);
      d3 = vrhaddq_u16(s3, d3);

      vst1q_u16(dst, d0);
      vst1q_u16(dst + 8, d1);
      vst1q_u16(dst + 16, d2);
      vst1q_u16(dst + 24, d3);

      s0 = vld1q_u16(src + 32);
      s1 = vld1q_u16(src + 40);
      s2 = vld1q_u16(src + 48);
      s3 = vld1q_u16(src + 56);
      d0 = vld1q_u16(dst + 32);
      d1 = vld1q_u16(dst + 40);
      d2 = vld1q_u16(dst + 48);
      d3 = vld1q_u16(dst + 56);

      d0 = vrhaddq_u16(s0, d0);
      d1 = vrhaddq_u16(s1, d1);
      d2 = vrhaddq_u16(s2, d2);
      d3 = vrhaddq_u16(s3, d3);

      vst1q_u16(dst + 32, d0);
      vst1q_u16(dst + 40, d1);
      vst1q_u16(dst + 48, d2);
      vst1q_u16(dst + 56, d3);
      src += src_stride;
      dst += dst_stride;
    } while (--h);
  }
}