/*
 * Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_MEM_NEON_H_
#define VPX_VPX_DSP_ARM_MEM_NEON_H_

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"

// Support for these xN intrinsics is lacking in older versions of GCC.
#if defined(__GNUC__) && !defined(__clang__)
#if __GNUC__ < 8 || defined(__arm__)
static INLINE uint8x16x2_t vld1q_u8_x2(uint8_t const *ptr) {
  uint8x16x2_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16) } };
  return res;
}
#endif

#if __GNUC__ < 9 || defined(__arm__)
static INLINE uint8x16x3_t vld1q_u8_x3(uint8_t const *ptr) {
  uint8x16x3_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16),
                         vld1q_u8(ptr + 2 * 16) } };
  return res;
}
#endif
#endif

// Pack four int16_t constants into a single int16x4_t vector.
static INLINE int16x4_t create_s16x4_neon(const int16_t c0, const int16_t c1,
                                          const int16_t c2, const int16_t c3) {
  return vcreate_s16((uint16_t)c0 | ((uint32_t)c1 << 16) |
                     ((int64_t)(uint16_t)c2 << 32) | ((int64_t)c3 << 48));
}

// Pack two int32_t constants into a single int32x2_t vector.
static INLINE int32x2_t create_s32x2_neon(const int32_t c0, const int32_t c1) {
  return vcreate_s32((uint32_t)c0 | ((int64_t)(uint32_t)c1 << 32));
}

// Pack four int32_t constants into a single int32x4_t vector.
static INLINE int32x4_t create_s32x4_neon(const int32_t c0, const int32_t c1,
                                          const int32_t c2, const int32_t c3) {
  return vcombine_s32(create_s32x2_neon(c0, c1), create_s32x2_neon(c2, c3));
}
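
// Example usage (illustrative sketch only; the cospi_* names stand in for
// whatever scalar constants a kernel needs, and `in` is a hypothetical
// int16x4_t input):
//   const int16x4_t cospis = create_s16x4_neon(cospi_8_64, cospi_16_64,
//                                              cospi_24_64, 0);
//   const int16x4_t out = vqrdmulh_lane_s16(in, cospis, 1);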

// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

// Helper functions used to store int16 into tran_low_t, widening if necessary.
static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}

#if CONFIG_VP9_HIGHBITDEPTH
static INLINE void store_s32q_to_tran_low(tran_low_t *buf, const int32x4_t a) {
  vst1q_s32(buf, a);
}

static INLINE int32x4_t load_tran_low_to_s32q(const tran_low_t *buf) {
  return vld1q_s32(buf);
}
#endif
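
// Example round trip through the helpers above (illustrative sketch only;
// `coeff_block` is a hypothetical buffer of at least 8 tran_low_t values):
//   int16x8_t row = load_tran_low_to_s16q(coeff_block);
//   row = vshrq_n_s16(row, 1);
//   store_s16q_to_tran_low(coeff_block, row);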

// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of uint32_t (4 bytes) and add alignment hints
// to the memory access.
//
// This is used for functions operating on uint8_t which wish to load or store
// 4 values at a time but which may not be on 4 byte boundaries.
static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
  memcpy(buf, &a, 4);
}
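
// For example, writing `*(uint32_t *)buf = a;` instead would let the compiler
// assume `buf` is 4-byte aligned (and is undefined behaviour when it is not),
// whereas the memcpy() above carries no alignment assumption.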

// Load 4 contiguous bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8_4x1(const uint8_t *buf) {
  uint32_t a;
  uint32x2_t a_u32;
  memcpy(&a, buf, 4);
  a_u32 = vdup_n_u32(0);
  a_u32 = vset_lane_u32(a, a_u32, 0);
  return vreinterpret_u8_u32(a_u32);
}

// Load 4 contiguous bytes and replicate across a vector when alignment is not
// guaranteed.
static INLINE uint8x8_t load_replicate_u8_4x1(const uint8_t *buf) {
  uint32_t a;
  memcpy(&a, buf, 4);
  return vreinterpret_u8_u32(vdup_n_u32(a));
}

// Store 4 contiguous bytes from the low half of an 8x8 vector.
static INLINE void store_u8_4x1(uint8_t *buf, uint8x8_t a) {
  vst1_lane_u32((uint32_t *)buf, vreinterpret_u32_u8(a), 0);
}

// Store 4 contiguous bytes from the high half of an 8x8 vector.
static INLINE void store_u8_4x1_high(uint8_t *buf, uint8x8_t a) {
  vst1_lane_u32((uint32_t *)buf, vreinterpret_u32_u8(a), 1);
}

// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
                                          ptrdiff_t stride) {
  uint32_t a;
  uint32x2_t a_u32;
  if (stride == 4) return vld1_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vdup_n_u32(a);
  memcpy(&a, buf, 4);
  a_u32 = vset_lane_u32(a, a_u32, 1);
  return vreinterpret_u8_u32(a_u32);
}
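
// Example usage (illustrative sketch only; `src` and `src_stride` describe a
// hypothetical 4-wide block of pixels with no particular alignment):
//   const uint8x8_t rows01 = load_unaligned_u8(src, src_stride);
//   const uint8x8_t rows23 = load_unaligned_u8(src + 2 * src_stride,
//                                              src_stride);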

// Load 8 bytes when alignment is not guaranteed.
static INLINE uint16x4_t load_unaligned_u16(const uint16_t *buf) {
  uint64_t a;
  uint64x1_t a_u64 = vdup_n_u64(0);
  memcpy(&a, buf, 8);
  a_u64 = vset_lane_u64(a, a_u64, 0);
  return vreinterpret_u16_u64(a_u64);
}

// Load 2 sets of 8 bytes when alignment is not guaranteed.
static INLINE uint16x8_t load_unaligned_u16q(const uint16_t *buf,
                                             ptrdiff_t stride) {
  uint64_t a;
  uint64x2_t a_u64;
  if (stride == 4) return vld1q_u16(buf);
  memcpy(&a, buf, 8);
  buf += stride;
  a_u64 = vdupq_n_u64(a);
  memcpy(&a, buf, 8);
  a_u64 = vsetq_lane_u64(a, a_u64, 1);
  return vreinterpretq_u16_u64(a_u64);
}

// Store 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
                                      const uint8x8_t a) {
  const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  if (stride == 4) {
    vst1_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vget_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vget_lane_u32(a_u32, 1));
}

// Load 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf,
                                            ptrdiff_t stride) {
  uint32_t a;
  uint32x4_t a_u32;
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vdupq_n_u32(a);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

// Store 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8q(uint8_t *buf, ptrdiff_t stride,
                                       const uint8x16_t a) {
  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
  if (stride == 4) {
    vst1q_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
}
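
// Example round trip for a 4x4 block (illustrative sketch only; `src`, `dst`
// and both strides are hypothetical and need no particular alignment):
//   const uint8x16_t block = load_unaligned_u8q(src, src_stride);
//   store_unaligned_u8q(dst, dst_stride, block);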

// Load 2 sets of 4 bytes when alignment is guaranteed.
static INLINE uint8x8_t load_u8(const uint8_t *buf, ptrdiff_t stride) {
  uint32x2_t a = vdup_n_u32(0);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  a = vld1_lane_u32((const uint32_t *)buf, a, 0);
  buf += stride;
  a = vld1_lane_u32((const uint32_t *)buf, a, 1);
  return vreinterpret_u8_u32(a);
}

// Store 2 sets of 4 bytes when alignment is guaranteed.
static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
  uint32x2_t a_u32 = vreinterpret_u32_u8(a);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  vst1_lane_u32((uint32_t *)buf, a_u32, 0);
  buf += stride;
  vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}
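
// Example usage (illustrative sketch only; `pred` and `dst` are hypothetical
// buffers that satisfy the asserts above: 4-byte aligned, with strides that
// are multiples of 4):
//   const uint8x8_t rows01 = load_u8(pred, pred_stride);
//   store_u8(dst, dst_stride, rows01);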

// The following helpers load or store multiple rows of 8 or 16 values, with
// consecutive rows separated by the stride p.
static INLINE void store_u8_8x3(uint8_t *s, const ptrdiff_t p,
                                const uint8x8_t s0, const uint8x8_t s1,
                                const uint8x8_t s2) {
  vst1_u8(s, s0);
  s += p;
  vst1_u8(s, s1);
  s += p;
  vst1_u8(s, s2);
}

static INLINE void load_u8_8x4(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
}

static INLINE void store_u8_8x4(uint8_t *s, const ptrdiff_t p,
                                const uint8x8_t s0, const uint8x8_t s1,
                                const uint8x8_t s2, const uint8x8_t s3) {
  vst1_u8(s, s0);
  s += p;
  vst1_u8(s, s1);
  s += p;
  vst1_u8(s, s2);
  s += p;
  vst1_u8(s, s3);
}

static INLINE void load_u8_16x3(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
}

static INLINE void load_u8_16x4(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2, uint8x16_t *const s3) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
  s += p;
  *s3 = vld1q_u8(s);
}

static INLINE void store_u8_16x4(uint8_t *s, const ptrdiff_t p,
                                 const uint8x16_t s0, const uint8x16_t s1,
                                 const uint8x16_t s2, const uint8x16_t s3) {
  vst1q_u8(s, s0);
  s += p;
  vst1q_u8(s, s1);
  s += p;
  vst1q_u8(s, s2);
  s += p;
  vst1q_u8(s, s3);
}

static INLINE void load_u8_8x7(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3,
                               uint8x8_t *const s4, uint8x8_t *const s5,
                               uint8x8_t *const s6) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
  s += p;
  *s4 = vld1_u8(s);
  s += p;
  *s5 = vld1_u8(s);
  s += p;
  *s6 = vld1_u8(s);
}

static INLINE void load_u8_8x8(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3,
                               uint8x8_t *const s4, uint8x8_t *const s5,
                               uint8x8_t *const s6, uint8x8_t *const s7) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
  s += p;
  *s4 = vld1_u8(s);
  s += p;
  *s5 = vld1_u8(s);
  s += p;
  *s6 = vld1_u8(s);
  s += p;
  *s7 = vld1_u8(s);
}

static INLINE void store_u8_8x8(uint8_t *s, const ptrdiff_t p,
                                const uint8x8_t s0, const uint8x8_t s1,
                                const uint8x8_t s2, const uint8x8_t s3,
                                const uint8x8_t s4, const uint8x8_t s5,
                                const uint8x8_t s6, const uint8x8_t s7) {
  vst1_u8(s, s0);
  s += p;
  vst1_u8(s, s1);
  s += p;
  vst1_u8(s, s2);
  s += p;
  vst1_u8(s, s3);
  s += p;
  vst1_u8(s, s4);
  s += p;
  vst1_u8(s, s5);
  s += p;
  vst1_u8(s, s6);
  s += p;
  vst1_u8(s, s7);
}

static INLINE void load_u8_16x8(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2, uint8x16_t *const s3,
                                uint8x16_t *const s4, uint8x16_t *const s5,
                                uint8x16_t *const s6, uint8x16_t *const s7) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
  s += p;
  *s3 = vld1q_u8(s);
  s += p;
  *s4 = vld1q_u8(s);
  s += p;
  *s5 = vld1q_u8(s);
  s += p;
  *s6 = vld1q_u8(s);
  s += p;
  *s7 = vld1q_u8(s);
}

static INLINE void store_u8_16x8(uint8_t *s, const ptrdiff_t p,
                                 const uint8x16_t s0, const uint8x16_t s1,
                                 const uint8x16_t s2, const uint8x16_t s3,
                                 const uint8x16_t s4, const uint8x16_t s5,
                                 const uint8x16_t s6, const uint8x16_t s7) {
  vst1q_u8(s, s0);
  s += p;
  vst1q_u8(s, s1);
  s += p;
  vst1q_u8(s, s2);
  s += p;
  vst1q_u8(s, s3);
  s += p;
  vst1q_u8(s, s4);
  s += p;
  vst1q_u8(s, s5);
  s += p;
  vst1q_u8(s, s6);
  s += p;
  vst1q_u8(s, s7);
}

static INLINE void load_u16_8x8(const uint16_t *s, const ptrdiff_t p,
                                uint16x8_t *s0, uint16x8_t *s1, uint16x8_t *s2,
                                uint16x8_t *s3, uint16x8_t *s4, uint16x8_t *s5,
                                uint16x8_t *s6, uint16x8_t *s7) {
  *s0 = vld1q_u16(s);
  s += p;
  *s1 = vld1q_u16(s);
  s += p;
  *s2 = vld1q_u16(s);
  s += p;
  *s3 = vld1q_u16(s);
  s += p;
  *s4 = vld1q_u16(s);
  s += p;
  *s5 = vld1q_u16(s);
  s += p;
  *s6 = vld1q_u16(s);
  s += p;
  *s7 = vld1q_u16(s);
}

#endif  // VPX_VPX_DSP_ARM_MEM_NEON_H_