/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_MEM_NEON_H_
#define VPX_VPX_DSP_ARM_MEM_NEON_H_

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"

// Support for these xN intrinsics is lacking in older versions of GCC.
#if defined(__GNUC__) && !defined(__clang__)
#if __GNUC__ < 8 || defined(__arm__)
static INLINE uint8x16x2_t vld1q_u8_x2(uint8_t const *ptr) {
  uint8x16x2_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16) } };
  return res;
}
#endif

#if __GNUC__ < 9 || defined(__arm__)
static INLINE uint8x16x3_t vld1q_u8_x3(uint8_t const *ptr) {
  uint8x16x3_t res = { { vld1q_u8(ptr + 0 * 16), vld1q_u8(ptr + 1 * 16),
                         vld1q_u8(ptr + 2 * 16) } };
  return res;
}
#endif
#endif

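// Illustrative use of the xN fallbacks above (a minimal sketch; `src` and
// `dst` are hypothetical caller-side pointers, not part of this header).
// The fallbacks keep call sites identical whether or not the compiler
// provides the intrinsic natively:
//
//   const uint8x16x2_t pair = vld1q_u8_x2(src);  // 32 contiguous bytes
//   vst1q_u8(dst, pair.val[0]);
//   vst1q_u8(dst + 16, pair.val[1]);
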
static INLINE int16x4_t create_s16x4_neon(const int16_t c0, const int16_t c1,
                                          const int16_t c2, const int16_t c3) {
  return vcreate_s16((uint16_t)c0 | ((uint32_t)c1 << 16) |
                     ((int64_t)(uint16_t)c2 << 32) | ((int64_t)c3 << 48));
}

static INLINE int32x2_t create_s32x2_neon(const int32_t c0, const int32_t c1) {
  return vcreate_s32((uint32_t)c0 | ((int64_t)(uint32_t)c1 << 32));
}

static INLINE int32x4_t create_s32x4_neon(const int32_t c0, const int32_t c1,
                                          const int32_t c2, const int32_t c3) {
  return vcombine_s32(create_s32x2_neon(c0, c1), create_s32x2_neon(c2, c3));
}

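// Example use of the create_* helpers (a sketch only; the coefficient values
// and names below are hypothetical). Packing through a single 64-bit vcreate
// avoids a chain of per-lane inserts:
//
//   const int16x4_t filter = create_s16x4_neon(-4, 36, 36, -4);
//   const int32x4_t offsets = create_s32x4_neon(0, 1, 2, 3);
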
// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}

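// Sketch of a typical round trip through the tran_low_t helpers above (the
// `coeff` buffer and the processing step are hypothetical). Callers work in
// int16 lanes regardless of whether tran_low_t is 16 or 32 bits wide:
//
//   int16x8_t row = load_tran_low_to_s16q(coeff);
//   row = vshrq_n_s16(row, 1);           // any int16 processing
//   store_s16q_to_tran_low(coeff, row);  // widens again when needed
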
// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of uint32_t (4 bytes) and add alignment hints
// to the memory access.
//
// This is used for functions operating on uint8_t which wish to load or store
// 4 values at a time but which may not be on 4 byte boundaries.
static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
  memcpy(buf, &a, 4);
}

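// For contrast, the direct store this helper replaces (do not use it) is
// undefined behaviour when buf is not suitably aligned and also breaks
// strict-aliasing rules:
//
//   *(uint32_t *)buf = a;  // assumes 4-byte alignment; uint32_to_mem does not
//
// With optimisation enabled the memcpy typically compiles to a single 4-byte
// store, so there is no extra cost.
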
// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf,
                                          ptrdiff_t stride) {
  uint32_t a;
  uint32x2_t a_u32;
  if (stride == 4) return vld1_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vdup_n_u32(a);
  memcpy(&a, buf, 4);
  a_u32 = vset_lane_u32(a, a_u32, 1);
  return vreinterpret_u8_u32(a_u32);
}

// Store 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8(uint8_t *buf, ptrdiff_t stride,
                                      const uint8x8_t a) {
  const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  if (stride == 4) {
    vst1_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vget_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vget_lane_u32(a_u32, 1));
}

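// Sketch of the intended usage pattern for the two helpers above (the `src`,
// `dst`, `src_stride` and `dst_stride` names are hypothetical): two 4-pixel
// rows travel through the pipeline as a single 8-lane vector.
//
//   const uint8x8_t rows = load_unaligned_u8(src, src_stride);
//   const uint8x8_t avg = vrhadd_u8(rows, vdup_n_u8(128));  // any processing
//   store_unaligned_u8(dst, dst_stride, avg);
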
// Load 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf,
                                            ptrdiff_t stride) {
  uint32_t a;
  uint32x4_t a_u32;
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vdupq_n_u32(a);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

// Store 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8q(uint8_t *buf, ptrdiff_t stride,
                                       const uint8x16_t a) {
  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
  if (stride == 4) {
    vst1q_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
}

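// The q variants above carry a whole 4x4 block in one 128-bit register. A
// minimal sketch (the pointer and stride names are hypothetical):
//
//   const uint8x16_t block = load_unaligned_u8q(pred, pred_stride);
//   store_unaligned_u8q(dst, dst_stride, block);
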
// Load 2 sets of 4 bytes when alignment is guaranteed.
static INLINE uint8x8_t load_u8(const uint8_t *buf, ptrdiff_t stride) {
  uint32x2_t a = vdup_n_u32(0);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  a = vld1_lane_u32((const uint32_t *)buf, a, 0);
  buf += stride;
  a = vld1_lane_u32((const uint32_t *)buf, a, 1);
  return vreinterpret_u8_u32(a);
}

// Store 2 sets of 4 bytes when alignment is guaranteed.
static INLINE void store_u8(uint8_t *buf, ptrdiff_t stride, const uint8x8_t a) {
  uint32x2_t a_u32 = vreinterpret_u32_u8(a);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  vst1_lane_u32((uint32_t *)buf, a_u32, 0);
  buf += stride;
  vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}

static INLINE void load_u8_8x4(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
}

static INLINE void store_u8_8x4(uint8_t *s, const ptrdiff_t p,
                                const uint8x8_t s0, const uint8x8_t s1,
                                const uint8x8_t s2, const uint8x8_t s3) {
  vst1_u8(s, s0);
  s += p;
  vst1_u8(s, s1);
  s += p;
  vst1_u8(s, s2);
  s += p;
  vst1_u8(s, s3);
}

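// Example of the 8x4 load/store pair above used for a simple block average
// (a sketch; all pointer and stride names are hypothetical):
//
//   uint8x8_t p0, p1, p2, p3;
//   uint8x8_t q0, q1, q2, q3;
//   load_u8_8x4(pred, pred_stride, &p0, &p1, &p2, &p3);
//   load_u8_8x4(ref, ref_stride, &q0, &q1, &q2, &q3);
//   store_u8_8x4(dst, dst_stride, vrhadd_u8(p0, q0), vrhadd_u8(p1, q1),
//                vrhadd_u8(p2, q2), vrhadd_u8(p3, q3));
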
static INLINE void load_u8_16x4(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2, uint8x16_t *const s3) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
  s += p;
  *s3 = vld1q_u8(s);
}

static INLINE void store_u8_16x4(uint8_t *s, const ptrdiff_t p,
                                 const uint8x16_t s0, const uint8x16_t s1,
                                 const uint8x16_t s2, const uint8x16_t s3) {
  vst1q_u8(s, s0);
  s += p;
  vst1q_u8(s, s1);
  s += p;
  vst1q_u8(s, s2);
  s += p;
  vst1q_u8(s, s3);
}

static INLINE void load_u8_8x7(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3,
                               uint8x8_t *const s4, uint8x8_t *const s5,
                               uint8x8_t *const s6) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
  s += p;
  *s4 = vld1_u8(s);
  s += p;
  *s5 = vld1_u8(s);
  s += p;
  *s6 = vld1_u8(s);
}

static INLINE void load_u8_8x8(const uint8_t *s, const ptrdiff_t p,
                               uint8x8_t *const s0, uint8x8_t *const s1,
                               uint8x8_t *const s2, uint8x8_t *const s3,
                               uint8x8_t *const s4, uint8x8_t *const s5,
                               uint8x8_t *const s6, uint8x8_t *const s7) {
  *s0 = vld1_u8(s);
  s += p;
  *s1 = vld1_u8(s);
  s += p;
  *s2 = vld1_u8(s);
  s += p;
  *s3 = vld1_u8(s);
  s += p;
  *s4 = vld1_u8(s);
  s += p;
  *s5 = vld1_u8(s);
  s += p;
  *s6 = vld1_u8(s);
  s += p;
  *s7 = vld1_u8(s);
}

static INLINE void store_u8_8x8(uint8_t *s, const ptrdiff_t p,
                                const uint8x8_t s0, const uint8x8_t s1,
                                const uint8x8_t s2, const uint8x8_t s3,
                                const uint8x8_t s4, const uint8x8_t s5,
                                const uint8x8_t s6, const uint8x8_t s7) {
  vst1_u8(s, s0);
  s += p;
  vst1_u8(s, s1);
  s += p;
  vst1_u8(s, s2);
  s += p;
  vst1_u8(s, s3);
  s += p;
  vst1_u8(s, s4);
  s += p;
  vst1_u8(s, s5);
  s += p;
  vst1_u8(s, s6);
  s += p;
  vst1_u8(s, s7);
}

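// The 8x7 and 8x8 loaders above fit the usual 8-tap vertical convolution
// pattern: prime seven rows once, then load one new row per output line. A
// sketch only; `src`, `src_stride`, the loop bound `h` and the filter step
// are hypothetical:
//
//   uint8x8_t s0, s1, s2, s3, s4, s5, s6;
//   load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
//   src += 7 * src_stride;
//   do {
//     const uint8x8_t s7 = vld1_u8(src);
//     // ... apply the 8-tap filter to s0..s7 and store one output row ...
//     s0 = s1; s1 = s2; s2 = s3; s3 = s4; s4 = s5; s5 = s6; s6 = s7;
//     src += src_stride;
//   } while (--h != 0);
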
static INLINE void load_u8_16x8(const uint8_t *s, const ptrdiff_t p,
                                uint8x16_t *const s0, uint8x16_t *const s1,
                                uint8x16_t *const s2, uint8x16_t *const s3,
                                uint8x16_t *const s4, uint8x16_t *const s5,
                                uint8x16_t *const s6, uint8x16_t *const s7) {
  *s0 = vld1q_u8(s);
  s += p;
  *s1 = vld1q_u8(s);
  s += p;
  *s2 = vld1q_u8(s);
  s += p;
  *s3 = vld1q_u8(s);
  s += p;
  *s4 = vld1q_u8(s);
  s += p;
  *s5 = vld1q_u8(s);
  s += p;
  *s6 = vld1q_u8(s);
  s += p;
  *s7 = vld1q_u8(s);
}

static INLINE void store_u8_16x8(uint8_t *s, const ptrdiff_t p,
                                 const uint8x16_t s0, const uint8x16_t s1,
                                 const uint8x16_t s2, const uint8x16_t s3,
                                 const uint8x16_t s4, const uint8x16_t s5,
                                 const uint8x16_t s6, const uint8x16_t s7) {
  vst1q_u8(s, s0);
  s += p;
  vst1q_u8(s, s1);
  s += p;
  vst1q_u8(s, s2);
  s += p;
  vst1q_u8(s, s3);
  s += p;
  vst1q_u8(s, s4);
  s += p;
  vst1q_u8(s, s5);
  s += p;
  vst1q_u8(s, s6);
  s += p;
  vst1q_u8(s, s7);
}

#endif  // VPX_VPX_DSP_ARM_MEM_NEON_H_