/*
 *  Copyright (c) 2017 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_ARM_MEM_NEON_H_
#define VPX_VPX_DSP_ARM_MEM_NEON_H_

#include <arm_neon.h>
#include <assert.h>
#include <string.h>

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/vpx_dsp_common.h"

static INLINE int16x4_t create_s16x4_neon(const int16_t c0, const int16_t c1,
                                          const int16_t c2, const int16_t c3) {
  return vcreate_s16((uint16_t)c0 | ((uint32_t)c1 << 16) |
                     ((int64_t)(uint16_t)c2 << 32) | ((int64_t)c3 << 48));
}

static INLINE int32x2_t create_s32x2_neon(const int32_t c0, const int32_t c1) {
  return vcreate_s32((uint32_t)c0 | ((int64_t)(uint32_t)c1 << 32));
}

static INLINE int32x4_t create_s32x4_neon(const int32_t c0, const int32_t c1,
                                          const int32_t c2, const int32_t c3) {
  return vcombine_s32(create_s32x2_neon(c0, c1), create_s32x2_neon(c2, c3));
}

// Helper functions used to load tran_low_t into int16, narrowing if necessary.
static INLINE int16x8x2_t load_tran_low_to_s16x2q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4x2_t v0 = vld2q_s32(buf);
  const int32x4x2_t v1 = vld2q_s32(buf + 8);
  const int16x4_t s0 = vmovn_s32(v0.val[0]);
  const int16x4_t s1 = vmovn_s32(v0.val[1]);
  const int16x4_t s2 = vmovn_s32(v1.val[0]);
  const int16x4_t s3 = vmovn_s32(v1.val[1]);
  int16x8x2_t res;
  res.val[0] = vcombine_s16(s0, s2);
  res.val[1] = vcombine_s16(s1, s3);
  return res;
#else
  return vld2q_s16(buf);
#endif
}

static INLINE int16x8_t load_tran_low_to_s16q(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  const int32x4_t v1 = vld1q_s32(buf + 4);
  const int16x4_t s0 = vmovn_s32(v0);
  const int16x4_t s1 = vmovn_s32(v1);
  return vcombine_s16(s0, s1);
#else
  return vld1q_s16(buf);
#endif
}

static INLINE int16x4_t load_tran_low_to_s16d(const tran_low_t *buf) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vld1q_s32(buf);
  return vmovn_s32(v0);
#else
  return vld1_s16(buf);
#endif
}

static INLINE void store_s16q_to_tran_low(tran_low_t *buf, const int16x8_t a) {
#if CONFIG_VP9_HIGHBITDEPTH
  const int32x4_t v0 = vmovl_s16(vget_low_s16(a));
  const int32x4_t v1 = vmovl_s16(vget_high_s16(a));
  vst1q_s32(buf, v0);
  vst1q_s32(buf + 4, v1);
#else
  vst1q_s16(buf, a);
#endif
}
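
// Illustrative sketch (not part of the original header, name and parameters
// are hypothetical): round-trip a block of 8 coefficients through the
// tran_low_t helpers above. With CONFIG_VP9_HIGHBITDEPTH the load narrows
// 32-bit coefficients to 16 bits and the store widens them back; otherwise
// the helpers reduce to plain vld1q_s16/vst1q_s16.
static INLINE void example_round_shift_s16q(const tran_low_t *in,
                                            tran_low_t *out) {
  const int16x8_t a = load_tran_low_to_s16q(in);
  // Example arithmetic in the 16-bit domain: rounding halving, (a + 1) >> 1.
  const int16x8_t b = vrshrq_n_s16(a, 1);
  store_s16q_to_tran_low(out, b);
}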

// Propagate type information to the compiler. Without this the compiler may
// assume the required alignment of uint32_t (4 bytes) and add alignment hints
// to the memory access.
//
// This is used for functions operating on uint8_t which wish to load or store
// 4 values at a time but which may not be on 4 byte boundaries.
static INLINE void uint32_to_mem(uint8_t *buf, uint32_t a) {
  memcpy(buf, &a, 4);
}
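
// Illustrative sketch (hypothetical helper, not part of the original header):
// write the low 4 bytes of a vector to a destination with no alignment
// guarantee. Storing through a uint32_t * cast instead would let the compiler
// assume 4-byte alignment, which uint32_to_mem deliberately avoids.
static INLINE void example_store_4_bytes(uint8_t *dst, const uint8x8_t a) {
  uint32_to_mem(dst, vget_lane_u32(vreinterpret_u32_u8(a), 0));
}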

// Load 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x8_t load_unaligned_u8(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x2_t a_u32 = vdup_n_u32(0);
  if (stride == 4) return vld1_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vset_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  a_u32 = vset_lane_u32(a, a_u32, 1);
  return vreinterpret_u8_u32(a_u32);
}

// Store 2 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8(uint8_t *buf, int stride,
                                      const uint8x8_t a) {
  const uint32x2_t a_u32 = vreinterpret_u32_u8(a);
  if (stride == 4) {
    vst1_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vget_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vget_lane_u32(a_u32, 1));
}
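
// Illustrative sketch (hypothetical helper, not part of the original header):
// copy a 4x2 block of bytes between buffers whose pointers and strides carry
// no alignment guarantee, using the unaligned d-register pair above.
static INLINE void example_copy_4x2(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride) {
  const uint8x8_t a = load_unaligned_u8(src, src_stride);
  store_unaligned_u8(dst, dst_stride, a);
}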

// Load 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE uint8x16_t load_unaligned_u8q(const uint8_t *buf, int stride) {
  uint32_t a;
  uint32x4_t a_u32 = vdupq_n_u32(0);
  if (stride == 4) return vld1q_u8(buf);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 0);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 1);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 2);
  memcpy(&a, buf, 4);
  buf += stride;
  a_u32 = vsetq_lane_u32(a, a_u32, 3);
  return vreinterpretq_u8_u32(a_u32);
}

// Store 4 sets of 4 bytes when alignment is not guaranteed.
static INLINE void store_unaligned_u8q(uint8_t *buf, int stride,
                                       const uint8x16_t a) {
  const uint32x4_t a_u32 = vreinterpretq_u32_u8(a);
  if (stride == 4) {
    vst1q_u8(buf, a);
    return;
  }
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 0));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 1));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 2));
  buf += stride;
  uint32_to_mem(buf, vgetq_lane_u32(a_u32, 3));
}
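
// Illustrative sketch (hypothetical helper, not part of the original header):
// copy a 4x4 block of bytes between unaligned buffers using the q-register
// variants above; memory is only touched through 4-byte memcpy-based accesses.
static INLINE void example_copy_4x4(const uint8_t *src, int src_stride,
                                    uint8_t *dst, int dst_stride) {
  const uint8x16_t a = load_unaligned_u8q(src, src_stride);
  store_unaligned_u8q(dst, dst_stride, a);
}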

// Load 2 sets of 4 bytes when alignment is guaranteed.
static INLINE uint8x8_t load_u8(const uint8_t *buf, int stride) {
  uint32x2_t a = vdup_n_u32(0);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  a = vld1_lane_u32((const uint32_t *)buf, a, 0);
  buf += stride;
  a = vld1_lane_u32((const uint32_t *)buf, a, 1);
  return vreinterpret_u8_u32(a);
}

// Store 2 sets of 4 bytes when alignment is guaranteed.
static INLINE void store_u8(uint8_t *buf, int stride, const uint8x8_t a) {
  uint32x2_t a_u32 = vreinterpret_u32_u8(a);

  assert(!((intptr_t)buf % sizeof(uint32_t)));
  assert(!(stride % sizeof(uint32_t)));

  vst1_lane_u32((uint32_t *)buf, a_u32, 0);
  buf += stride;
  vst1_lane_u32((uint32_t *)buf, a_u32, 1);
}
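
// Illustrative sketch (hypothetical helper, not part of the original header):
// average two 4x2 blocks whose pointers and strides satisfy the 4-byte
// alignment asserted by load_u8/store_u8 above.
static INLINE void example_avg_4x2(const uint8_t *a, int a_stride,
                                   const uint8_t *b, int b_stride,
                                   uint8_t *dst, int dst_stride) {
  const uint8x8_t v_a = load_u8(a, a_stride);
  const uint8x8_t v_b = load_u8(b, b_stride);
  store_u8(dst, dst_stride, vrhadd_u8(v_a, v_b));
}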
#endif  // VPX_VPX_DSP_ARM_MEM_NEON_H_