/*
 * Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>

#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"

#include "aom/aom_integer.h"
#include "aom_ports/mem.h"

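// Sum all eight lanes of a signed 16-bit vector, widening to a 32-bit
// result. On AArch64 this is a single across-vector reduction; the Armv7
// fallback widens pairwise and folds the two 64-bit halves together.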
static INLINE int horizontal_add_s16x8(const int16x8_t a) {
#if defined(__aarch64__)
  return vaddlvq_s16(a);
#else
  const int32x4_t b = vpaddlq_s16(a);
  const int64x2_t c = vpaddlq_s32(b);
  const int32x2_t d = vadd_s32(vreinterpret_s32_s64(vget_low_s64(c)),
                               vreinterpret_s32_s64(vget_high_s64(c)));
  return vget_lane_s32(d, 0);
#endif
}

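// Sum all four lanes of a signed 32-bit vector. The Armv7 fallback widens
// pairwise to 64 bits, then adds the two halves lane-wise.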
static INLINE int horizontal_add_s32x4(const int32x4_t a) {
#if defined(__aarch64__)
  return vaddvq_s32(a);
#else
  const int64x2_t b = vpaddlq_s32(a);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
#endif
}

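// Sum the two lanes of an unsigned 64-bit vector.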
static INLINE uint64_t horizontal_add_u64x2(const uint64x2_t a) {
#if defined(__aarch64__)
  return vaddvq_u64(a);
#else
  return vgetq_lane_u64(a, 0) + vgetq_lane_u64(a, 1);
#endif
}

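// Sum all four lanes of an unsigned 32-bit vector, widening to a 64-bit
// result so the total cannot overflow.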
static INLINE uint64_t horizontal_long_add_u32x4(const uint32x4_t a) {
#if defined(__aarch64__)
  return vaddlvq_u32(a);
#else
  const uint64x2_t b = vpaddlq_u32(a);
  return vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1);
#endif
}

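// Sum all four lanes of an unsigned 32-bit vector into a 32-bit result.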
static INLINE unsigned int horizontal_add_u32x4(const uint32x4_t a) {
#if defined(__aarch64__)
  return vaddvq_u32(a);
#else
  const uint64x2_t b = vpaddlq_u32(a);
  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                vreinterpret_u32_u64(vget_high_u64(b)));
  return vget_lane_u32(c, 0);
#endif
}

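// Sum all sixteen lanes of two unsigned 16-bit vectors, widening to a
// 32-bit result.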
static INLINE uint32_t horizontal_long_add_u16x8(const uint16x8_t vec_lo,
                                                 const uint16x8_t vec_hi) {
#if defined(__aarch64__)
  return vaddlvq_u16(vec_lo) + vaddlvq_u16(vec_hi);
#else
  const uint32x4_t vec_l_lo =
      vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
  const uint32x4_t vec_l_hi =
      vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
  const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
  const uint64x2_t b = vpaddlq_u32(a);
  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                vreinterpret_u32_u64(vget_high_u64(b)));
  return vget_lane_u32(c, 0);
#endif
}

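// Sum all eight lanes of an unsigned 16-bit vector, widening to a 32-bit
// result.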
static INLINE uint32_t horizontal_add_u16x8(const uint16x8_t a) {
#if defined(__aarch64__)
  return vaddlvq_u16(a);
#else
  const uint32x4_t b = vpaddlq_u16(a);
  const uint64x2_t c = vpaddlq_u32(b);
  const uint32x2_t d = vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
                                vreinterpret_u32_u64(vget_high_u64(c)));
  return vget_lane_u32(d, 0);
#endif
}

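// Sum the two lanes of an unsigned 32-bit vector.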
static INLINE uint32_t horizontal_add_u32x2(const uint32x2_t a) {
#if defined(__aarch64__)
  return vaddv_u32(a);
#else
  const uint64x1_t b = vpaddl_u32(a);
  return vget_lane_u32(vreinterpret_u32_u64(b), 0);
#endif
}

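// Sum all four lanes of an unsigned 16-bit vector, widening to a 32-bit
// result.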
static INLINE uint32_t horizontal_add_u16x4(const uint16x4_t a) {
#if defined(__aarch64__)
  return vaddlv_u16(a);
#else
  const uint32x2_t b = vpaddl_u16(a);
  const uint64x1_t c = vpaddl_u32(b);
  return vget_lane_u32(vreinterpret_u32_u64(c), 0);
#endif
}

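// A minimal usage sketch, not part of the original file: computing the sum
// of absolute differences between two 8-byte rows and reducing it with
// horizontal_add_u16x8() above. The function name and its parameters are
// hypothetical, for illustration only.
static INLINE uint32_t example_row_sad_neon(const uint8_t *src,
                                            const uint8_t *ref) {
  // vabdl_u8 widens |src[i] - ref[i]| to 16 bits per lane, so eight byte
  // differences accumulate without overflow.
  const uint16x8_t abs_diff = vabdl_u8(vld1_u8(src), vld1_u8(ref));
  // Reduce the eight 16-bit lanes to a single scalar total.
  return horizontal_add_u16x8(abs_diff);
}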