// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <xnnpack/common.h>
#include <xnnpack/unaligned.h>


#if defined(__SSE2__)
#include <emmintrin.h>

// GCC pre-11, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, and ICC pre-16
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 11)) || \
    (defined(__clang__) && !defined(__apple_build_version__) && (__clang_major__ < 8)) || \
    (defined(__clang__) && defined(__ANDROID__) && (__clang_major__ == 8) && (__clang_minor__ == 0) && (__clang_patchlevel__ < 7)) || \
    (defined(__clang__) && defined(__apple_build_version__) && (__apple_build_version__ < 11000000)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600))

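// Polyfill for _mm_storeu_si32: store the low 32 bits of an XMM register to a
// potentially unaligned address.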
static XNN_INTRINSIC
void _mm_storeu_si32(void* address, __m128i v) {
  unaligned_store_u32(address, (uint32_t) _mm_cvtsi128_si32(v));
}

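// Polyfill for _mm_storeu_si16: store the low 16 bits of an XMM register to a
// potentially unaligned address.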
static XNN_INTRINSIC
void _mm_storeu_si16(void* address, __m128i v) {
  unaligned_store_u16(address, (uint16_t) _mm_extract_epi16(v, 0));
}
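
// Illustrative usage (hypothetical names, not part of this header):
//   _mm_storeu_si32(out32, vout);  // writes the 4 lowest bytes of vout to out32
//   _mm_storeu_si16(out16, vout);  // writes the 2 lowest bytes of vout to out16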
#endif  // GCC pre-11, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, and ICC pre-16
#endif  // SSE2

#ifdef __AVX512F__
#include <immintrin.h>

// GCC pre-7, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, ICC pre-18, and MSVC pre-2019
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 7)) || \
    (defined(__clang__) && !defined(__apple_build_version__) && (__clang_major__ < 8)) || \
    (defined(__clang__) && defined(__ANDROID__) && (__clang_major__ == 8) && (__clang_minor__ == 0) && (__clang_patchlevel__ < 7)) || \
    (defined(__clang__) && defined(__apple_build_version__) && (__apple_build_version__ < 11000000)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1800)) || \
    (defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__) && (_MSC_VER <= 1916))

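// Polyfills for AVX-512 mask-register intrinsics: _cvtu32_mask16 / _cvtu64_mask64
// reinterpret an integer as a __mmask16 / __mmask64, and _kshiftli_mask64 /
// _kshiftri_mask64 shift a 64-bit mask left / right by `count` bits. A typical
// (illustrative) use is building a tail mask covering the first n lanes, e.g.
// _cvtu32_mask16((uint32_t) ((UINT32_C(1) << n) - 1)) with n < 16 assumed.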
static XNN_INTRINSIC
__mmask16 _cvtu32_mask16(unsigned int mask) {
  return (__mmask16) mask;
}

static XNN_INTRINSIC
__mmask64 _cvtu64_mask64(unsigned long long mask) {
  return (__mmask64) mask;
}

static XNN_INTRINSIC
__mmask64 _kshiftli_mask64(__mmask64 a, unsigned int count) {
  return (__mmask64) ((unsigned long long) a << count);
}

static XNN_INTRINSIC
__mmask64 _kshiftri_mask64(__mmask64 a, unsigned int count) {
  return (__mmask64) ((unsigned long long) a >> count);
}

#endif  // GCC pre-7, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, ICC pre-18, and MSVC pre-2019

// GCC pre-7, Clang pre-4, and ICC pre-18
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 7)) || \
    (defined(__clang__) && (__clang_major__ < 4)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1800))

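// Polyfill for _mm512_reduce_add_ps: horizontal sum of all 16 float lanes via a
// tree reduction: the upper and lower 256-bit halves are added, then the 128-bit
// halves, then the remaining four lanes within the low XMM register.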
static XNN_INTRINSIC
float _mm512_reduce_add_ps(__m512 v) {
#if __AVX512DQ__
  const __m256 sum2 = _mm256_add_ps(_mm512_castps512_ps256(v), _mm512_extractf32x8_ps(v, 1));
#else
  const __m256 sum2 = _mm256_add_ps(_mm512_castps512_ps256(v), _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(v), 1)));
#endif
  const __m128 sum4 = _mm_add_ps(_mm256_castps256_ps128(sum2), _mm256_extractf128_ps(sum2, 1));
  const __m128 sum8 = _mm_add_ps(sum4, _mm_movehl_ps(sum4, sum4));
  const __m128 sum16 = _mm_add_ss(sum8, _mm_movehdup_ps(sum8));
  return _mm_cvtss_f32(sum16);
}

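// Polyfill for _mm512_reduce_max_ps: horizontal maximum of all 16 float lanes,
// using the same tree-reduction shape as the sum above.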
static XNN_INTRINSIC
float _mm512_reduce_max_ps(__m512 v) {
#if __AVX512DQ__
  const __m256 max2 = _mm256_max_ps(_mm512_castps512_ps256(v), _mm512_extractf32x8_ps(v, 1));
#else
  const __m256 max2 = _mm256_max_ps(_mm512_castps512_ps256(v), _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(v), 1)));
#endif
  const __m128 max4 = _mm_max_ps(_mm256_castps256_ps128(max2), _mm256_extractf128_ps(max2, 1));
  const __m128 max8 = _mm_max_ps(max4, _mm_movehl_ps(max4, max4));
  const __m128 max16 = _mm_max_ss(max8, _mm_movehdup_ps(max8));
  return _mm_cvtss_f32(max16);
}

#endif  // GCC pre-7, Clang pre-4, and ICC pre-18

#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 9)
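// Polyfill for _mm512_set_epi8: builds a 512-bit vector from 64 byte arguments,
// passed from the most-significant element (e63) down to the least-significant
// element (e00), by listing them in ascending order in a GCC vector literal.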
static XNN_INTRINSIC
__m512i _mm512_set_epi8(
  char e63, char e62, char e61, char e60,
  char e59, char e58, char e57, char e56,
  char e55, char e54, char e53, char e52,
  char e51, char e50, char e49, char e48,
  char e47, char e46, char e45, char e44,
  char e43, char e42, char e41, char e40,
  char e39, char e38, char e37, char e36,
  char e35, char e34, char e33, char e32,
  char e31, char e30, char e29, char e28,
  char e27, char e26, char e25, char e24,
  char e23, char e22, char e21, char e20,
  char e19, char e18, char e17, char e16,
  char e15, char e14, char e13, char e12,
  char e11, char e10, char e09, char e08,
  char e07, char e06, char e05, char e04,
  char e03, char e02, char e01, char e00)
{
  return (__m512i) (__v64qi) {
    e00, e01, e02, e03, e04, e05, e06, e07,
    e08, e09, e10, e11, e12, e13, e14, e15,
    e16, e17, e18, e19, e20, e21, e22, e23,
    e24, e25, e26, e27, e28, e29, e30, e31,
    e32, e33, e34, e35, e36, e37, e38, e39,
    e40, e41, e42, e43, e44, e45, e46, e47,
    e48, e49, e50, e51, e52, e53, e54, e55,
    e56, e57, e58, e59, e60, e61, e62, e63
  };
}
#endif  // GCC pre-9

#endif  // __AVX512F__

#if XNN_ARCH_ARM

// AArch32 GCC 10+ implements arm_acle.h header, but lacks __ror intrinsic
#if defined(__GNUC__) && !defined(__clang__)
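// Polyfill for the ACLE __ror intrinsic: rotate x right by y bits. Assumes
// 0 < y < 32 (y == 0 would shift by 32 bits, which is undefined behavior in C).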
static XNN_INTRINSIC uint32_t __ror(uint32_t x, uint32_t y) {
  return (x >> y) | (x << (32 - y));
}
#endif  // AArch32 GCC

#endif  // ARM

#if XNN_ARCH_ARM && (defined(__ARM_NEON) || defined(__ARM_NEON__))
#include <arm_neon.h>

// AArch32 GCC targeting ARMv8 NEON, see
// - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71233
// - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95399
#if defined(__GNUC__) && !defined(__clang__) && (__ARM_ARCH >= 8)
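// Polyfill for vcvtnq_s32_f32: convert each float lane to int32 with
// round-to-nearest-even, emulated as vrndnq_f32 (round to nearest integral value)
// followed by the truncating conversion vcvtq_s32_f32.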
static XNN_INTRINSIC
int32x4_t vcvtnq_s32_f32(float32x4_t v) {
  return vcvtq_s32_f32(vrndnq_f32(v));
}
#endif  // AArch32 GCC targeting ARMv8 NEON

#endif  // ARM NEON

#if XNN_ARCH_ARM64
#include <arm_neon.h>

// AArch64 GCC pre-8, 8.1-8.4, 9.1-9.3
#if defined(__GNUC__) && !defined(__clang__) && \
    (__GNUC__ < 8 || (__GNUC__ == 8 && __GNUC_MINOR__ < 5) || (__GNUC__ == 9 && __GNUC_MINOR__ < 4))
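// Polyfill for vld1q_u8_x4: load four consecutive 128-bit vectors (64 bytes total)
// starting at `address`, using four separate vld1q_u8 loads.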
static XNN_INTRINSIC
uint8x16x4_t vld1q_u8_x4(const uint8_t* address) {
  uint8x16x4_t result;
  result.val[0] = vld1q_u8(address);
  result.val[1] = vld1q_u8(address + 16);
  result.val[2] = vld1q_u8(address + 32);
  result.val[3] = vld1q_u8(address + 48);
  return result;
}
#endif  // AArch64 GCC pre-8, 8.1-8.4, 9.1-9.3

#endif  // ARM64 NEON