// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#pragma once

#include <xnnpack/common.h>


#if defined(__SSE2__)
#include <emmintrin.h>

// GCC pre-11, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, and ICC pre-16
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 11)) || \
    (defined(__clang__) && !defined(__apple_build_version__) && (__clang_major__ < 8)) || \
    (defined(__clang__) && defined(__ANDROID__) && (__clang_major__ == 8) && (__clang_minor__ == 0) && (__clang_patchlevel__ < 7)) || \
    (defined(__clang__) && defined(__apple_build_version__) && (__apple_build_version__ < 11000000)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1600))

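// Polyfill for _mm_loadu_si32: loads 4 bytes from (possibly unaligned) memory
// into the low 32 bits of an XMM register, zeroing the upper 96 bits.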
static XNN_INTRINSIC
__m128i _mm_loadu_si32(const void* address) {
  return _mm_cvtsi32_si128(*((const int*) address));
}

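// Polyfill for _mm_storeu_si32: stores the low 32 bits of an XMM register to
// (possibly unaligned) memory.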
static XNN_INTRINSIC
void _mm_storeu_si32(const void* address, __m128i v) {
  *((int*) address) = _mm_cvtsi128_si32(v);
}
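
// A minimal usage sketch (hypothetical identifiers `input`, `output`, `vbias`):
// these polyfills let SSE2 kernels handle a 4-byte tail uniformly across
// compilers, e.g.
//   __m128i vtail = _mm_loadu_si32(input);  // load the final 4 input bytes
//   vtail = _mm_add_epi8(vtail, vbias);     // process them as a vector
//   _mm_storeu_si32(output, vtail);         // store the 4 result bytes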
#endif  // GCC pre-11, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, and ICC pre-16
#endif  // SSE2

#ifdef __AVX512F__
#include <immintrin.h>

// GCC pre-7, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, ICC pre-18, and MSVC pre-2019
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 7)) || \
    (defined(__clang__) && !defined(__apple_build_version__) && (__clang_major__ < 8)) || \
    (defined(__clang__) && defined(__ANDROID__) && (__clang_major__ == 8) && (__clang_minor__ == 0) && (__clang_patchlevel__ < 7)) || \
    (defined(__clang__) && defined(__apple_build_version__) && (__apple_build_version__ < 11000000)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1800)) || \
    (defined(_MSC_VER) && !defined(__clang__) && !defined(__GNUC__) && (_MSC_VER <= 1916))

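// Polyfills for _cvtu32_mask16/_cvtu64_mask64: __mmask16 and __mmask64 are
// plain integer typedefs, so converting from an integer is just a cast.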
static XNN_INTRINSIC
__mmask16 _cvtu32_mask16(unsigned int mask) {
  return (__mmask16) mask;
}

static XNN_INTRINSIC
__mmask64 _cvtu64_mask64(unsigned long long mask) {
  return (__mmask64) mask;
}

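// Polyfills for _kshiftli_mask64/_kshiftri_mask64: shift a 64-bit mask left or
// right by `count` bits, mirroring the KSHIFTLQ/KSHIFTRQ instructions.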
static XNN_INTRINSIC
__mmask64 _kshiftli_mask64(__mmask64 a, unsigned int count) {
  return (__mmask64) ((unsigned long long) a << count);
}

static XNN_INTRINSIC
__mmask64 _kshiftri_mask64(__mmask64 a, unsigned int count) {
  return (__mmask64) ((unsigned long long) a >> count);
}

#endif  // GCC pre-7, Clang pre-8, Android NDK Clang pre-8.0.7, Apple Clang pre-11, ICC pre-18, and MSVC pre-2019

// GCC pre-7, Clang pre-4, and ICC pre-18
#if (defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 7)) || \
    (defined(__clang__) && (__clang_major__ < 4)) || \
    (defined(__INTEL_COMPILER) && (__INTEL_COMPILER < 1800))

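// Polyfill for _mm512_reduce_add_ps: sums all 16 lanes by repeatedly halving
// the vector width (512 -> 256 -> 128 bits, then within the XMM register).
// Without AVX512DQ there is no _mm512_extractf32x8_ps, so the upper 256 bits
// are extracted via the f64x4 variant and bitcast back to floats.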
static XNN_INTRINSIC
float _mm512_reduce_add_ps(__m512 v) {
#if __AVX512DQ__
  const __m256 sum2 = _mm256_add_ps(_mm512_castps512_ps256(v), _mm512_extractf32x8_ps(v, 1));
#else
  const __m256 sum2 = _mm256_add_ps(_mm512_castps512_ps256(v), _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(v), 1)));
#endif
  const __m128 sum4 = _mm_add_ps(_mm256_castps256_ps128(sum2), _mm256_extractf128_ps(sum2, 1));
  const __m128 sum8 = _mm_add_ps(sum4, _mm_movehl_ps(sum4, sum4));
  const __m128 sum16 = _mm_add_ss(sum8, _mm_movehdup_ps(sum8));
  return _mm_cvtss_f32(sum16);
}

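// Polyfill for _mm512_reduce_max_ps: the same halving scheme as above, with
// max in place of add.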
static XNN_INTRINSIC
float _mm512_reduce_max_ps(__m512 v) {
#if __AVX512DQ__
  const __m256 max2 = _mm256_max_ps(_mm512_castps512_ps256(v), _mm512_extractf32x8_ps(v, 1));
#else
  const __m256 max2 = _mm256_max_ps(_mm512_castps512_ps256(v), _mm256_castpd_ps(_mm512_extractf64x4_pd(_mm512_castps_pd(v), 1)));
#endif
  const __m128 max4 = _mm_max_ps(_mm256_castps256_ps128(max2), _mm256_extractf128_ps(max2, 1));
  const __m128 max8 = _mm_max_ps(max4, _mm_movehl_ps(max4, max4));
  const __m128 max16 = _mm_max_ss(max8, _mm_movehdup_ps(max8));
  return _mm_cvtss_f32(max16);
}

#endif  // GCC pre-7, Clang pre-4, and ICC pre-18

#if defined(__GNUC__) && !defined(__clang__) && !defined(__INTEL_COMPILER) && (__GNUC__ < 9)
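// Polyfill for _mm512_set_epi8 (missing in GCC pre-9): the intrinsic takes its
// 64 byte arguments from the highest element down, while the vector literal
// below lists elements from the lowest up, hence the reversed order.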
static XNN_INTRINSIC
__m512i _mm512_set_epi8(
  char e63, char e62, char e61, char e60,
  char e59, char e58, char e57, char e56,
  char e55, char e54, char e53, char e52,
  char e51, char e50, char e49, char e48,
  char e47, char e46, char e45, char e44,
  char e43, char e42, char e41, char e40,
  char e39, char e38, char e37, char e36,
  char e35, char e34, char e33, char e32,
  char e31, char e30, char e29, char e28,
  char e27, char e26, char e25, char e24,
  char e23, char e22, char e21, char e20,
  char e19, char e18, char e17, char e16,
  char e15, char e14, char e13, char e12,
  char e11, char e10, char e09, char e08,
  char e07, char e06, char e05, char e04,
  char e03, char e02, char e01, char e00)
{
  return (__m512i) (__v64qi) {
    e00, e01, e02, e03, e04, e05, e06, e07,
    e08, e09, e10, e11, e12, e13, e14, e15,
    e16, e17, e18, e19, e20, e21, e22, e23,
    e24, e25, e26, e27, e28, e29, e30, e31,
    e32, e33, e34, e35, e36, e37, e38, e39,
    e40, e41, e42, e43, e44, e45, e46, e47,
    e48, e49, e50, e51, e52, e53, e54, e55,
    e56, e57, e58, e59, e60, e61, e62, e63
  };
}
#endif  // GCC pre-9

#endif  // __AVX512F__

#if XNN_ARCH_ARM && (defined(__ARM_NEON) || defined(__ARM_NEON__))
#include <arm_neon.h>

// AArch32 GCC targeting ARMv8 NEON, see
// - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71233
// - https://gcc.gnu.org/bugzilla/show_bug.cgi?id=95399
#if defined(__GNUC__) && !defined(__clang__) && (__ARM_ARCH >= 8)
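// Polyfill for vcvtnq_s32_f32 (float-to-int conversion with round-to-nearest-
// even): vrndnq_f32 rounds to the nearest integral value, after which the
// truncating vcvtq_s32_f32 conversion is exact.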
static XNN_INTRINSIC
int32x4_t vcvtnq_s32_f32(float32x4_t v) {
  return vcvtq_s32_f32(vrndnq_f32(v));
}
#endif  // AArch32 GCC targeting ARMv8 NEON

#endif  // ARM NEON

#if XNN_ARCH_ARM64
#include <arm_neon.h>

// AArch64 GCC pre-8, 8.1-8.4, 9.1-9.3
#if defined(__GNUC__) && !defined(__clang__) && \
  (__GNUC__ < 8 || (__GNUC__ == 8 && __GNUC_MINOR__ < 5) || (__GNUC__ == 9 && __GNUC_MINOR__ < 4))
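// Polyfill for vld1q_u8_x4 (missing in the GCC versions above): emulates the
// 64-byte structure load with four consecutive 16-byte loads.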
static XNN_INTRINSIC
uint8x16x4_t vld1q_u8_x4(const uint8_t* address) {
  uint8x16x4_t result;
  result.val[0] = vld1q_u8(address);
  result.val[1] = vld1q_u8(address + 16);
  result.val[2] = vld1q_u8(address + 32);
  result.val[3] = vld1q_u8(address + 48);
  return result;
}
#endif  // AArch64 GCC pre-8, 8.1-8.4, 9.1-9.3

#endif  // ARM64 NEON