/*===---- smmintrin.h - SSE4 intrinsics ------------------------------------===
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 *===-----------------------------------------------------------------------===
 */

#ifndef _SMMINTRIN_H
#define _SMMINTRIN_H

#ifndef __SSE4_1__
#error "SSE4.1 instruction set not enabled"
#else

#include <tmmintrin.h>

/* SSE4 Rounding macros. */
#define _MM_FROUND_TO_NEAREST_INT    0x00
#define _MM_FROUND_TO_NEG_INF        0x01
#define _MM_FROUND_TO_POS_INF        0x02
#define _MM_FROUND_TO_ZERO           0x03
#define _MM_FROUND_CUR_DIRECTION     0x04

#define _MM_FROUND_RAISE_EXC         0x00
#define _MM_FROUND_NO_EXC            0x08

#define _MM_FROUND_NINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEAREST_INT)
#define _MM_FROUND_FLOOR     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_NEG_INF)
#define _MM_FROUND_CEIL      (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_POS_INF)
#define _MM_FROUND_TRUNC     (_MM_FROUND_RAISE_EXC | _MM_FROUND_TO_ZERO)
#define _MM_FROUND_RINT      (_MM_FROUND_RAISE_EXC | _MM_FROUND_CUR_DIRECTION)
#define _MM_FROUND_NEARBYINT (_MM_FROUND_NO_EXC | _MM_FROUND_CUR_DIRECTION)
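
/* Example (an illustrative sketch, not part of the API): each combined macro
   above is a rounding mode OR'd with an exception-control bit, and the same
   immediate can be built by hand.  Truncating every lane while suppressing
   precision exceptions:

     __m128 __v = _mm_set_ps(2.5f, -1.5f, 0.7f, -0.2f);
     __m128 __t = _mm_round_ps(__v, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
     __m128 __u = _mm_floor_ps(__v);  // _MM_FROUND_FLOOR; may raise exceptions
*/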

#define _mm_ceil_ps(X)       _mm_round_ps((X), _MM_FROUND_CEIL)
#define _mm_ceil_pd(X)       _mm_round_pd((X), _MM_FROUND_CEIL)
#define _mm_ceil_ss(X, Y)    _mm_round_ss((X), (Y), _MM_FROUND_CEIL)
#define _mm_ceil_sd(X, Y)    _mm_round_sd((X), (Y), _MM_FROUND_CEIL)

#define _mm_floor_ps(X)      _mm_round_ps((X), _MM_FROUND_FLOOR)
#define _mm_floor_pd(X)      _mm_round_pd((X), _MM_FROUND_FLOOR)
#define _mm_floor_ss(X, Y)   _mm_round_ss((X), (Y), _MM_FROUND_FLOOR)
#define _mm_floor_sd(X, Y)   _mm_round_sd((X), (Y), _MM_FROUND_FLOOR)

#define _mm_round_ps(X, M) __extension__ ({ \
  __m128 __X = (X); \
  (__m128) __builtin_ia32_roundps((__v4sf)__X, (M)); })

#define _mm_round_ss(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_roundss((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_round_pd(X, M) __extension__ ({ \
  __m128d __X = (X); \
  (__m128d) __builtin_ia32_roundpd((__v2df)__X, (M)); })

#define _mm_round_sd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_roundsd((__v2df)__X, (__v2df)__Y, (M)); })

/* SSE4 Packed Blending Intrinsics. */
#define _mm_blend_pd(V1, V2, M) __extension__ ({ \
  __m128d __V1 = (V1); \
  __m128d __V2 = (V2); \
  (__m128d) __builtin_ia32_blendpd ((__v2df)__V1, (__v2df)__V2, (M)); })

#define _mm_blend_ps(V1, V2, M) __extension__ ({ \
  __m128 __V1 = (V1); \
  __m128 __V2 = (V2); \
  (__m128) __builtin_ia32_blendps ((__v4sf)__V1, (__v4sf)__V2, (M)); })

static __inline__ __m128d __attribute__((__always_inline__, __nodebug__))
_mm_blendv_pd (__m128d __V1, __m128d __V2, __m128d __M)
{
  return (__m128d) __builtin_ia32_blendvpd ((__v2df)__V1, (__v2df)__V2,
                                            (__v2df)__M);
}

static __inline__ __m128 __attribute__((__always_inline__, __nodebug__))
_mm_blendv_ps (__m128 __V1, __m128 __V2, __m128 __M)
{
  return (__m128) __builtin_ia32_blendvps ((__v4sf)__V1, (__v4sf)__V2,
                                           (__v4sf)__M);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_blendv_epi8 (__m128i __V1, __m128i __V2, __m128i __M)
{
  return (__m128i) __builtin_ia32_pblendvb128 ((__v16qi)__V1, (__v16qi)__V2,
                                               (__v16qi)__M);
}

#define _mm_blend_epi16(V1, V2, M) __extension__ ({ \
  __m128i __V1 = (V1); \
  __m128i __V2 = (V2); \
  (__m128i) __builtin_ia32_pblendw128 ((__v8hi)__V1, (__v8hi)__V2, (M)); })
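
/* Usage sketch (illustrative): _mm_blend_ps selects with a 4-bit compile-time
   mask M (bit i set takes element i from V2, clear takes it from V1), while
   the blendv forms select per element from the sign bit of a runtime mask:

     __m128 __a = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
     __m128 __b = _mm_set_ps(40.0f, 30.0f, 20.0f, 10.0f);
     __m128 __c = _mm_blend_ps(__a, __b, 0x5);        // elements {10, 2, 30, 4}
     __m128 __m = _mm_cmplt_ps(__a, _mm_set1_ps(2.5f));
     __m128 __d = _mm_blendv_ps(__a, __b, __m);       // __b where __a < 2.5
*/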

/* SSE4 Dword Multiply Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mullo_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) ((__v4si)__V1 * (__v4si)__V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_mul_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmuldq128 ((__v4si)__V1, (__v4si)__V2);
}
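
/* Note (illustrative): _mm_mullo_epi32 keeps the low 32 bits of all four
   32x32 products, while _mm_mul_epi32 (PMULDQ) sign-extends the low dword of
   each 64-bit half (elements 0 and 2) and returns two full 64-bit products:

     __m128i __x  = _mm_set_epi32(7, -3, 5, 2);
     __m128i __lo = _mm_mullo_epi32(__x, __x);  // 32-bit {4, 25, 9, 49}
     __m128i __wd = _mm_mul_epi32(__x, __x);    // 64-bit {4, 9} from lanes 0, 2
*/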

/* SSE4 Floating Point Dot Product Instructions. */
#define _mm_dp_ps(X, Y, M) __extension__ ({ \
  __m128 __X = (X); \
  __m128 __Y = (Y); \
  (__m128) __builtin_ia32_dpps((__v4sf)__X, (__v4sf)__Y, (M)); })

#define _mm_dp_pd(X, Y, M) __extension__ ({ \
  __m128d __X = (X); \
  __m128d __Y = (Y); \
  (__m128d) __builtin_ia32_dppd((__v2df)__X, (__v2df)__Y, (M)); })
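
/* Usage sketch (illustrative): in the immediate M of _mm_dp_ps, bits 7:4
   choose which input lanes enter the product and bits 3:0 choose which result
   lanes receive the sum (the rest are zeroed).  M = 0xFF computes a full
   4-element dot product and broadcasts it to every lane:

     __m128 __x = _mm_set_ps(4.0f, 3.0f, 2.0f, 1.0f);
     __m128 __d = _mm_dp_ps(__x, _mm_set1_ps(1.0f), 0xFF);  // each lane = 10
*/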

/* SSE4 Streaming Load Hint Instruction. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_stream_load_si128 (__m128i *__V)
{
  return (__m128i) __builtin_ia32_movntdqa ((__v2di *) __V);
}
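
/* Note (illustrative): the non-temporal hint of MOVNTDQA only pays off when
   __V addresses write-combining (USWC) memory such as a mapped framebuffer;
   on ordinary write-back memory it acts as a plain 16-byte-aligned load.
   __wc_buf below is a hypothetical pointer into such a region:

     __m128i __q = _mm_stream_load_si128((__m128i *)__wc_buf);
*/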

/* SSE4 Packed Integer Min/Max Instructions. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi8 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsb128 ((__v16qi) __V1, (__v16qi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu16 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxuw128 ((__v8hi) __V1, (__v8hi) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epi32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxsd128 ((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_min_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pminud128((__v4si) __V1, (__v4si) __V2);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_max_epu32 (__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_pmaxud128((__v4si) __V1, (__v4si) __V2);
}

/* SSE4 Insertion and Extraction from XMM Register Instructions. */
#define _mm_insert_ps(X, Y, N) __builtin_ia32_insertps128((X), (Y), (N))
#define _mm_extract_ps(X, N) (__extension__                      \
                              ({ union { int i; float f; } __t;  \
                                 __v4sf __a = (__v4sf)(X);       \
                                 __t.f = __a[N];                 \
                                 __t.i;}))

/* Miscellaneous insert and extract macros. */
/* Extract a single-precision float from X at index N into D. */
#define _MM_EXTRACT_FLOAT(D, X, N) (__extension__ ({ __v4sf __a = (__v4sf)(X); \
                                                     (D) = __a[N]; }))

/* Or together 2 sets of indexes (X and Y) with the zeroing bits (Z) to create
   an index suitable for _mm_insert_ps. */
#define _MM_MK_INSERTPS_NDX(X, Y, Z) (((X) << 6) | ((Y) << 4) | (Z))

/* Extract a float from X at index N into the first index of the return. */
#define _MM_PICK_OUT_PS(X, N) _mm_insert_ps (_mm_setzero_ps(), (X),   \
                                             _MM_MK_INSERTPS_NDX((N), 0, 0x0e))
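
/* Example (illustrative): _MM_MK_INSERTPS_NDX packs the INSERTPS immediate
   from a source lane X (bits 7:6), a destination lane Y (bits 5:4), and a
   zeroing mask Z (bits 3:0).  With hypothetical vectors __a and __b, copying
   lane 2 of __b into lane 0 of __a with nothing zeroed:

     __m128 __r = _mm_insert_ps(__a, __b, _MM_MK_INSERTPS_NDX(2, 0, 0x0));
*/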

/* Insert int into packed integer array at index. */
#define _mm_insert_epi8(X, I, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                   __a[(N)] = (I);             \
                                                   __a;}))
#define _mm_insert_epi32(X, I, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                    __a[(N)] = (I);           \
                                                    __a;}))
#ifdef __x86_64__
#define _mm_insert_epi64(X, I, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                    __a[(N)] = (I);           \
                                                    __a;}))
#endif /* __x86_64__ */

/* Extract int from packed integer array at index.  This returns the element
 * as a zero extended value, so it is unsigned.
 */
#define _mm_extract_epi8(X, N) (__extension__ ({ __v16qi __a = (__v16qi)(X); \
                                                 (unsigned char)__a[(N)];}))
#define _mm_extract_epi32(X, N) (__extension__ ({ __v4si __a = (__v4si)(X); \
                                                  (unsigned)__a[(N)];}))
#ifdef __x86_64__
#define _mm_extract_epi64(X, N) (__extension__ ({ __v2di __a = (__v2di)(X); \
                                                  __a[(N)];}))
#endif /* __x86_64__ */
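
/* Usage sketch (illustrative): these macros index the vector as a plain
   array, so N must be a constant in range for the element width.  Note the
   insert forms evaluate to the raw vector type (e.g. __v4si), which converts
   back to __m128i under the default lax vector conversions:

     __m128i __v = _mm_set_epi32(3, 2, 1, 0);
     unsigned __e = _mm_extract_epi32(__v, 2);      // 2
     __m128i  __w = _mm_insert_epi32(__v, 42, 1);   // lane 1 becomes 42
*/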

/* SSE4 128-bit Packed Integer Comparisons. */
static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testz_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestz128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestc128((__v2di)__M, (__v2di)__V);
}

static __inline__ int __attribute__((__always_inline__, __nodebug__))
_mm_testnzc_si128(__m128i __M, __m128i __V)
{
  return __builtin_ia32_ptestnzc128((__v2di)__M, (__v2di)__V);
}

#define _mm_test_all_ones(V) _mm_testc_si128((V), _mm_cmpeq_epi32((V), (V)))
#define _mm_test_mix_ones_zeros(M, V) _mm_testnzc_si128((M), (V))
#define _mm_test_all_zeros(M, V) _mm_testz_si128 ((M), (V))
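
/* Example (illustrative): the helpers above map PTEST's flag outputs onto
   whole-register questions:

     __m128i __z = _mm_setzero_si128();
     int __allz = _mm_test_all_zeros(__z, __z);            // 1: __z & __z == 0
     int __allo = _mm_test_all_ones(_mm_set1_epi32(-1));   // 1: all bits set
*/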

/* SSE4 64-bit Packed Integer Comparisons. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpeq_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 == (__v2di)__V2);
}

/* SSE4 Packed Integer Sign-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbd128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxbq128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwd128((__v8hi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepi32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovsxdq128((__v4si)__V);
}

/* SSE4 Packed Integer Zero-Extension. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi16(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbw128((__v16qi) __V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbd128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu8_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxbq128((__v16qi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi32(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwd128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu16_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxwq128((__v8hi)__V);
}

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cvtepu32_epi64(__m128i __V)
{
  return (__m128i) __builtin_ia32_pmovzxdq128((__v4si)__V);
}

/* SSE4 Pack with Unsigned Saturation. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_packus_epi32(__m128i __V1, __m128i __V2)
{
  return (__m128i) __builtin_ia32_packusdw128((__v4si)__V1, (__v4si)__V2);
}

/* SSE4 Multiple Packed Sums of Absolute Difference. */
#define _mm_mpsadbw_epu8(X, Y, M) __extension__ ({ \
  __m128i __X = (X); \
  __m128i __Y = (Y); \
  (__m128i) __builtin_ia32_mpsadbw128((__v16qi)__X, (__v16qi)__Y, (M)); })

static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_minpos_epu16(__m128i __V)
{
  return (__m128i) __builtin_ia32_phminposuw128((__v8hi)__V);
}

/* These definitions are normally in nmmintrin.h, but gcc puts them in here
   so we'll do the same. */
#ifdef __SSE4_2__

/* These specify the type of data that we're comparing. */
#define _SIDD_UBYTE_OPS                 0x00
#define _SIDD_UWORD_OPS                 0x01
#define _SIDD_SBYTE_OPS                 0x02
#define _SIDD_SWORD_OPS                 0x03

/* These specify the type of comparison operation. */
#define _SIDD_CMP_EQUAL_ANY             0x00
#define _SIDD_CMP_RANGES                0x04
#define _SIDD_CMP_EQUAL_EACH            0x08
#define _SIDD_CMP_EQUAL_ORDERED         0x0c

/* These macros specify the polarity of the operation. */
#define _SIDD_POSITIVE_POLARITY         0x00
#define _SIDD_NEGATIVE_POLARITY         0x10
#define _SIDD_MASKED_POSITIVE_POLARITY  0x20
#define _SIDD_MASKED_NEGATIVE_POLARITY  0x30

/* These macros are used in _mm_cmpXstri() to specify the return. */
#define _SIDD_LEAST_SIGNIFICANT         0x00
#define _SIDD_MOST_SIGNIFICANT          0x40

/* These macros are used in _mm_cmpXstrm() to specify the return. */
#define _SIDD_BIT_MASK                  0x00
#define _SIDD_UNIT_MASK                 0x40

/* SSE4.2 Packed Comparison Intrinsics. */
#define _mm_cmpistrm(A, B, M) __builtin_ia32_pcmpistrm128((A), (B), (M))
#define _mm_cmpistri(A, B, M) __builtin_ia32_pcmpistri128((A), (B), (M))

#define _mm_cmpestrm(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestrm128((A), (LA), (B), (LB), (M))
#define _mm_cmpestri(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestri128((A), (LA), (B), (LB), (M))

/* SSE4.2 Packed Comparison Intrinsics and EFlag Reading. */
#define _mm_cmpistra(A, B, M) \
     __builtin_ia32_pcmpistria128((A), (B), (M))
#define _mm_cmpistrc(A, B, M) \
     __builtin_ia32_pcmpistric128((A), (B), (M))
#define _mm_cmpistro(A, B, M) \
     __builtin_ia32_pcmpistrio128((A), (B), (M))
#define _mm_cmpistrs(A, B, M) \
     __builtin_ia32_pcmpistris128((A), (B), (M))
#define _mm_cmpistrz(A, B, M) \
     __builtin_ia32_pcmpistriz128((A), (B), (M))

#define _mm_cmpestra(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestria128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrc(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestric128((A), (LA), (B), (LB), (M))
#define _mm_cmpestro(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestrio128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrs(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestris128((A), (LA), (B), (LB), (M))
#define _mm_cmpestrz(A, LA, B, LB, M) \
     __builtin_ia32_pcmpestriz128((A), (LA), (B), (LB), (M))
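
/* Usage sketch (illustrative): find the first byte of a hypothetical 16-byte
   text block __hay that matches any byte of the set __set, comparing unsigned
   bytes and returning the lowest matching index (16 means no match):

     int __idx = _mm_cmpistri(__set, __hay,
                              _SIDD_UBYTE_OPS | _SIDD_CMP_EQUAL_ANY |
                              _SIDD_LEAST_SIGNIFICANT);
*/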

/* SSE4.2 Compare Packed Data -- Greater Than. */
static __inline__ __m128i __attribute__((__always_inline__, __nodebug__))
_mm_cmpgt_epi64(__m128i __V1, __m128i __V2)
{
  return (__m128i)((__v2di)__V1 > (__v2di)__V2);
}

/* SSE4.2 Accumulate CRC32. */
static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u8(unsigned int __C, unsigned char __D)
{
  return __builtin_ia32_crc32qi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u16(unsigned int __C, unsigned short __D)
{
  return __builtin_ia32_crc32hi(__C, __D);
}

static __inline__ unsigned int __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u32(unsigned int __C, unsigned int __D)
{
  return __builtin_ia32_crc32si(__C, __D);
}
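
/* Example (illustrative): a hypothetical helper, not part of this header,
   accumulating CRC-32C over a byte buffer one byte at a time:

     static __inline__ unsigned int
     __crc32c_bytes(unsigned int __crc, const unsigned char *__p, int __n)
     {
       int __i;
       for (__i = 0; __i < __n; ++__i)
         __crc = _mm_crc32_u8(__crc, __p[__i]);
       return __crc;
     }
*/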

#ifdef __x86_64__
static __inline__ unsigned long long __attribute__((__always_inline__, __nodebug__))
_mm_crc32_u64(unsigned long long __C, unsigned long long __D)
{
  return __builtin_ia32_crc32di(__C, __D);
}
#endif /* __x86_64__ */

#ifdef __POPCNT__
#include <popcntintrin.h>
#endif

#endif /* __SSE4_2__ */
#endif /* __SSE4_1__ */

#endif /* _SMMINTRIN_H */