// Auto-generated file. Do not edit!
//   Template: src/f32-vrnd/avx.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

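// Per-lane load masks for the remainder path: reading 8 consecutive entries
// starting k elements before mask_table[7] yields k lanes of -1 (all bits set,
// lane enabled) followed by 8-k lanes of 0 (lane disabled).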
static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

void xnn_f32_vrndz_ukernel__avx_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

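  // Main loop: process 16 elements (two 256-bit vectors) per iteration.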
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    const __m256 vx01234567 = _mm256_loadu_ps(x);
    const __m256 vx89ABCDEF = _mm256_loadu_ps(x + 8);
    x += 16;

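    // Round each lane toward zero (truncate); _MM_FROUND_NO_EXC suppresses
    // the inexact floating-point exception.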
    const __m256 vy01234567 = _mm256_round_ps(vx01234567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
    const __m256 vy89ABCDEF = _mm256_round_ps(vx89ABCDEF, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(y, vy01234567);
    _mm256_storeu_ps(y + 8, vy89ABCDEF);
    y += 16;
  }
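  // Secondary loop: handle a remaining group of 8 elements with a single vector.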
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
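  // Tail: 1-7 elements remain. Build a per-lane mask from mask_table and use a
  // masked load so no bytes past the end of x are touched.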
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
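    // n is in bytes and sizeof(int32_t) == sizeof(float), so subtracting n
    // from &mask_table[7] backs up by one table entry per remaining element,
    // producing exactly n / sizeof(float) enabled lanes.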
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    const __m256 vy = _mm256_round_ps(vx, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);

    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
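    // Instead, store the surviving lanes with progressively narrower stores:
    // 4 floats, then 2, then 1, following the binary decomposition of n.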
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
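// Usage sketch (illustrative, not part of the generated kernel): n is the
// byte size of the input, and the params argument is unused by this kernel
// but must point to a valid object (assumed here to be zero-initializable).
//
//   float input[13] = { /* values to truncate toward zero */ };
//   float output[13];
//   union xnn_f32_rnd_params params = { 0 };
//   xnn_f32_vrndz_ukernel__avx_x16(13 * sizeof(float), input, output, &params);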