// Auto-generated file. Do not edit!
//   Template: src/f32-vrnd/vrndd-sse2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_vrndd_ukernel__sse2_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

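  // Rounds each element toward negative infinity (floor) without SSE4.1
  // ROUNDPS, using truncate-and-adjust; in scalar terms, roughly:
  //   rndx = (x out of int32 range or NaN) ? x : (float) (int32_t) x;
  //   y = rndx - (rndx > x ? 1.0f : 0.0f);
  // params->sse2.sign_mask is expected to hold 0x80000000 in each lane: it is
  // both the IEEE sign mask and the INT32_MIN sentinel that _mm_cvttps_epi32
  // returns for unrepresentable inputs. vone holds 1.0f for the adjustment.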
  const __m128i vmagic = _mm_load_si128((const __m128i*) params->sse2.sign_mask);
  const __m128 vone = _mm_load_ps(params->sse2.one);
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const __m128 vx0123 = _mm_loadu_ps(x);
    const __m128 vx4567 = _mm_loadu_ps(x + 4);
    x += 8;

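    // Truncate toward zero; lanes that are out of int32 range or NaN come
    // back as INT32_MIN, the "integer indefinite" value.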
    const __m128i vintx0123 = _mm_cvttps_epi32(vx0123);
    const __m128i vintx4567 = _mm_cvttps_epi32(vx4567);

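    // Pass-through mask: the sign bit in every lane, widened to all 32 bits
    // in lanes where the conversion produced the INT32_MIN sentinel; those
    // inputs are already integral (or NaN) and must be kept as-is.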
    const __m128 vrndmask0123 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx0123, vmagic)));
    const __m128 vrndmask4567 = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx4567, vmagic)));

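    // Convert the truncated integers back to single precision.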
    const __m128 vprerndx0123 = _mm_cvtepi32_ps(vintx0123);
    const __m128 vprerndx4567 = _mm_cvtepi32_ps(vintx4567);

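    // Bitwise blend: overflowed/NaN lanes keep x entirely; other lanes take
    // the truncated value with the sign bit copied from x, so the sign of
    // negative zero survives.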
    const __m128 vrndx0123 = _mm_or_ps(_mm_and_ps(vx0123, vrndmask0123), _mm_andnot_ps(vrndmask0123, vprerndx0123));
    const __m128 vrndx4567 = _mm_or_ps(_mm_and_ps(vx4567, vrndmask4567), _mm_andnot_ps(vrndmask4567, vprerndx4567));

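    // Truncation rounds negative non-integral inputs up; where rndx > x,
    // subtract 1.0f to round toward negative infinity instead.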
    const __m128 vy0123 = _mm_sub_ps(vrndx0123, _mm_and_ps(_mm_cmpgt_ps(vrndx0123, vx0123), vone));
    const __m128 vy4567 = _mm_sub_ps(vrndx4567, _mm_and_ps(_mm_cmpgt_ps(vrndx4567, vx4567), vone));

    _mm_storeu_ps(y, vy0123);
    _mm_storeu_ps(y + 4, vy4567);
    y += 8;
  }
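  // Same round-down computation, one vector of 4 elements at a time.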
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const __m128 vx = _mm_loadu_ps(x);
    x += 4;

    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    const __m128 vy = _mm_sub_ps(vrndx, _mm_and_ps(_mm_cmpgt_ps(vrndx, vx), vone));

    _mm_storeu_ps(y, vy);
    y += 4;
  }
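  // Tail of 1 to 3 elements: compute a full vector, then store only the
  // valid low lanes. Note the full-width load reads past the last element,
  // which the caller's buffer is expected to allow.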
  if XNN_UNLIKELY(n != 0) {
    const __m128 vx = _mm_loadu_ps(x);
    const __m128i vintx = _mm_cvttps_epi32(vx);
    const __m128 vrndmask = _mm_castsi128_ps(_mm_or_si128(vmagic, _mm_cmpeq_epi32(vintx, vmagic)));
    const __m128 vprerndx = _mm_cvtepi32_ps(vintx);
    const __m128 vrndx = _mm_or_ps(_mm_and_ps(vx, vrndmask), _mm_andnot_ps(vrndmask, vprerndx));
    __m128 vy = _mm_sub_ps(vrndx, _mm_and_ps(_mm_cmpgt_ps(vrndx, vx), vone));
    if (n & (2 * sizeof(float))) {
      // Store the two low lanes, then shift the high lanes down.
      _mm_storel_pi((__m64*) y, vy);
      vy = _mm_movehl_ps(vy, vy);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      // Store the single remaining lane.
      _mm_store_ss(y, vy);
    }
  }
}