// Auto-generated file. Do not edit!
//   Template: src/f32-vrnd/vrndz-neon.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_vrndz_ukernel__neon_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

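  // 0x4B000000 is 2^23 as a float: any input with |x| >= 2^23 has no fractional bits and is left unchanged.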
  const float32x4_t vintegral_threshold = vreinterpretq_f32_u32(vmovq_n_u32(UINT32_C(0x4B000000)));
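  // Main loop: truncate 8 elements toward zero per iteration.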
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const float32x4_t vx0123 = vld1q_f32(x); x += 4;
    const float32x4_t vx4567 = vld1q_f32(x); x += 4;

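    // Convert to integers, truncating toward zero; the result is used only for lanes where |x| < 2^23.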
    const int32x4_t vintx0123 = vcvtq_s32_f32(vx0123);
    const int32x4_t vintx4567 = vcvtq_s32_f32(vx4567);

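    // Mask of lanes that need rounding: |x| < 2^23. NaN lanes compare false and pass through unchanged.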
    uint32x4_t vrndmask0123 = vcaltq_f32(vx0123, vintegral_threshold);
    uint32x4_t vrndmask4567 = vcaltq_f32(vx4567, vintegral_threshold);

    const float32x4_t vrndx0123 = vcvtq_f32_s32(vintx0123);
    const float32x4_t vrndx4567 = vcvtq_f32_s32(vintx4567);

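    // Clear the sign bit of the mask so the blend below always takes the sign from the input, preserving -0.0f.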
    vrndmask0123 = vbicq_u32(vrndmask0123, vmovq_n_u32(UINT32_C(0x80000000)));
    vrndmask4567 = vbicq_u32(vrndmask4567, vmovq_n_u32(UINT32_C(0x80000000)));

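    // Bitwise select: truncated value where the mask is set, original value elsewhere.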
    const float32x4_t vy0123 = vbslq_f32(vrndmask0123, vrndx0123, vx0123);
    const float32x4_t vy4567 = vbslq_f32(vrndmask4567, vrndx4567, vx4567);

    vst1q_f32(y, vy0123); y += 4;
    vst1q_f32(y, vy4567); y += 4;
  }
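  // Process a remaining block of 4 elements, if any.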
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const float32x4_t vx = vld1q_f32(x); x += 4;
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vrndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vrndmask, vrndx, vx);
    vst1q_f32(y, vy); y += 4;
  }
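  // Handle the final 1 to 3 elements: compute a full vector and store only the valid lanes.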
  if XNN_UNLIKELY(n != 0) {
    const float32x4_t vx = vld1q_f32(x);
    const int32x4_t vintx = vcvtq_s32_f32(vx);
    uint32x4_t vrndmask = vcaltq_f32(vx, vintegral_threshold);
    const float32x4_t vrndx = vcvtq_f32_s32(vintx);
    vrndmask = vbicq_u32(vrndmask, vmovq_n_u32(UINT32_C(0x80000000)));
    const float32x4_t vy = vbslq_f32(vrndmask, vrndx, vx);
    float32x2_t vy_lo = vget_low_f32(vy);
    if (n & (2 * sizeof(float))) {
      vst1_f32(y, vy_lo); y += 2;
      vy_lo = vget_high_f32(vy);
    }
    if (n & (1 * sizeof(float))) {
      vst1_lane_f32(y, vy_lo, 0);
    }
  }
}