// Auto-generated file. Do not edit!
//   Template: src/f32-vsigmoid/wasmsimd-rr2-p5-div.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x8(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

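  // Descriptive comments below were added for exposition; they summarize what
  // the generated code computes:
  //
  //   sigmoid(x) = 1 / (1 + exp(-x))
  //
  // The kernel evaluates f := exp(-z) / (exp(-z) + 1) on z = |x|, so the
  // intermediate exp(-z) never exceeds 1, then recovers the result for
  // non-negative inputs via sigmoid(x) = 1 - f. exp(-z) is computed with a
  // round-to-nearest range reduction against a 2-constant split of ln(2)
  // ("rr2") and a degree-5 polynomial ("p5"), followed by an explicit
  // division ("div").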
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

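  // Main loop: two vectors of 4 floats (8 elements) per iteration.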
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    const v128_t vx0123 = wasm_v128_load(x);
    const v128_t vx4567 = wasm_v128_load(x + 4);
    x += 8;

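    // z := |x|; the sign of x is reapplied at the end of the iteration.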
    const v128_t vz0123 = wasm_f32x4_abs(vx0123);
    const v128_t vz4567 = wasm_f32x4_abs(vx4567);

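    // n := round(z * (-log2(e))): adding the large magic bias forces the
    // product to round to an integer held in the low mantissa bits.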
    v128_t vn0123 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz0123, vminus_log2e));
    v128_t vn4567 = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz4567, vminus_log2e));

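    // s := 2**n: shifting the integer bits of the still-biased n left by 23
    // places n in the floating-point exponent field.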
    const v128_t vs0123 = wasm_i32x4_shl(vn0123, 23);
    const v128_t vs4567 = wasm_i32x4_shl(vn4567, 23);

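    // Subtract the magic bias back out to recover n as a float.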
    vn0123 = wasm_f32x4_sub(vn0123, vmagic_bias);
    vn4567 = wasm_f32x4_sub(vn4567, vmagic_bias);

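    // t := z + n * ln(2), the reduced argument. ln(2) is split into high and
    // low halves ("rr2", Cody-Waite style) for extra effective precision.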
    v128_t vt0123 = wasm_f32x4_add(vz0123, wasm_f32x4_mul(vn0123, vln2_hi));
    v128_t vt4567 = wasm_f32x4_add(vz4567, wasm_f32x4_mul(vn4567, vln2_hi));

    vt0123 = wasm_f32x4_add(vt0123, wasm_f32x4_mul(vn0123, vln2_lo));
    vt4567 = wasm_f32x4_add(vt4567, wasm_f32x4_mul(vn4567, vln2_lo));

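    // p(t) := c1 + t*(c2 + t*(c3 + t*(c4 + t*c5))), a degree-5 polynomial
    // evaluated by Horner's rule, such that exp(-t) ~= 1 + t*p(t).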
    v128_t vp0123 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt0123, vc5));
    v128_t vp4567 = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt4567, vc5));

    vp0123 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt4567, vp4567));

    vp0123 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt4567, vp4567));

    vp0123 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt0123, vp0123));
    vp4567 = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt4567, vp4567));

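    // e := s * (1 + t*p(t)) ~= 2**n * exp(-t) = exp(-z), reassembled as
    // s + (t*s)*p so the leading term stays exact.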
    vt0123 = wasm_f32x4_mul(vt0123, vs0123);
    vt4567 = wasm_f32x4_mul(vt4567, vs4567);

    const v128_t ve0123 = wasm_f32x4_add(vs0123, wasm_f32x4_mul(vt0123, vp0123));
    const v128_t ve4567 = wasm_f32x4_add(vs4567, wasm_f32x4_mul(vt4567, vp4567));

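    // d := exp(-z) + 1, the sigmoid denominator.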
    const v128_t vd0123 = wasm_f32x4_add(ve0123, vone);
    const v128_t vd4567 = wasm_f32x4_add(ve4567, vone);

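    // f := exp(-z) / (exp(-z) + 1) = sigmoid(-z).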
    v128_t vf0123 = wasm_f32x4_div(ve0123, vd0123);
    v128_t vf4567 = wasm_f32x4_div(ve4567, vd4567);

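    // For z above denorm_cutoff, exp(-z) falls into the subnormal range and
    // the quotient degrades: the comparison yields an all-ones mask there,
    // and the andnot flushes those lanes to exactly 0.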
    vf0123 = wasm_v128_andnot(vf0123, wasm_f32x4_gt(vz0123, vdenorm_cutoff));
    vf4567 = wasm_v128_andnot(vf4567, wasm_f32x4_gt(vz4567, vdenorm_cutoff));

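    // Reapply the sign: the arithmetic shift smears the sign bit of x across
    // each lane, so negative x keeps f = sigmoid(x) while non-negative x
    // takes 1 - f = sigmoid(x).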
    vf0123 = wasm_v128_bitselect(vf0123, wasm_f32x4_sub(vone, vf0123), wasm_i32x4_shr(vx0123, 31));
    vf4567 = wasm_v128_bitselect(vf4567, wasm_f32x4_sub(vone, vf4567), wasm_i32x4_shr(vx4567, 31));

    wasm_v128_store(y, vf0123);
    wasm_v128_store(y + 4, vf4567);
    y += 8;
  }
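  // Same computation, one 4-element vector at a time.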
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
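  // Tail of 1-3 elements: the full 16-byte load may read past the end of the
  // input, which the XNN_OOB_READS annotation on this kernel marks as
  // intentional.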
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

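    // Store only the valid lanes: two lanes at once as a 64-bit chunk, then
    // shift the upper lanes down and store the last lane if one remains.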
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}