// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_vsigmoid_ukernel__wasmsimd_rr2_p5_div_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_sigmoid_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n % sizeof(float) == 0);

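  // sigmoid(x) = 1 / (1 + exp(-x)) is evaluated as f = e / (e + 1) with
  // e ~= exp(-|x|), and the result is mirrored to 1 - f for non-negative x.
  // exp(-|x|) is computed with a two-step range reduction by log(2) ("rr2"),
  // a degree-5 polynomial on the reduced argument ("p5"), and a final
  // division ("div").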
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.magic_bias);
  const v128_t vminus_log2e = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.minus_log2e);
  const v128_t vln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_hi);
  const v128_t vln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.ln2_lo);
  const v128_t vc5 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c5);
  const v128_t vc4 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c4);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c2);
  const v128_t vc1 = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.c1);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.one);
  const v128_t vdenorm_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_p5.denorm_cutoff);

  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

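      // z := |x|: the exponential is evaluated at the non-positive argument
      // -z, and the result is mirrored later for non-negative x.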
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vz${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});

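      // n := round(z * -log2(e)), using the magic-bias trick: adding the
      // large magic bias rounds the product to an integer in the low
      // mantissa bits.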
      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz${ABC[N:N+4]}, vminus_log2e));

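      // s := 2**n, formed by shifting the still-biased n into the
      // floating-point exponent field.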
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vs${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 23);

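      // Subtract the magic bias to recover the integer-valued n as a float.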
      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);

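      // t := z + n * log(2), the reduced argument, accumulated in two steps
      // (ln2_hi, then ln2_lo) for extra precision.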
      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(vz${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_hi));

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(vt${ABC[N:N+4]}, wasm_f32x4_mul(vn${ABC[N:N+4]}, vln2_lo));

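      // Evaluate the degree-5 polynomial
      //   p(t) = c1 + t * (c2 + t * (c3 + t * (c4 + t * c5)))
      // by Horner's scheme.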
      $for N in range(0, BATCH_TILE, 4):
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt${ABC[N:N+4]}, vc5));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

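      // Reconstruct e := s + (t * s) * p(t) = s * (1 + t * p(t)) ~= exp(-|x|).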
      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_add(vs${ABC[N:N+4]}, wasm_f32x4_mul(vt${ABC[N:N+4]}, vp${ABC[N:N+4]}));

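      // d := e + 1 and f := e / d = sigmoid(-|x|).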
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vd${ABC[N:N+4]} = wasm_f32x4_add(ve${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        v128_t vf${ABC[N:N+4]} = wasm_f32x4_div(ve${ABC[N:N+4]}, vd${ABC[N:N+4]});

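      // For |x| above the cutoff the exponential underflows; force f to 0
      // there so the final result saturates to exactly 0 (x negative) or
      // 1 (x positive).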
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_andnot(vf${ABC[N:N+4]}, wasm_f32x4_gt(vz${ABC[N:N+4]}, vdenorm_cutoff));

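      // sigmoid(x) = 1 - sigmoid(-x) for x >= 0: keep f where the sign bit of
      // x is set and select 1 - f elsewhere.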
      $for N in range(0, BATCH_TILE, 4):
        vf${ABC[N:N+4]} = wasm_v128_bitselect(vf${ABC[N:N+4]}, wasm_f32x4_sub(vone, vf${ABC[N:N+4]}), wasm_i32x4_shr(vx${ABC[N:N+4]}, 31));

      wasm_v128_store(y, vf${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vf${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
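  // Process full vectors of 4 elements with the same computation.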
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

    wasm_v128_store(y, vf);
    y += 4;
  }
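  // Handle a partial tail of 1-3 elements: a full vector is loaded (hence the
  // XNN_OOB_READS annotation) and only the valid lanes are stored below.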
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_abs(vx);

    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));
    const v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));

    v128_t vp = wasm_f32x4_add(vc4, wasm_f32x4_mul(vt, vc5));
    vp = wasm_f32x4_add(vc3, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc2, wasm_f32x4_mul(vt, vp));
    vp = wasm_f32x4_add(vc1, wasm_f32x4_mul(vt, vp));

    vt = wasm_f32x4_mul(vt, vs);
    const v128_t ve = wasm_f32x4_add(vs, wasm_f32x4_mul(vt, vp));
    const v128_t vd = wasm_f32x4_add(ve, vone);

    v128_t vf = wasm_f32x4_div(ve, vd);
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));

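    // Store the low 2 and/or 1 lanes, depending on how many elements remain.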
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vf, 0);
      vf = wasm_v32x4_shuffle(vf, vf, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vf, 0);
    }
  }
}