// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/math-stubs.h>


// Table of exp2(k / 64) values decremented (as integer) by (k << 17), k = 0..63
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_64[64];
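// Storing each entry with its integer representation decremented by (k << 17) lets the kernel reconstruct
// s = 2**n with a single integer addition: ve (below) re-adds (k << 17) along with the int(n) exponent bits.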

void xnn_math_f32_sigmoid__wasmsimd_rr2_lut64_p2_div(
    size_t n,
    const float* input,
    float* output)
{
  assert(n % (4 * sizeof(float)) == 0);

  // Large number such that ulp(magic bias) == exp2(-6)
  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.800000p17f);
  const v128_t vminus_log2e = wasm_f32x4_splat(-0x1.715476p0f);
  // Mask for the lowest 6 bits
  const v128_t vindex_mask = wasm_i32x4_splat(INT32_C(0x3F));
  // Last 13 bits are zeroes
  const v128_t vln2_hi = wasm_f32x4_splat(0x1.630000p-1f);
  const v128_t vln2_lo = wasm_f32x4_splat(-0x1.BD0106p-13f);
  // Coefficient of polynomial approximation of exp(-t) ~ 1 + t * (-1 + t * c2) on [-log(2)/128, log(2)/128]
  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFF0Ap-2f);
  const v128_t vone = wasm_f32x4_splat(1.0f);
  // The largest z for which sigmoidf(-z) is normalized.
  // This number is also the largest z for which expf(-z) is normalized.
  const v128_t vdenorm_cutoff = wasm_f32x4_splat(0x1.5D589Ep+6f);

  for (; n != 0; n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(input);
    input += 4;

    // General structure of the algorithm:
    //
    //           / exp(x) / (1 + exp(x)) if x <= 0
    //   f[x] :=
    //           \ 1 - f[-x] if x >= 0
    //
    // First we compute f[-z] := exp(-z) / (1 + exp(-z)) where z = abs(x),
    // then replace the result with 1 - f[-z] if x >= 0.
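    // Both cases follow from sigmoid(x) = 1 / (1 + exp(-x)): multiplying numerator and denominator by exp(x) gives
    // the first form, and 1 - f[-x] = 1 - 1 / (1 + exp(x)) = exp(x) / (1 + exp(x)) = f[x] gives the second.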
    const v128_t vz = wasm_f32x4_abs(vx);

    // Compute reduced argument n := round(-z / log(2), 6).
    // We do it by adding a large number (magic bias), which causes rounding of the result to a multiple of 1/64,
    // then subtracting the large number back. The trick with adding a large number is valid only within certain
    // bounds (|-z / log(2)| <= 2**16, i.e. |z| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because
    // inputs x outside of [-87.336544, 17.328678] (i.e. z outside of [0, 87.336544]) underflow or saturate
    // sigmoidf(x). We fix up the result for such inputs at the very end of the algorithm.
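    // Worked example (values approximate): for z = 1.0, -z * log2(e) ~ -1.442695; adding the magic bias
    // 0x1.8p17 = 196608.0 rounds the sum to a multiple of 2**-6, so after subtracting the bias back
    // n = -92/64 = -1.4375. The 6 low mantissa bits of the biased sum hold the table index k = (-92) mod 64 = 36,
    // and the bits above them supply int(n) = -2 once shifted into the exponent field (see below).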
    v128_t vn = wasm_f32x4_add(vmagic_bias, wasm_f32x4_mul(vz, vminus_log2e));

    // Create a floating-point number s (scale) such that s := 2**n for such inputs that sigmoidf(-z) is normalized,
    // i.e. 0 <= z <= 87.33642. As n has 6 fractional bits, we split s == 2**n = 2**int(n) * 2**frac(n). We create s
    // in two steps:
    // 1. Fetch 2**frac(n) from the table using the 6 low bits of n, as integer. Note that the fetched values are in
    //    the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by addition of int(n) to its floating-point exponent. The result is always a
    //    normalized number, because for 0 <= z <= 87.33642 (inputs for which sigmoidf(-z) is normalized) we have
    //    -126 <= int(n) <= 0, and thus the adjusted exponent is not lower than -126.
    //
    // Shift bits 6:14 into 23:31 (position of floating-point exponent).
    const v128_t ve = wasm_i32x4_shl(vn, 17);

    // Use bits 0:6 of n, as integer, as an index for table lookup of l := 2**frac(n).
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_64 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const v128_t vs = wasm_i32x4_add(vl, ve);
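    // Continuing the worked example: k = 36 selects l = 2**(36/64) ~ 1.4768 (stored pre-decremented by 36 << 17),
    // and the integer addition of ve restores those bits and applies int(n) = -2, giving s ~ 0.3692 = 2**-1.4375.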

    // Subtract the large number back to get the final n := round(-z / log(2), 6) as a floating-point number.
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    // Compute reduced argument t := (z + n * log(2)). Note that -t = -z - n * log(2).
    // Use Cody-Waite range reduction method (note two constants to represent log(2)) to improve accuracy.
    v128_t vt = wasm_f32x4_add(vz, wasm_f32x4_mul(vn, vln2_hi));
    vt = wasm_f32x4_add(vt, wasm_f32x4_mul(vn, vln2_lo));
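    // For the inputs that matter (z below the denormal cutoff), n * 64 fits in 13 bits and vln2_hi carries at most
    // 11 significant bits, so the product n * vln2_hi is exact and vln2_lo only has to account for the remaining
    // bits of log(2). For the worked example, t ~ 1.0 - 1.4375 * log(2) ~ 0.003601.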

    // Compute degree-2 polynomial approximation for exp(-t) on [-log(2)/128, log(2)/128].
    //   P(t) = 1 + t * (-1 + t * c2) = 1 - (t - t * (t * c2)) = 1 - p
    v128_t vp = wasm_f32x4_mul(vt, vc2);
    vp = wasm_f32x4_sub(vt, wasm_f32x4_mul(vp, vt));

    // Reconstruct the exp(-z) value:
    //   e = s * (1 + t * (-1 + t * c2))
    //     = s * (1 - p)
    //     = s - s * p
    const v128_t vy = wasm_f32x4_sub(vs, wasm_f32x4_mul(vs, vp));
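    // For the worked example, p ~ 0.003594, so e ~ 0.3692 * (1 - 0.003594) ~ 0.36788, i.e. exp(-1) up to rounding.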

    // Reconstruct sigmoid(-z) = exp(-z) / (1.0 + exp(-z))
    v128_t vf = wasm_f32x4_div(vy, wasm_f32x4_add(vy, vone));

    // For z above the denormal cutoff, sigmoid(-z) is not representable as a normalized number;
    // replace the output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    vf = wasm_v128_andnot(vf, wasm_f32x4_gt(vz, vdenorm_cutoff));
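    // wasm_v128_andnot(a, b) computes a & ~b, so lanes where the comparison is true are zeroed.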

    // Reconstruct sigmoid(x) = x < 0 ? sigmoid(-z) : 1.0 - sigmoid(-z)
    vf = wasm_v128_bitselect(vf, wasm_f32x4_sub(vone, vf), wasm_i32x4_shr(vx, 31));
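    // wasm_v128_bitselect(a, b, mask) takes bits from a where mask is 1 and from b where it is 0; the arithmetic
    // shift replicates the sign bit of x across each lane, so negative x keeps sigmoid(-z) and non-negative x gets
    // 1 - sigmoid(-z). For the worked example x = 1.0, vf ~ 0.36788 / 1.36788 ~ 0.2689 before this step and
    // 1 - 0.2689 ~ 0.7311 = sigmoid(1) after it.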

    wasm_v128_store(output, vf);
    output += 4;
  }
}
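
// A minimal usage sketch (illustrative only, not part of XNNPACK): it runs the kernel on one batch of 4 elements
// and prints the results next to the scalar reference 1/(1 + expf(-x)). Kept under "#if 0" so this translation unit
// still builds as a pure kernel; enabling it requires a WebAssembly SIMD toolchain and linking against
// xnn_table_exp2minus_k_over_64.
#if 0
#include <math.h>
#include <stdio.h>

int main(void) {
  const float input[4] = { -5.0f, -0.5f, 0.5f, 5.0f };
  float output[4];
  // The first argument is the batch size in bytes and must be a multiple of 4 floats (16 bytes).
  xnn_math_f32_sigmoid__wasmsimd_rr2_lut64_p2_div(sizeof(input), input, output);
  for (int i = 0; i < 4; i++) {
    printf("sigmoid(%+.2f) = %.6f (reference %.6f)\n",
      (double) input[i], (double) output[i], (double) (1.0f / (1.0f + expf(-input[i]))));
  }
  return 0;
}
#endif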