// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>

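// 16-entry table of 2**(-k/16) values, used below to reconstruct the power-of-two scale.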
extern XNN_INTERNAL const float xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__wasmsimd_${"x86" if X86 else "arm"}_rr2_lut16_p3_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

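  // Broadcast the ELU parameters and the expm1 approximation constants to all SIMD lanes.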
  const v128_t vprescale = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.prescale);
  const v128_t valpha = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.alpha);
  const v128_t vbeta = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.beta);
  const v128_t vsat_cutoff = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.sat_cutoff);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.magic_bias);
  const v128_t vlog2e = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.log2e);
  const v128_t vindex_mask = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.index_mask);
  const v128_t vminus_ln2_hi = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_hi);
  const v128_t vminus_ln2_lo = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.minus_ln2_lo);
  const v128_t vc3 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c3);
  const v128_t vc2 = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.c2);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd_rr2_lut16_p3.one);

  $if BATCH_TILE > 4:
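    // Main loop: process ${BATCH_TILE} elements per iteration.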
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        $if X86:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale);
        $else:
          const v128_t vz${ABC[N:N+4]} = wasm_f32x4_max(wasm_f32x4_mul(vx${ABC[N:N+4]}, vprescale), vsat_cutoff);

      $for N in range(0, BATCH_TILE, 4):
        v128_t vn${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vz${ABC[N:N+4]}, vlog2e), vmagic_bias);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vidx${ABC[N:N+4]} = wasm_i32x4_shl(wasm_v128_and(vn${ABC[N:N+4]}, vindex_mask), 2);
        const v128_t ven${ABC[N:N+4]} = wasm_i32x4_shl(vn${ABC[N:N+4]}, 19);

      $for N in range(0, BATCH_TILE, 4):
        const uint64_t vidx${ABC[N:N+2]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 0);
        const uint64_t vidx${ABC[N+2:N+4]} = wasm_i64x2_extract_lane(vidx${ABC[N:N+4]}, 1);
        const float vl${ABC[N]}   = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N:N+2]}));
        const float vl${ABC[N+1]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N:N+2]} >> 32)));
        const float vl${ABC[N+2]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx${ABC[N+2:N+4]}));
        const float vl${ABC[N+3]} = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx${ABC[N+2:N+4]} >> 32)));
        const v128_t vl${ABC[N:N+4]} = wasm_f32x4_make(vl${ABC[N]}, vl${ABC[N+1]}, vl${ABC[N+2]}, vl${ABC[N+3]});

      $for N in range(0, BATCH_TILE, 4):
        vn${ABC[N:N+4]} = wasm_f32x4_sub(vn${ABC[N:N+4]}, vmagic_bias);
        v128_t vs${ABC[N:N+4]} = wasm_i32x4_add(vl${ABC[N:N+4]}, ven${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        v128_t vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_hi), vz${ABC[N:N+4]});
        $if X86:
          const v128_t vsatm${ABC[N:N+4]} = wasm_f32x4_le(vz${ABC[N:N+4]}, vsat_cutoff);

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vn${ABC[N:N+4]}, vminus_ln2_lo), vt${ABC[N:N+4]});
        $if X86:
          vs${ABC[N:N+4]} = wasm_v128_andnot(vs${ABC[N:N+4]}, vsatm${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        $if X86:
          vt${ABC[N:N+4]} = wasm_v128_andnot(vt${ABC[N:N+4]}, vsatm${ABC[N:N+4]});
        v128_t vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt${ABC[N:N+4]}), vc2);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        vt${ABC[N:N+4]} = wasm_f32x4_mul(vt${ABC[N:N+4]}, vs${ABC[N:N+4]});
        vs${ABC[N:N+4]} = wasm_f32x4_sub(vs${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        vp${ABC[N:N+4]} = wasm_f32x4_add(wasm_f32x4_mul(vp${ABC[N:N+4]}, vt${ABC[N:N+4]}), vt${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const v128_t ve${ABC[N:N+4]} = wasm_f32x4_mul(wasm_f32x4_add(vp${ABC[N:N+4]}, vs${ABC[N:N+4]}), valpha);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vsignm${ABC[N:N+4]} = wasm_i32x4_shr(vx${ABC[N:N+4]}, 31);
        vx${ABC[N:N+4]} = wasm_f32x4_mul(vx${ABC[N:N+4]}, vbeta);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(ve${ABC[N:N+4]}, vx${ABC[N:N+4]}, vsignm${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
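  // Vectorized loop over groups of four elements.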
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(x);
    x += 4;

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

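    // Magic-bias rounding: the low 4 bits of vn select a table entry, while the remaining
    // bits (shifted left by 19) land in the floating-point exponent field.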
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const v128_t ven = wasm_i32x4_shl(vn, 19);

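    // Gather the four table values using the byte offsets packed into vidx.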
    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    v128_t vs = wasm_i32x4_add(vl, ven);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

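    // Two-step (hi/lo) argument reduction: t = z + n * minus_ln2_hi + n * minus_ln2_lo.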
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    $if X86:
      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    $if X86:
      vs = wasm_v128_andnot(vs, vsatm);
      vt = wasm_v128_andnot(vt, vsatm);

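    // Evaluate the degree-3 polynomial in t and recombine it with the scale s to get expm1(z).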
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

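    // Select alpha * expm1(z) for negative inputs and beta * x otherwise, using the sign mask.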
    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

    wasm_v128_store(y, vy);
    y += 4;
  }
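  // Handle the final 1-3 elements.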
  if XNN_UNLIKELY(n != 0) {
    v128_t vx = wasm_v128_load(x);

    $if X86:
      const v128_t vz = wasm_f32x4_mul(vx, vprescale);
    $else:
      const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    const v128_t vidx = wasm_i32x4_shl(wasm_v128_and(vn, vindex_mask), 2);
    const v128_t ven = wasm_i32x4_shl(vn, 19);

    const uint64_t vidx_lo = wasm_i64x2_extract_lane(vidx, 0);
    const uint64_t vidx_hi = wasm_i64x2_extract_lane(vidx, 1);
    const float vl0 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_lo));
    const float vl1 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_lo >> 32)));
    const float vl2 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) vidx_hi));
    const float vl3 = *((const float*) ((uintptr_t) xnn_table_exp2minus_k_over_16 + (uint32_t) (vidx_hi >> 32)));
    const v128_t vl = wasm_f32x4_make(vl0, vl1, vl2, vl3);

    v128_t vs = wasm_i32x4_add(vl, ven);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    $if X86:
      const v128_t vsatm = wasm_f32x4_le(vz, vsat_cutoff);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);
    $if X86:
      vs = wasm_v128_andnot(vs, vsatm);
      vt = wasm_v128_andnot(vt, vsatm);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc3, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}