// Auto-generated file. Do not edit!
//   Template: src/f32-velu/wasmsimd-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/vunary.h>
#include <xnnpack/common.h>


void xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(x != NULL);
  assert(y != NULL);

  const v128_t vprescale = wasm_v32x4_load_splat(&params->scalar.prescale);
  const v128_t valpha = wasm_v32x4_load_splat(&params->scalar.alpha);
  const v128_t vbeta = wasm_v32x4_load_splat(&params->scalar.beta);

  // z at or below this cutoff makes exp(z) - 1 round to -1.0f, so clamping is exact.
  const v128_t vsat_cutoff = wasm_f32x4_splat(-0x1.154246p+4f);
  // Adding this bias pushes round(z / log(2)) into the low bits of the float.
  const v128_t vmagic_bias = wasm_f32x4_splat(0x1.8000FEp23f);
  const v128_t vlog2e = wasm_f32x4_splat(0x1.715476p+0f);
  // log(2) split into high and low parts for two-step (rr2) range reduction.
  const v128_t vminus_ln2_hi = wasm_f32x4_splat(-0x1.62E440p-1f);
  const v128_t vminus_ln2_lo = wasm_f32x4_splat(0x1.0105C6p-21f);
  // Degree-6 (p6) polynomial coefficients approximating exp(t) - 1 on the reduced range.
  const v128_t vc6 = wasm_f32x4_splat(0x1.6b7338p-10f);
  const v128_t vc5 = wasm_f32x4_splat(0x1.12278Ep-7f);
  const v128_t vc4 = wasm_f32x4_splat(0x1.555716p-5f);
  const v128_t vc3 = wasm_f32x4_splat(0x1.5554B0p-3f);
  const v128_t vc2 = wasm_f32x4_splat(0x1.FFFFFEp-2f);
  const v128_t vone = wasm_f32x4_splat(1.0f);

  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(x);
    x += 4;

    // z = prescale * x, clamped so the exponential path saturates cleanly.
    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    // n = round(z / log(2)) via the magic-bias trick; s = 2**n is built by
    // shifting n's low bits into the floating-point exponent field.
    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    // t = z - n * log(2), computed in two steps for extra precision.
    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);

    // p = t * (c2 + t * (c3 + t * (c4 + t * (c5 + t * c6)))), so that
    // t + t * p approximates exp(t) - 1.
    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    // Reconstruct e = alpha * (exp(z) - 1) = alpha * ((s - 1) + s * (t + t * p)).
    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    // Arithmetic right shift replicates the sign bit: all-ones lanes select
    // the exponential branch for x < 0, beta * x otherwise.
    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    const v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

    wasm_v128_store(y, vy);
    y += 4;
  }
  if XNN_UNLIKELY(n != 0) {
    // Remainder of 1-3 elements: same computation as the main loop, followed
    // by partial stores. Note that the load still reads a full 16 bytes.
    v128_t vx = wasm_v128_load(x);

    const v128_t vz = wasm_f32x4_max(wasm_f32x4_mul(vx, vprescale), vsat_cutoff);

    v128_t vn = wasm_f32x4_add(wasm_f32x4_mul(vz, vlog2e), vmagic_bias);
    v128_t vs = wasm_i32x4_shl(vn, 23);
    vn = wasm_f32x4_sub(vn, vmagic_bias);

    v128_t vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_hi), vz);
    vt = wasm_f32x4_add(wasm_f32x4_mul(vn, vminus_ln2_lo), vt);

    v128_t vp = wasm_f32x4_add(wasm_f32x4_mul(vc6, vt), vc5);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc4);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc3);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vc2);
    vp = wasm_f32x4_mul(vp, vt);

    vt = wasm_f32x4_mul(vt, vs);
    vs = wasm_f32x4_sub(vs, vone);
    vp = wasm_f32x4_add(wasm_f32x4_mul(vp, vt), vt);
    const v128_t ve = wasm_f32x4_mul(wasm_f32x4_add(vp, vs), valpha);

    const v128_t vsignm = wasm_i32x4_shr(vx, 31);
    vx = wasm_f32x4_mul(vx, vbeta);
    v128_t vy = wasm_v128_bitselect(ve, vx, vsignm);

    if (n & (2 * sizeof(float))) {
      // Store the two low lanes, then rotate the high lanes down.
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}
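
// -----------------------------------------------------------------------------
// A minimal usage sketch, not part of the generated kernel and guarded out of
// normal builds. It shows how this microkernel might be driven directly: the
// direct field initialization of xnn_f32_elu_params assumes the `scalar`
// variant with `prescale`/`alpha`/`beta` members, as implied by the loads at
// the top of the kernel; the guard macro and function name are hypothetical.
// Real callers would normally go through XNNPACK's operator API instead of
// invoking a microkernel by hand.
#ifdef XNN_VELU_WASMSIMD_USAGE_SKETCH
static void example_velu_call(void) {
  // 7 elements exercise both the 4-wide main loop and the remainder path.
  // The buffer is padded to 8 floats because the remainder path still
  // performs a full 16-byte load.
  float input[8] = {-2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f, 0.0f};
  float output[8];

  union xnn_f32_elu_params params;
  params.scalar.prescale = 1.0f;  // scale applied to x before exp(z) - 1
  params.scalar.alpha = 1.0f;     // multiplier for the negative (exponential) branch
  params.scalar.beta = 1.0f;      // multiplier for the non-negative (linear) branch

  // n is a byte count and must be a non-zero multiple of sizeof(float).
  xnn_f32_velu_ukernel__wasmsimd_arm_rr2_p6_x4(
      7 * sizeof(float), input, output, &params);
}
#endif  // XNN_VELU_WASMSIMD_USAGE_SKETCH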