// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_vrndd_ukernel__wasmsimd_addsub_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // -0.0f has only the sign bit set: used to extract and restore the sign of x.
  const v128_t vsign_mask = wasm_f32x4_splat(-0.0f);
  // 2**23: for 0 <= |x| < 2**23, computing (|x| + 2**23) - 2**23 rounds |x| to the nearest
  // integer in the default round-to-nearest-even mode.
  const v128_t vmagic_number = wasm_f32x4_splat(0x1.000000p+23f);
  const v128_t vone = wasm_f32x4_splat(1.0f);
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);

      // Bits to take from x rather than from the add/sub result: the sign bit is always kept,
      // and all bits are kept when |x| >= 2**23 (x is already integral or infinite).
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));

      // Round |x| to the nearest integer via the magic-number addition and subtraction.
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_number), vmagic_number);

      // x rounded to the nearest integer, with the sign of x restored.
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}, vrndmask${ABC[N:N+4]});

      // Convert round-to-nearest into round-down: subtract 1 where rounding overshot x.
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_f32x4_sub(vrndx${ABC[N:N+4]}, wasm_v128_and(wasm_f32x4_lt(vx${ABC[N:N+4]}, vrndx${ABC[N:N+4]}), vone));

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask);
    const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
    const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
    const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
    const v128_t vy = wasm_f32x4_sub(vrndx, wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone));

    wasm_v128_store(y, vy);
    y += 4;
  }
  // Process the remaining 1-3 elements with one last vector and a partial store.
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask);
    const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
    const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
    const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);
    v128_t vy = wasm_f32x4_sub(vrndx, wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone));

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      // Shift the upper two lanes down for the final scalar store.
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}