// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


void xnn_f32_vrndu_ukernel__wasmsimd_addsub_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // -0.0f has only the sign bit set; used below to extract and merge sign bits.
  const v128_t vsign_mask = wasm_f32x4_splat(-0.0f);
  // 2**23: adding then subtracting this value rounds |x| to an integer,
  // because floats with magnitude >= 2**23 have no fractional bits.
  const v128_t vmagic_number = wasm_f32x4_splat(0x1.000000p+23f);
  const v128_t vone = wasm_f32x4_splat(1.0f);
  $if BATCH_TILE > 4:
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vabsx${ABC[N:N+4]} = wasm_v128_andnot(vx${ABC[N:N+4]}, vsign_mask);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndabsx${ABC[N:N+4]} = wasm_f32x4_sub(wasm_f32x4_add(vabsx${ABC[N:N+4]}, vmagic_number), vmagic_number);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vx${ABC[N:N+4]}, vrndabsx${ABC[N:N+4]}, vrndmask${ABC[N:N+4]});

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vadjmask${ABC[N:N+4]} = wasm_v128_or(vsign_mask, wasm_f32x4_le(vx${ABC[N:N+4]}, vrndx${ABC[N:N+4]}));

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vadjrndx${ABC[N:N+4]} = wasm_f32x4_add(vrndx${ABC[N:N+4]}, vone);

      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_v128_bitselect(vrndx${ABC[N:N+4]}, vadjrndx${ABC[N:N+4]}, vadjmask${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    // |x|, computed by clearing the sign bit.
    const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask);
    // Bits to take from the original x: the sign bit (always), and all bits in
    // lanes where |x| >= 2**23, i.e. x is already integral, NaN, or infinite.
    const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
    // Round |x| to an integer via the add-sub (magic number) trick.
    const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
    // x rounded to nearest-even, with the sign of x restored.
    const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);

    // Round-to-nearest may undershoot; to round up, add 1 in lanes where the
    // rounded value fell below x (the sign bit is again kept from vrndx).
    const v128_t vadjmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vx, vrndx));
    const v128_t vadjrndx = wasm_f32x4_add(vrndx, vone);
    const v128_t vy = wasm_v128_bitselect(vrndx, vadjrndx, vadjmask);

    wasm_v128_store(y, vy);
    y += 4;
  }
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vabsx = wasm_v128_andnot(vx, vsign_mask);
    const v128_t vrndmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vmagic_number, vabsx));
    const v128_t vrndabsx = wasm_f32x4_sub(wasm_f32x4_add(vabsx, vmagic_number), vmagic_number);
    const v128_t vrndx = wasm_v128_bitselect(vx, vrndabsx, vrndmask);

    const v128_t vadjmask = wasm_v128_or(vsign_mask, wasm_f32x4_le(vx, vrndx));
    const v128_t vadjrndx = wasm_f32x4_add(vrndx, vone);
    v128_t vy = wasm_v128_bitselect(vrndx, vadjrndx, vadjmask);

    if (n & (2 * sizeof(float))) {
      // Store the two low lanes, then shift the high lanes down.
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}
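
// For reference, a minimal scalar sketch of the same add-sub round-up
// technique, kept inside this comment so it is not compiled into the kernel.
// The function name is illustrative, not part of XNNPACK's API; it assumes
// the default round-to-nearest-even FP mode, like the vector code above.
//
//   #include <math.h>
//
//   static float scalar_round_up(float x) {
//     const float magic = 0x1.000000p+23f;  // 2**23
//     const float absx = fabsf(x);
//     if (!(absx < magic)) return x;  // already integral, NaN, or infinity
//     // Add-sub trick: rounds |x| to nearest-even; copysignf restores the
//     // sign bit, matching the vsign_mask merge in the vector code.
//     const float rndx = copysignf((absx + magic) - magic, x);
//     // Adjust up by 1 when rounding to nearest went below x.
//     return x <= rndx ? rndx : rndx + 1.0f;
//   }
//
// E.g. for x = 2.5 the add-sub step yields 2.0 (nearest-even), which is below
// x, so the adjustment returns 3.0; for x = -2.5 it yields -2.0, which is
// already >= x, so -2.0 is returned directly.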