// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 4 == 0
$assert BATCH_TILE >= 4
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>

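// Round-down (floor) kernel, "cvt" variant: round via a truncating int32
// conversion, then subtract 1.0f from lanes where truncation rounded up.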
void xnn_f32_vrndd_ukernel__wasmsimd_cvt_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

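  // sign_mask selects the sign bit of each lane, magic_bias is 2**23 (the
  // magnitude at and above which every float is already integral), and one
  // holds the 1.0f used for the downward adjustment.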
  const v128_t vsign_mask = wasm_v128_load64_splat(params->wasmsimd.sign_mask);
  const v128_t vmagic_bias = wasm_v128_load64_splat(params->wasmsimd.magic_bias);
  const v128_t vone = wasm_v128_load64_splat(params->wasmsimd.one);
  $if BATCH_TILE > 4:
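    // Main loop: process ${BATCH_TILE} elements per iteration.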
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      const v128_t vx${ABC[0:4]} = wasm_v128_load(x);
      $for N in range(4, BATCH_TILE, 4):
        const v128_t vx${ABC[N:N+4]} = wasm_v128_load(x + ${N});
      x += ${BATCH_TILE};

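      // Truncate toward zero (saturating) and take |x| for the range check.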
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vintx${ABC[N:N+4]} = wasm_i32x4_trunc_sat_f32x4(vx${ABC[N:N+4]});
        const v128_t vabsx${ABC[N:N+4]} = wasm_f32x4_abs(vx${ABC[N:N+4]});

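      // Convert the truncated value back to float, and build a mask covering
      // the non-sign bits of lanes where |x| < 2**23; the sign bit is masked
      // off so it is always kept from x (preserves the sign of zero).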
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vprerndx${ABC[N:N+4]} = wasm_f32x4_convert_i32x4(vintx${ABC[N:N+4]});
        const v128_t vrndmask${ABC[N:N+4]} = wasm_v128_andnot(wasm_f32x4_lt(vabsx${ABC[N:N+4]}, vmagic_bias), vsign_mask);

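      // Bit-select the converted value where the mask is set; lanes with
      // |x| >= 2**23 (already integral) and NaN lanes pass through from x.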
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vrndx${ABC[N:N+4]} = wasm_v128_bitselect(vprerndx${ABC[N:N+4]}, vx${ABC[N:N+4]}, vrndmask${ABC[N:N+4]});

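      // Truncation rounds toward zero, overshooting for negative non-integral
      // inputs: where x < rndx, prepare a 1.0f correction.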
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vadj${ABC[N:N+4]} = wasm_v128_and(wasm_f32x4_lt(vx${ABC[N:N+4]}, vrndx${ABC[N:N+4]}), vone);

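      // Subtract the correction to complete rounding down.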
      $for N in range(0, BATCH_TILE, 4):
        const v128_t vy${ABC[N:N+4]} = wasm_f32x4_sub(vrndx${ABC[N:N+4]}, vadj${ABC[N:N+4]});

      wasm_v128_store(y, vy${ABC[0:4]});
      $for N in range(4, BATCH_TILE, 4):
        wasm_v128_store(y + ${N}, vy${ABC[N:N+4]});
      y += ${BATCH_TILE};
    }
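  // Tail loop: same computation, 4 elements at a time.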
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    const v128_t vx = wasm_v128_load(x);
    x += 4;

    const v128_t vintx = wasm_i32x4_trunc_sat_f32x4(vx);
    const v128_t vabsx = wasm_f32x4_abs(vx);
    const v128_t vprerndx = wasm_f32x4_convert_i32x4(vintx);
    const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_bias), vsign_mask);
    const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    const v128_t vadj = wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone);
    const v128_t vy = wasm_f32x4_sub(vrndx, vadj);

    wasm_v128_store(y, vy);
    y += 4;
  }
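  // Remainder of 1-3 elements: the full-width load may read past the end of
  // x, which the XNN_OOB_READS annotation on this kernel permits.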
  if XNN_UNLIKELY(n != 0) {
    const v128_t vx = wasm_v128_load(x);

    const v128_t vintx = wasm_i32x4_trunc_sat_f32x4(vx);
    const v128_t vabsx = wasm_f32x4_abs(vx);
    const v128_t vprerndx = wasm_f32x4_convert_i32x4(vintx);
    const v128_t vrndmask = wasm_v128_andnot(wasm_f32x4_lt(vabsx, vmagic_bias), vsign_mask);
    const v128_t vrndx = wasm_v128_bitselect(vprerndx, vx, vrndmask);
    const v128_t vadj = wasm_v128_and(wasm_f32x4_lt(vx, vrndx), vone);
    v128_t vy = wasm_f32x4_sub(vrndx, vadj);

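    // Store two lanes at once via a 64-bit extract, shift the upper lanes
    // down, then store the final lane if needed.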
    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vy, 0);
      vy = wasm_v32x4_shuffle(vy, vy, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vy, 0);
    }
  }
}