// Auto-generated file. Do not edit!
//   Template: src/f32-hswish/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/common.h>
#include <xnnpack/hswish.h>


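// h-swish (hard swish) activation:
//
//   h-swish(x) = x * min(max(x + 3, 0), 6) / 6
//
// computed below as (x * 1/6) * clamp(x + 3, 0, 6). The main loop handles
// 16 floats (four 128-bit vectors) per iteration, followed by a 4-wide
// remainder loop and a 1..3-element tail. n is a size in bytes.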
void xnn_f32_hswish_ukernel__wasmsimd_x16(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_hswish_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  const v128_t vsixth = wasm_v32x4_load_splat(&params->scalar.sixth);
  const v128_t vthree = wasm_v32x4_load_splat(&params->scalar.three);
  const v128_t vsix = wasm_v32x4_load_splat(&params->scalar.six);
  const v128_t vzero = wasm_f32x4_splat(0.0f);

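  // Main loop: 16 floats (four v128 vectors) per iteration.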
  for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
    v128_t vx0123 = wasm_v128_load(x);
    v128_t vx4567 = wasm_v128_load(x + 4);
    v128_t vx89AB = wasm_v128_load(x + 8);
    v128_t vxCDEF = wasm_v128_load(x + 12);
    x += 16;

    // vacc <- x + 3;  vx <- x * (1/6)
    v128_t vacc0123 = wasm_f32x4_add(vx0123, vthree);
    vx0123 = wasm_f32x4_mul(vx0123, vsixth);
    v128_t vacc4567 = wasm_f32x4_add(vx4567, vthree);
    vx4567 = wasm_f32x4_mul(vx4567, vsixth);
    v128_t vacc89AB = wasm_f32x4_add(vx89AB, vthree);
    vx89AB = wasm_f32x4_mul(vx89AB, vsixth);
    v128_t vaccCDEF = wasm_f32x4_add(vxCDEF, vthree);
    vxCDEF = wasm_f32x4_mul(vxCDEF, vsixth);

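    // Clamp vacc to [0.0f, 6.0f] using signed-integer max/min on the raw
    // bits: negative floats compare below +0.0f as int32, and non-negative
    // floats keep their IEEE-754 ordering, so these behave as float max/min
    // for the operands involved.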
    vacc0123 = wasm_i32x4_max(vacc0123, vzero);
    vacc4567 = wasm_i32x4_max(vacc4567, vzero);
    vacc89AB = wasm_i32x4_max(vacc89AB, vzero);
    vaccCDEF = wasm_i32x4_max(vaccCDEF, vzero);

    vacc0123 = wasm_i32x4_min(vacc0123, vsix);
    vacc4567 = wasm_i32x4_min(vacc4567, vsix);
    vacc89AB = wasm_i32x4_min(vacc89AB, vsix);
    vaccCDEF = wasm_i32x4_min(vaccCDEF, vsix);

    vacc0123 = wasm_f32x4_mul(vacc0123, vx0123);
    vacc4567 = wasm_f32x4_mul(vacc4567, vx4567);
    vacc89AB = wasm_f32x4_mul(vacc89AB, vx89AB);
    vaccCDEF = wasm_f32x4_mul(vaccCDEF, vxCDEF);

    wasm_v128_store(y, vacc0123);
    wasm_v128_store(y + 4, vacc4567);
    wasm_v128_store(y + 8, vacc89AB);
    wasm_v128_store(y + 12, vaccCDEF);
    y += 16;
  }
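  // Remainder loop: four floats at a time.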
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    v128_t vx = wasm_v128_load(x);
    x += 4;

    v128_t vacc = wasm_f32x4_add(vx, vthree);
    vx = wasm_f32x4_mul(vx, vsixth);
    vacc = wasm_i32x4_max(vacc, vzero);
    vacc = wasm_i32x4_min(vacc, vsix);
    vacc = wasm_f32x4_mul(vacc, vx);

    wasm_v128_store(y, vacc);
    y += 4;
  }
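  // Tail of 1..3 floats: a full vector is loaded (reading past the last
  // element here relies on XNNPACK's buffer padding guarantees), the result
  // is computed in all lanes, and only the valid lanes are stored: two lanes
  // at once via a f64x2 extract, then a single lane.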
  if XNN_UNLIKELY(n != 0) {
    v128_t vx = wasm_v128_load(x);

    v128_t vacc = wasm_f32x4_add(vx, vthree);
    vx = wasm_f32x4_mul(vx, vsixth);
    vacc = wasm_i32x4_max(vacc, vzero);
    vacc = wasm_i32x4_min(vacc, vsix);
    vacc = wasm_f32x4_mul(vacc, vx);

    if (n & (2 * sizeof(float))) {
      *((double*) y) = wasm_f64x2_extract_lane(vacc, 0);
      vacc = wasm_v32x4_shuffle(vacc, vacc, 2, 3, 2, 3);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      *y = wasm_f32x4_extract_lane(vacc, 0);
    }
  }
}
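
// A minimal usage sketch (illustrative, not part of the generated file; it
// assumes the scalar variant of xnn_f32_hswish_params exposes exactly the
// sixth/three/six fields read above, and that the buffers satisfy XNNPACK's
// padding requirements):
//
//   union xnn_f32_hswish_params params;
//   params.scalar.sixth = 0x1.555556p-3f;  // nearest float to 1/6
//   params.scalar.three = 3.0f;
//   params.scalar.six = 6.0f;
//   xnn_f32_hswish_ukernel__wasmsimd_x16(batch * sizeof(float), input, output, &params);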