// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>

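// Channelwise (CW layout) global average pooling micro-kernel: for each
// channel, sums `elements` bytes worth of float inputs, scales the sum by the
// precomputed multiplier from params, and clamps the result to
// [output_min, output_max].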
void xnn_f32_gavgpool_cw_ukernel__wasmsimd_arm_x4(
    size_t elements,
    size_t channels,
    const float* input,
    float* output,
    const union xnn_f32_gavgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(elements != 0);
  assert(elements % sizeof(float) == 0);
  assert(channels != 0);

  const float* i0 = input;
  const float* i1 = (const float*) ((uintptr_t) i0 + elements);
  const float* i2 = (const float*) ((uintptr_t) i1 + elements);
  const float* i3 = (const float*) ((uintptr_t) i2 + elements);

  const v128_t vmask = wasm_v128_load(params->scalar.mask);
  const v128_t vmultiplier = wasm_v128_load32_splat(&params->scalar.multiplier);
  const v128_t vmin = wasm_v128_load32_splat(&params->scalar.output_min);
  const v128_t vmax = wasm_v128_load32_splat(&params->scalar.output_max);

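  // Main loop: accumulate 4 channels at a time, one vector accumulator per
  // channel, so the four totals can later be combined with shuffles and
  // stored with a single wasm_v128_store.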
  while (channels >= 4) {
    v128_t vsum0 = wasm_f32x4_const_splat(0.0f);
    v128_t vsum1 = vsum0;
    v128_t vsum2 = vsum0;
    v128_t vsum3 = vsum0;
    size_t n = elements;
    while (n >= 4 * sizeof(float)) {
      const v128_t vi0 = wasm_v128_load(i0);
      i0 += 4;
      const v128_t vi1 = wasm_v128_load(i1);
      i1 += 4;
      const v128_t vi2 = wasm_v128_load(i2);
      i2 += 4;
      const v128_t vi3 = wasm_v128_load(i3);
      i3 += 4;

      vsum0 = wasm_f32x4_add(vsum0, vi0);
      vsum1 = wasm_f32x4_add(vsum1, vi1);
      vsum2 = wasm_f32x4_add(vsum2, vi2);
      vsum3 = wasm_f32x4_add(vsum3, vi3);
      n -= 4 * sizeof(float);
    }

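    // Remainder of 1-3 floats per row: mask off lanes past the end of the row
    // before accumulating (the kernel is declared XNN_OOB_READS, so the
    // overread itself is allowed).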
    if XNN_UNLIKELY(n != 0) {
      const v128_t vi0 = wasm_v128_and(wasm_v128_load(i0), vmask);
      i0 = (const float*) ((uintptr_t) i0 + n);
      const v128_t vi1 = wasm_v128_and(wasm_v128_load(i1), vmask);
      i1 = (const float*) ((uintptr_t) i1 + n);
      const v128_t vi2 = wasm_v128_and(wasm_v128_load(i2), vmask);
      i2 = (const float*) ((uintptr_t) i2 + n);
      const v128_t vi3 = wasm_v128_and(wasm_v128_load(i3), vmask);
      i3 = (const float*) ((uintptr_t) i3 + n);

      vsum0 = wasm_f32x4_add(vsum0, vi0);
      vsum1 = wasm_f32x4_add(vsum1, vi1);
      vsum2 = wasm_f32x4_add(vsum2, vi2);
      vsum3 = wasm_f32x4_add(vsum3, vi3);
    }

    // Having exactly 4 rows makes this work out nicely as we end up with
    // the 4 totals in 4 different lanes of the same vector.
    const v128_t vsum01 = wasm_f32x4_add(wasm_v32x4_shuffle(vsum0, vsum1, 0, 2, 4, 6), wasm_v32x4_shuffle(vsum0, vsum1, 1, 3, 5, 7));
    const v128_t vsum23 = wasm_f32x4_add(wasm_v32x4_shuffle(vsum2, vsum3, 0, 2, 4, 6), wasm_v32x4_shuffle(vsum2, vsum3, 1, 3, 5, 7));
    const v128_t vsum = wasm_f32x4_add(wasm_v32x4_shuffle(vsum01, vsum23, 0, 2, 4, 6), wasm_v32x4_shuffle(vsum01, vsum23, 1, 3, 5, 7));
    v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);

    vout = wasm_f32x4_max(vout, vmin);
    vout = wasm_f32x4_min(vout, vmax);

    wasm_v128_store(output, vout);
    output += 4;
    i0 = i3;
    i1 = (const float*) ((uintptr_t) i0 + elements);
    i2 = (const float*) ((uintptr_t) i1 + elements);
    i3 = (const float*) ((uintptr_t) i2 + elements);
    channels -= 4;
  }

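  // Process any remaining channels (fewer than 4) one at a time.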
  while (channels != 0) {
    v128_t vsum = wasm_f32x4_const_splat(0.0f);
    size_t n = elements;
    while (n >= 4 * sizeof(float)) {
      const v128_t vi0 = wasm_v128_load(i0);
      i0 += 4;
      vsum = wasm_f32x4_add(vsum, vi0);
      n -= 4 * sizeof(float);
    }

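    // Masked accumulation of the 1-3 float remainder, as in the main loop.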
    if XNN_UNLIKELY(n != 0) {
      v128_t vi0 = wasm_v128_and(vmask, wasm_v128_load(i0));
      i0 = (const float*) ((uintptr_t) i0 + n);
      vsum = wasm_f32x4_add(vsum, vi0);
    }

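    // Two shuffle+add steps reduce the 4 lanes of vsum to the same total in
    // every lane; lane 0 holds the channel sum.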
    vsum = wasm_f32x4_add(wasm_v32x4_shuffle(vsum, vsum, 0, 2, 4, 6), wasm_v32x4_shuffle(vsum, vsum, 1, 3, 5, 7));
    vsum = wasm_f32x4_add(wasm_v32x4_shuffle(vsum, vsum, 0, 2, 4, 6), wasm_v32x4_shuffle(vsum, vsum, 1, 3, 5, 7));

    v128_t vout = wasm_f32x4_mul(vsum, vmultiplier);

    vout = wasm_f32x4_max(vout, vmin);
    vout = wasm_f32x4_min(vout, vmax);

    *output++ = wasm_f32x4_extract_lane(vout, 0);
    channels -= 1;
  }
}