// Auto-generated file. Do not edit!
//   Template: src/f32-vmulcaddc/wasmsimd.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>

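// vmulcaddc: per-channel multiply-add, output = clamp(input * scale + bias, min, max).
// This variant processes 2 rows at a time, 8 channels per main-loop iteration
// ("c8 ... 2x"); `channels` and both strides are given in bytes. The "x86" suffix
// marks the compare+bitselect clamping sequence, which tends to lower to cheaper
// instructions than semantics-exact f32x4.min/max on x86 WebAssembly engines.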
void xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
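  // If only a single row remains, alias row 1 onto row 0 so its loads and
  // stores are harmless duplicates of row 0's.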
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = i0;
    o1 = o0;
  }

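  // Per-iteration pointer bump: skip ahead two rows, minus the `channels`
  // bytes already consumed by the inner loops.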
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

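  // Broadcast the scalar clamping bounds across all four lanes.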
  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  do {
    const float* w = weights;
    size_t c = channels;
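    // Main loop: 8 channels at a time across both rows. Weights are packed in
    // groups of 8 scales followed by 8 biases, so w advances by 16 per group.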
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const v128_t vscale0123 = wasm_v128_load(w);
      const v128_t vscale4567 = wasm_v128_load(w + 4);

      v128_t vacc0x0123 = wasm_v128_load(i0);
      v128_t vacc0x4567 = wasm_v128_load(i0 + 4);
      i0 += 8;
      v128_t vacc1x0123 = wasm_v128_load(i1);
      v128_t vacc1x4567 = wasm_v128_load(i1 + 4);
      i1 += 8;

      const v128_t vbias0123 = wasm_v128_load(w + 8);
      const v128_t vbias4567 = wasm_v128_load(w + 12);

      vacc0x0123 = wasm_f32x4_add(vbias0123, wasm_f32x4_mul(vscale0123, vacc0x0123));
      vacc0x4567 = wasm_f32x4_add(vbias4567, wasm_f32x4_mul(vscale4567, vacc0x4567));
      vacc1x0123 = wasm_f32x4_add(vbias0123, wasm_f32x4_mul(vscale0123, vacc1x0123));
      vacc1x4567 = wasm_f32x4_add(vbias4567, wasm_f32x4_mul(vscale4567, vacc1x4567));

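      // Clamp below: where acc < min, bitselect picks min, otherwise acc. The
      // compare+select pattern typically lowers to cmpps+blendvps on x86.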
      vacc0x0123 = wasm_v128_bitselect(vmin, vacc0x0123, wasm_f32x4_lt(vacc0x0123, vmin));
      vacc0x4567 = wasm_v128_bitselect(vmin, vacc0x4567, wasm_f32x4_lt(vacc0x4567, vmin));
      vacc1x0123 = wasm_v128_bitselect(vmin, vacc1x0123, wasm_f32x4_lt(vacc1x0123, vmin));
      vacc1x4567 = wasm_v128_bitselect(vmin, vacc1x4567, wasm_f32x4_lt(vacc1x4567, vmin));

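      // Clamp above: where acc <= max, keep acc, otherwise take max.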
      vacc0x0123 = wasm_v128_bitselect(vacc0x0123, vmax, wasm_f32x4_le(vacc0x0123, vmax));
      vacc0x4567 = wasm_v128_bitselect(vacc0x4567, vmax, wasm_f32x4_le(vacc0x4567, vmax));
      vacc1x0123 = wasm_v128_bitselect(vacc1x0123, vmax, wasm_f32x4_le(vacc1x0123, vmax));
      vacc1x4567 = wasm_v128_bitselect(vacc1x4567, vmax, wasm_f32x4_le(vacc1x4567, vmax));

      wasm_v128_store(o0, vacc0x0123);
      wasm_v128_store(o0 + 4, vacc0x4567);
      o0 += 8;
      wasm_v128_store(o1, vacc1x0123);
      wasm_v128_store(o1 + 4, vacc1x4567);
      o1 += 8;

      w += 16;
    }
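    // Remainder: one full vector of 4 channels. The bias still sits 8 floats
    // past the scale because weights stay packed in 8-scale/8-bias groups.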
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 += 4;
      v128_t vacc1 = wasm_v128_load(i1);
      i1 += 4;

      const v128_t vbias = wasm_v128_load(w + 8);

      vacc0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc0));
      vacc1 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc1));

      vacc0 = wasm_v128_bitselect(vmin, vacc0, wasm_f32x4_lt(vacc0, vmin));
      vacc1 = wasm_v128_bitselect(vmin, vacc1, wasm_f32x4_lt(vacc1, vmin));

      vacc0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
      vacc1 = wasm_v128_bitselect(vacc1, vmax, wasm_f32x4_le(vacc1, vmax));

      wasm_v128_store(o0, vacc0);
      o0 += 4;
      wasm_v128_store(o1, vacc1);
      o1 += 4;

      w += 4;
    }
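    // Final 1-3 channels: compute a full vector, then store only the valid
    // lanes (two at a time as a double, then one as a float).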
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);

      v128_t vacc0 = wasm_v128_load(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      v128_t vacc1 = wasm_v128_load(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      const v128_t vbias = wasm_v128_load(w + 8);

      vacc0 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc0));
      vacc1 = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc1));

      vacc0 = wasm_v128_bitselect(vmin, vacc0, wasm_f32x4_lt(vacc0, vmin));
      vacc1 = wasm_v128_bitselect(vmin, vacc1, wasm_f32x4_lt(vacc1, vmin));

      vacc0 = wasm_v128_bitselect(vacc0, vmax, wasm_f32x4_le(vacc0, vmax));
      vacc1 = wasm_v128_bitselect(vacc1, vmax, wasm_f32x4_le(vacc1, vmax));

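      // Store the low two lanes as one 64-bit value, shuffle lanes 2-3 down,
      // then store a single remaining lane if `c` covers an odd float.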
      if (c & (2 * sizeof(float))) {
        *((double*) o0) = wasm_f64x2_extract_lane(vacc0, 0);
        *((double*) o1) = wasm_f64x2_extract_lane(vacc1, 0);

        vacc0 = wasm_v32x4_shuffle(vacc0, vacc0, 2, 3, 2, 3);
        vacc1 = wasm_v32x4_shuffle(vacc1, vacc1, 2, 3, 2, 3);

        o0 += 2;
        o1 += 2;
      }
      if (c & (1 * sizeof(float))) {
        *o0++ = wasm_f32x4_extract_lane(vacc0, 0);
        *o1++ = wasm_f32x4_extract_lane(vacc1, 0);
      }
    }
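    // Step both rows' pointers ahead to the next pair of rows.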
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
    if XNN_UNPREDICTABLE(rows < 4) {
      i1 = i0;
      o1 = o0;
    }
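    // doz() is difference-or-zero (saturating subtraction) from
    // <xnnpack/math.h>, so `rows` cannot underflow past zero.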
    rows = doz(rows, 2);
  } while (rows != 0);
}
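// Illustrative call, kept here as a comment (hypothetical shapes and values,
// not part of the generated kernel):
//
//   float input[3 * 8], output[3 * 8];
//   float weights[16];  // 8 scales followed by 8 biases
//   union xnn_f32_minmax_params params;
//   params.scalar.min = 0.0f;  // e.g. ReLU6-style clamping
//   params.scalar.max = 6.0f;
//   xnn_f32_vmulcaddc_minmax_ukernel_c8__wasmsimd_x86_2x(
//       /*rows=*/3, /*channels=*/8 * sizeof(float),
//       input, /*input_stride=*/8 * sizeof(float),
//       weights, output, /*output_stride=*/8 * sizeof(float),
//       &params);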