// Auto-generated file. Do not edit!
// Template: src/f32-prelu/wasm.c.in
// Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

15
xnn_f32_prelu_ukernel__wasm_2x4(size_t rows,size_t channels,const float * restrict input,size_t input_stride,const float * restrict weights,float * restrict output,size_t output_stride)16 void xnn_f32_prelu_ukernel__wasm_2x4(
17 size_t rows,
18 size_t channels,
19 const float*restrict input,
20 size_t input_stride,
21 const float*restrict weights,
22 float*restrict output,
23 size_t output_stride)
24 {
25 assert(rows != 0);
26 assert(channels != 0);
27 assert(channels % sizeof(float) == 0);
28
29 const float* i0 = input;
30 float* o0 = output;
31 const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
32 float* o1 = (float*) ((uintptr_t) o0 + output_stride);
33 if XNN_UNPREDICTABLE(rows < 2) {
34 i1 = i0;
35 o1 = o0;
36 }
37
38 const size_t input_increment = input_stride * 2 - channels;
39 const size_t output_increment = output_stride * 2 - channels;
40
41 const float vzero = 0.0f;
42 do {
43 const float* w = weights;
44 size_t c = channels;
45 for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
46 const float vw0 = w[0];
47 const float vw1 = w[1];
48 const float vw2 = w[2];
49 const float vw3 = w[3];
50
51 float vi0x0 = i0[0];
52 float vi0x1 = i0[1];
53 float vi0x2 = i0[2];
54 float vi0x3 = i0[3];
55 i0 += 4;
56 float vi1x0 = i1[0];
57 float vi1x1 = i1[1];
58 float vi1x2 = i1[2];
59 float vi1x3 = i1[3];
60 i1 += 4;
61
62 float vacc0x0 = __builtin_wasm_max_f32(vi0x0, vzero);
63 vi0x0 = __builtin_wasm_min_f32(vi0x0, vzero);
64 float vacc0x1 = __builtin_wasm_max_f32(vi0x1, vzero);
65 vi0x1 = __builtin_wasm_min_f32(vi0x1, vzero);
66 float vacc0x2 = __builtin_wasm_max_f32(vi0x2, vzero);
67 vi0x2 = __builtin_wasm_min_f32(vi0x2, vzero);
68 float vacc0x3 = __builtin_wasm_max_f32(vi0x3, vzero);
69 vi0x3 = __builtin_wasm_min_f32(vi0x3, vzero);
70 float vacc1x0 = __builtin_wasm_max_f32(vi1x0, vzero);
71 vi1x0 = __builtin_wasm_min_f32(vi1x0, vzero);
72 float vacc1x1 = __builtin_wasm_max_f32(vi1x1, vzero);
73 vi1x1 = __builtin_wasm_min_f32(vi1x1, vzero);
74 float vacc1x2 = __builtin_wasm_max_f32(vi1x2, vzero);
75 vi1x2 = __builtin_wasm_min_f32(vi1x2, vzero);
76 float vacc1x3 = __builtin_wasm_max_f32(vi1x3, vzero);
77 vi1x3 = __builtin_wasm_min_f32(vi1x3, vzero);
78
79 vacc0x0 += vi0x0 * vw0;
80 vacc0x1 += vi0x1 * vw1;
81 vacc0x2 += vi0x2 * vw2;
82 vacc0x3 += vi0x3 * vw3;
83 vacc1x0 += vi1x0 * vw0;
84 vacc1x1 += vi1x1 * vw1;
85 vacc1x2 += vi1x2 * vw2;
86 vacc1x3 += vi1x3 * vw3;
87
88 o0[0] = vacc0x0;
89 o0[1] = vacc0x1;
90 o0[2] = vacc0x2;
91 o0[3] = vacc0x3;
92 o0 += 4;
93 o1[0] = vacc1x0;
94 o1[1] = vacc1x1;
95 o1[2] = vacc1x2;
96 o1[3] = vacc1x3;
97 o1 += 4;
98
99 w += 4;
100 }
101 for (; c != 0; c -= sizeof(float)) {
102 const float vw = *w++;
103
104 float vi0 = *i0++;
105 float vi1 = *i1++;
106
107 float vacc0 = __builtin_wasm_max_f32(vi0, vzero);
108 vi0 = __builtin_wasm_min_f32(vi0, vzero);
109 float vacc1 = __builtin_wasm_max_f32(vi1, vzero);
110 vi1 = __builtin_wasm_min_f32(vi1, vzero);
111
112 vacc0 += vi0 * vw;
113 vacc1 += vi1 * vw;
114
115 *o0++ = vacc0;
116 *o1++ = vacc1;
117 }
118 i0 = (const float*) ((uintptr_t) i0 + input_increment);
119 o0 = (float*) ((uintptr_t) o0 + output_increment);
120 i1 = (const float*) ((uintptr_t) i1 + input_increment);
121 o1 = (float*) ((uintptr_t) o1 + output_increment);
122 if XNN_UNPREDICTABLE(rows < 4) {
123 i1 = i0;
124 o1 = o0;
125 }
126 rows = doz(rows, 2);
127 } while (rows != 0);
128 }
129