// Auto-generated file. Do not edit!
// Template: src/f32-prelu/neon.c.in
// Generator: tools/xngen
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/math.h>
#include <xnnpack/prelu.h>

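// PReLU (Parametric ReLU) micro-kernel: for each element x with per-channel
// weight w, computes x >= 0 ? x : w * x, then clamps the result to the
// [params->scalar.min, params->scalar.max] range. Processes 2 rows at a time,
// 8 channels per main-loop iteration (hence the "2x8" in the name).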
void xnn_f32_prelu_ukernel__neon_2x8(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_output_params params[restrict static 1])
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

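  // Set up pointers for two rows. If fewer than 2 rows remain, alias row 1
  // to row 0 so its loads and (redundant) stores stay in bounds.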
  const float* i0 = input;
  float* o0 = output;
  const float* i1 = (const float*) ((uintptr_t) i0 + input_stride);
  float* o1 = (float*) ((uintptr_t) o0 + output_stride);
  if XNN_UNPREDICTABLE(rows < 2) {
    i1 = i0;
    o1 = o0;
  }

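  // After each 2-row pass the pointers sit at the end of their rows, so
  // advancing by (2 * stride - channels) bytes moves them to the start of the
  // next row pair. Note that channels counts bytes here (asserted above).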
  const size_t input_increment = input_stride * 2 - channels;
  const size_t output_increment = output_stride * 2 - channels;

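  // Broadcast the scalar clamping bounds across all 4 lanes.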
  const float32x4_t vmin = vld1q_dup_f32(&params->scalar.min);
  const float32x4_t vmax = vld1q_dup_f32(&params->scalar.max);
  do {
    const float* w = weights;
    size_t c = channels;
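    // Main loop: 8 channels x 2 rows per iteration (c counts bytes).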
    for (; c >= 8 * sizeof(float); c -= 8 * sizeof(float)) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;
      const float32x4_t vw4567 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi0x4567 = vld1q_f32(i0); i0 += 4;
      const float32x4_t vi1x0123 = vld1q_f32(i1); i1 += 4;
      const float32x4_t vi1x4567 = vld1q_f32(i1); i1 += 4;

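      // Multiply every lane by its weight and, in parallel, build a mask that
      // is all-ones in lanes where the input is negative: comparing the raw
      // bits as signed integers against zero tests the IEEE 754 sign bit.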
      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc0x4567 = vmulq_f32(vi0x4567, vw4567);
      const uint32x4_t vm0x4567 = vcltq_s32(vreinterpretq_s32_f32(vi0x4567), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));
      float32x4_t vacc1x4567 = vmulq_f32(vi1x4567, vw4567);
      const uint32x4_t vm1x4567 = vcltq_s32(vreinterpretq_s32_f32(vi1x4567), vmovq_n_s32(0));

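      // Bitwise select: keep w*x where the input was negative, x otherwise.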
      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc0x4567 = vbslq_f32(vm0x4567, vacc0x4567, vi0x4567);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);
      vacc1x4567 = vbslq_f32(vm1x4567, vacc1x4567, vi1x4567);

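      // Clamp the results into the [min, max] output range.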
      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc0x4567 = vmaxq_f32(vacc0x4567, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);
      vacc1x4567 = vmaxq_f32(vacc1x4567, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc0x4567 = vminq_f32(vacc0x4567, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);
      vacc1x4567 = vminq_f32(vacc1x4567, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o0, vacc0x4567); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
      vst1q_f32(o1, vacc1x4567); o1 += 4;
    }
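    // Remainder loop: 4 channels x 2 rows per iteration, same
    // multiply/select/clamp sequence as above.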
    for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0);
      i0 += 4;
      const float32x4_t vi1x0123 = vld1q_f32(i1);
      i1 += 4;

      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));

      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

      vst1q_f32(o0, vacc0x0123); o0 += 4;
      vst1q_f32(o1, vacc1x0123); o1 += 4;
    }
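    // Tail: 1-3 channels remain. The full 128-bit loads intentionally read
    // past the last channel; the input pointers advance by only c bytes, and
    // the extra lanes are never stored. This presumes the XNNPACK convention
    // that input and weight buffers carry enough readable padding past the
    // end for such over-reads.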
    if XNN_UNLIKELY(c != 0) {
      const float32x4_t vw0123 = vld1q_f32(w); w += 4;

      const float32x4_t vi0x0123 = vld1q_f32(i0);
      i0 = (const float*) ((uintptr_t) i0 + c);
      const float32x4_t vi1x0123 = vld1q_f32(i1);
      i1 = (const float*) ((uintptr_t) i1 + c);

      float32x4_t vacc0x0123 = vmulq_f32(vi0x0123, vw0123);
      const uint32x4_t vm0x0123 = vcltq_s32(vreinterpretq_s32_f32(vi0x0123), vmovq_n_s32(0));
      float32x4_t vacc1x0123 = vmulq_f32(vi1x0123, vw0123);
      const uint32x4_t vm1x0123 = vcltq_s32(vreinterpretq_s32_f32(vi1x0123), vmovq_n_s32(0));

      vacc0x0123 = vbslq_f32(vm0x0123, vacc0x0123, vi0x0123);
      vacc1x0123 = vbslq_f32(vm1x0123, vacc1x0123, vi1x0123);

      vacc0x0123 = vmaxq_f32(vacc0x0123, vmin);
      vacc1x0123 = vmaxq_f32(vacc1x0123, vmin);

      vacc0x0123 = vminq_f32(vacc0x0123, vmax);
      vacc1x0123 = vminq_f32(vacc1x0123, vmax);

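      // Store the 1-3 remaining results: a 2-lane pair if two or more floats
      // remain, then a single lane for an odd trailing float.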
      float32x2_t vacc0x01 = vget_low_f32(vacc0x0123);
      float32x2_t vacc1x01 = vget_low_f32(vacc1x0123);
      if (c & (2 * sizeof(float))) {
        vst1_f32(o0, vacc0x01); o0 += 2;
        vst1_f32(o1, vacc1x01); o1 += 2;

        vacc0x01 = vget_high_f32(vacc0x0123);
        vacc1x01 = vget_high_f32(vacc1x0123);
      }
      if (c & (1 * sizeof(float))) {
        vst1_lane_f32(o0, vacc0x01, 0); o0 += 1;
        vst1_lane_f32(o1, vacc1x01, 0); o1 += 1;
      }
    }
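    // Jump from the end of the current row pair to the start of the next one.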
    i0 = (const float*) ((uintptr_t) i0 + input_increment);
    o0 = (float*) ((uintptr_t) o0 + output_increment);
    i1 = (const float*) ((uintptr_t) i1 + input_increment);
    o1 = (float*) ((uintptr_t) o1 + output_increment);
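    // 2 rows were just consumed, so fewer than 2 remain iff rows < 4; in that
    // case alias row 1 to row 0 again for the final odd row.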
    if XNN_UNPREDICTABLE(rows < 4) {
      i1 = i0;
      o1 = o0;
    }
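    // doz() is difference-or-zero (saturating subtraction, from
    // <xnnpack/math.h>), so an odd trailing row count still reaches zero.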
    rows = doz(rows, 2);
  } while (rows != 0);
}