// Auto-generated file. Do not edit!
//   Template: src/f16-raddstoreexpminusmax/neonfp16arith-rr2-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

void xnn_f16_raddstoreexpminusmax_ukernel__neonfp16arith_rr2_p2_x48_acc2(
    size_t batch,
    const void* input,
    const void* max,
    void* output,
    void* sum,
    const union xnn_f16_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
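  // Computes out[i] = exp(in[i] - max) for each element and accumulates the
  // sum of all outputs into *sum (the first pass of a softmax). The kernel
  // name encodes the variant: NEON FP16 arithmetic, two-constant ("rr2")
  // range reduction, degree-2 ("p2") polynomial, 48 elements per main-loop
  // iteration ("x48"), and two partial-sum accumulators ("acc2").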
  assert(batch % sizeof(__fp16) == 0);

  const float16x8_t vi_max = vld1q_dup_f16(max);
  const float16x8_t vlog2e = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.log2e));
  const float16x8_t vmagic_bias = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.magic_bias));
  const float16x8_t vminus_ln2_hi = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_hi));
  const float16x8_t vminus_ln2_lo = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.minus_ln2_lo));
  const float16x8_t vc2 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c2));
  const float16x8_t vc1 = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.c1));
  const float16x8_t vdenorm_cutoff = vreinterpretq_f16_u16(vld1q_dup_u16(&params->neonfp16arith_rr2_p2.denorm_cutoff));

  const __fp16* i = (const __fp16*) input;
  __fp16* o = (__fp16*) output;
  float16x8_t vacc0 = vmovq_n_f16(0.0f);
  float16x8_t vacc1 = vmovq_n_f16(0.0f);
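  // Main loop: process 48 half-precision elements (6 vectors of 8) per
  // iteration, alternating between two accumulators to shorten the
  // floating-point dependency chain on the partial sum.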
  for (; batch >= 48 * sizeof(__fp16); batch -= 48 * sizeof(__fp16)) {
    const float16x8_t vi0 = vld1q_f16(i); i += 8;
    const float16x8_t vi1 = vld1q_f16(i); i += 8;
    const float16x8_t vi2 = vld1q_f16(i); i += 8;
    const float16x8_t vi3 = vld1q_f16(i); i += 8;
    const float16x8_t vi4 = vld1q_f16(i); i += 8;
    const float16x8_t vi5 = vld1q_f16(i); i += 8;

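    // Subtract the per-batch maximum so that x <= 0; exp(x) then cannot
    // overflow, which is what makes the softmax pass numerically stable.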
    const float16x8_t vx0 = vsubq_f16(vi0, vi_max);
    const float16x8_t vx1 = vsubq_f16(vi1, vi_max);
    const float16x8_t vx2 = vsubq_f16(vi2, vi_max);
    const float16x8_t vx3 = vsubq_f16(vi3, vi_max);
    const float16x8_t vx4 = vsubq_f16(vi4, vi_max);
    const float16x8_t vx5 = vsubq_f16(vi5, vi_max);

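    // n = round(x * log2(e)): adding the magic bias rounds x * log2(e) to an
    // integer held in the low bits of the fp16 mantissa.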
    float16x8_t vn0 = vfmaq_f16(vmagic_bias, vx0, vlog2e);
    float16x8_t vn1 = vfmaq_f16(vmagic_bias, vx1, vlog2e);
    float16x8_t vn2 = vfmaq_f16(vmagic_bias, vx2, vlog2e);
    float16x8_t vn3 = vfmaq_f16(vmagic_bias, vx3, vlog2e);
    float16x8_t vn4 = vfmaq_f16(vmagic_bias, vx4, vlog2e);
    float16x8_t vn5 = vfmaq_f16(vmagic_bias, vx5, vlog2e);

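    // Build the scale s = 2**n by shifting the integer n from the low
    // mantissa bits into the fp16 exponent field (fp16 has a 10-bit mantissa).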
    const float16x8_t vs0 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn0), 10));
    const float16x8_t vs1 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn1), 10));
    const float16x8_t vs2 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn2), 10));
    const float16x8_t vs3 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn3), 10));
    const float16x8_t vs4 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn4), 10));
    const float16x8_t vs5 = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn5), 10));

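    // Subtract the magic bias to recover n as a floating-point value.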
    vn0 = vsubq_f16(vn0, vmagic_bias);
    vn1 = vsubq_f16(vn1, vmagic_bias);
    vn2 = vsubq_f16(vn2, vmagic_bias);
    vn3 = vsubq_f16(vn3, vmagic_bias);
    vn4 = vsubq_f16(vn4, vmagic_bias);
    vn5 = vsubq_f16(vn5, vmagic_bias);

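    // Two-step Cody-Waite range reduction: t = x - n * ln2, with ln2 split
    // into high and low parts to reduce rounding error.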
    float16x8_t vt0 = vfmaq_f16(vx0, vn0, vminus_ln2_hi);
    float16x8_t vt1 = vfmaq_f16(vx1, vn1, vminus_ln2_hi);
    float16x8_t vt2 = vfmaq_f16(vx2, vn2, vminus_ln2_hi);
    float16x8_t vt3 = vfmaq_f16(vx3, vn3, vminus_ln2_hi);
    float16x8_t vt4 = vfmaq_f16(vx4, vn4, vminus_ln2_hi);
    float16x8_t vt5 = vfmaq_f16(vx5, vn5, vminus_ln2_hi);

    vt0 = vfmaq_f16(vt0, vn0, vminus_ln2_lo);
    vt1 = vfmaq_f16(vt1, vn1, vminus_ln2_lo);
    vt2 = vfmaq_f16(vt2, vn2, vminus_ln2_lo);
    vt3 = vfmaq_f16(vt3, vn3, vminus_ln2_lo);
    vt4 = vfmaq_f16(vt4, vn4, vminus_ln2_lo);
    vt5 = vfmaq_f16(vt5, vn5, vminus_ln2_lo);

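    // Degree-2 polynomial approximation on the reduced range:
    // p = c1 + c2 * t, so that exp(t) ~ 1 + t * p.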
    const float16x8_t vp0 = vfmaq_f16(vc1, vc2, vt0);
    const float16x8_t vp1 = vfmaq_f16(vc1, vc2, vt1);
    const float16x8_t vp2 = vfmaq_f16(vc1, vc2, vt2);
    const float16x8_t vp3 = vfmaq_f16(vc1, vc2, vt3);
    const float16x8_t vp4 = vfmaq_f16(vc1, vc2, vt4);
    const float16x8_t vp5 = vfmaq_f16(vc1, vc2, vt5);

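    // Pre-scale t by s so the reconstruction below needs a single FMA.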
    vt0 = vmulq_f16(vt0, vs0);
    vt1 = vmulq_f16(vt1, vs1);
    vt2 = vmulq_f16(vt2, vs2);
    vt3 = vmulq_f16(vt3, vs3);
    vt4 = vmulq_f16(vt4, vs4);
    vt5 = vmulq_f16(vt5, vs5);

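    // Reconstruct f = s + (t * s) * p = s * (1 + t * p) ~ exp(x), and compute
    // the underflow mask (all-ones lanes where x is below the denormal cutoff).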
    float16x8_t vf0 = vfmaq_f16(vs0, vp0, vt0);
    const uint16x8_t vm0 = vcltq_f16(vx0, vdenorm_cutoff);
    float16x8_t vf1 = vfmaq_f16(vs1, vp1, vt1);
    const uint16x8_t vm1 = vcltq_f16(vx1, vdenorm_cutoff);
    float16x8_t vf2 = vfmaq_f16(vs2, vp2, vt2);
    const uint16x8_t vm2 = vcltq_f16(vx2, vdenorm_cutoff);
    float16x8_t vf3 = vfmaq_f16(vs3, vp3, vt3);
    const uint16x8_t vm3 = vcltq_f16(vx3, vdenorm_cutoff);
    float16x8_t vf4 = vfmaq_f16(vs4, vp4, vt4);
    const uint16x8_t vm4 = vcltq_f16(vx4, vdenorm_cutoff);
    float16x8_t vf5 = vfmaq_f16(vs5, vp5, vt5);
    const uint16x8_t vm5 = vcltq_f16(vx5, vdenorm_cutoff);

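    // Flush outputs that would be denormal to exact zero (BIC clears the
    // lanes selected by the mask).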
    vf0 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf0), vm0));
    vf1 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf1), vm1));
    vf2 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf2), vm2));
    vf3 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf3), vm3));
    vf4 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf4), vm4));
    vf5 = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf5), vm5));

    vst1q_f16(o, vf0); o += 8;
    vst1q_f16(o, vf1); o += 8;
    vst1q_f16(o, vf2); o += 8;
    vst1q_f16(o, vf3); o += 8;
    vst1q_f16(o, vf4); o += 8;
    vst1q_f16(o, vf5); o += 8;

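    // Accumulate the partial sums, alternating between the two accumulators.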
    vacc0 = vaddq_f16(vacc0, vf0);
    vacc1 = vaddq_f16(vacc1, vf1);
    vacc0 = vaddq_f16(vacc0, vf2);
    vacc1 = vaddq_f16(vacc1, vf3);
    vacc0 = vaddq_f16(vacc0, vf4);
    vacc1 = vaddq_f16(vacc1, vf5);
  }
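  // Combine the two partial-sum accumulators.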
  vacc0 = vaddq_f16(vacc0, vacc1);

  float16x8_t vacc = vacc0;
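  // Remainder loop: same computation, one vector of 8 elements at a time.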
  for (; batch >= 8 * sizeof(__fp16); batch -= 8 * sizeof(__fp16)) {
    const float16x8_t vi = vld1q_f16(i); i += 8;

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

    vst1q_f16(o, vf); o += 8;

    vacc = vaddq_f16(vacc, vf);
  }
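  // Fold the 8-lane accumulator to 4 lanes.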
  float16x4_t vacc_lo = vadd_f16(vget_low_f16(vacc), vget_high_f16(vacc));
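  // Tail: 1 to 7 remaining elements. A full vector is loaded (XNN_OOB_READS
  // marks the kernel as allowed to read past the end of the buffer), but only
  // the valid lanes are stored and added to the sum.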
  if (batch != 0) {
    assert(batch >= 1 * sizeof(__fp16));
    assert(batch <= 7 * sizeof(__fp16));
    const float16x8_t vi = vld1q_f16(i);

    const float16x8_t vx = vsubq_f16(vi, vi_max);

    float16x8_t vn = vfmaq_f16(vmagic_bias, vx, vlog2e);
    const float16x8_t vs = vreinterpretq_f16_s16(vshlq_n_s16(vreinterpretq_s16_f16(vn), 10));
    vn = vsubq_f16(vn, vmagic_bias);

    float16x8_t vt = vfmaq_f16(vx, vn, vminus_ln2_hi);
    vt = vfmaq_f16(vt, vn, vminus_ln2_lo);

    const float16x8_t vp = vfmaq_f16(vc1, vc2, vt);
    vt = vmulq_f16(vt, vs);

    float16x8_t vf = vfmaq_f16(vs, vp, vt);
    const uint16x8_t vm = vcltq_f16(vx, vdenorm_cutoff);
    vf = vreinterpretq_f16_u16(vbicq_u16(vreinterpretq_u16_f16(vf), vm));

    float16x4_t vf_lo = vget_low_f16(vf);
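    // Store 4, 2, then 1 lane(s) as needed. For the partial sums, shifting
    // the 64-bit lane pattern left zeroes the unstored low lanes while keeping
    // the just-stored values, so exactly the stored elements are accumulated;
    // vext then rotates the remaining lanes down for the next step.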
    if (batch & (4 * sizeof(__fp16))) {
      vst1_f16(o, vf_lo); o += 4;
      vacc_lo = vadd_f16(vacc_lo, vf_lo);
      vf_lo = vget_high_f16(vf);
    }
    if (batch & (2 * sizeof(__fp16))) {
      vst1_lane_u32((void*) o, vreinterpret_u32_f16(vf_lo), 0); o += 2;
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 32)));
      vf_lo = vext_f16(vf_lo, vf_lo, 2);
    }
    if (batch & (1 * sizeof(__fp16))) {
      vst1_lane_f16(o, vf_lo, 0);
      vacc_lo = vadd_f16(vacc_lo, vreinterpret_f16_u64(vshl_n_u64(vreinterpret_u64_f16(vf_lo), 48)));
    }
  }
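  // Horizontal reduction: pairwise-add the 4 lanes, then add the two partial
  // results and store the scalar sum.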
  vacc_lo = vpadd_f16(vacc_lo, vacc_lo);
  *((__fp16*) sum) = vget_lane_f16(vacc_lo, 0) + vget_lane_f16(vacc_lo, 1);
}