// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/neon-p5.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <arm_neon.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

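// Computes f[i] := exp(input[i] - *max) for each input element, stores the
// results to output, and accumulates their sum into *sum (the core of a
// numerically stable softmax). In the kernel name, "rr2" refers to the
// two-step range reduction, "p5" to the degree-5 polynomial approximation,
// and "x4" to the 4 elements processed per loop iteration.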
void xnn_f32_raddstoreexpminusmax_ukernel__neon_rr2_p5_x4(
    size_t elements,
    const float* input,
    const float* max,
    float* output,
    float* sum,
    const union xnn_f32_expminus_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
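  // Note: elements is the size of the input in bytes, not a count of floats.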
  assert(elements % sizeof(float) == 0);

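  // Broadcast the maximum input and the exp() approximation constants to all lanes.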
  const float32x4_t vi_max = vld1q_dup_f32(max);
  const float32x4_t vlog2e = vld1q_dup_f32(&params->neon_rr2_p5.log2e);
  const float32x4_t vmagic_bias = vld1q_dup_f32(&params->neon_rr2_p5.magic_bias);
  const float32x4_t vminus_ln2_hi = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_hi);
  const float32x4_t vminus_ln2_lo = vld1q_dup_f32(&params->neon_rr2_p5.minus_ln2_lo);
  const float32x4_t vc5 = vld1q_dup_f32(&params->neon_rr2_p5.c5);
  const float32x4_t vc4 = vld1q_dup_f32(&params->neon_rr2_p5.c4);
  const float32x4_t vc3 = vld1q_dup_f32(&params->neon_rr2_p5.c3);
  const float32x4_t vc2 = vld1q_dup_f32(&params->neon_rr2_p5.c2);
  const float32x4_t vc1 = vld1q_dup_f32(&params->neon_rr2_p5.c1);
  const float32x4_t vdenorm_cutoff = vld1q_dup_f32(&params->neon_rr2_p5.denorm_cutoff);

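  // vacc accumulates the running sum of the computed exponentials.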
  float32x4_t vacc = vmovq_n_f32(0.0f);
  for (; elements >= 4 * sizeof(float); elements -= 4 * sizeof(float)) {
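    // Load 4 inputs at a time.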
    const float32x4_t vi = vld1q_f32(input); input += 4;

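    // Subtract the maximum input: x := i - i_max. This implies x <= 0, so exp(x) <= 1.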
    const float32x4_t vx = vsubq_f32(vi, vi_max);

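    // Compute the reduced argument n := round(x / log(2)) with the magic-bias trick:
    // adding a large "magic bias" to x * log2(e) rounds the product to an integer in
    // the low mantissa bits of vn.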
    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

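    // Reconstruct the scale s := 2**n by shifting the integer bits of vn into the
    // floating-point exponent field.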
    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

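    // Subtract the magic bias to recover n as a float.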
    vn = vsubq_f32(vn, vmagic_bias);

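    // Two-step Cody-Waite range reduction (the "rr2" in the kernel name):
    // t := x - n * log(2), with log(2) split into high and low parts for extra precision.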
    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

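    // Degree-5 polynomial approximation of exp(t) on the reduced range (the "p5"),
    // evaluated with Horner's scheme: p := c1 + t * (c2 + t * (c3 + t * (c4 + t * c5))).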
    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

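    // Reconstruct the result: f := s + (t * s) * p, i.e. s * (1 + t * p(t)).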
    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

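    // Flush the result to zero where x < denorm_cutoff: the comparison yields an
    // all-ones mask for such lanes and vbic clears them, so underflowing lanes
    // produce exact zeroes instead of denormals.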
    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

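    // Store 4 outputs and add them to the running sum.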
    vst1q_f32(output, vf); output += 4;

    vacc = vaddq_f32(vacc, vf);
  }
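  // Reduce the 4-lane accumulator: AArch64 provides a horizontal add (vaddvq_f32);
  // on 32-bit ARM, fold the vector into a 2-lane partial sum instead.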
#if XNN_ARCH_ARM64
  float vacc_lo = vaddvq_f32(vacc);
#else
  float32x2_t vacc_lo = vadd_f32(vget_high_f32(vacc), vget_low_f32(vacc));
#endif
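  // Handle the remaining 1-3 elements.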
  if (elements != 0) {
    assert(elements >= 1 * sizeof(float));
    assert(elements <= 3 * sizeof(float));
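    // Load a full vector even though only 1-3 elements are valid; this may read
    // past the end of the input (hence the XNN_OOB_READS annotation). The result
    // is then computed exactly as in the main loop.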
    const float32x4_t vi = vld1q_f32(input); input += 4;

    const float32x4_t vx = vsubq_f32(vi, vi_max);

    float32x4_t vn = vmlaq_f32(vmagic_bias, vx, vlog2e);

    const float32x4_t vs = vreinterpretq_f32_s32(vshlq_n_s32(vreinterpretq_s32_f32(vn), 23));

    vn = vsubq_f32(vn, vmagic_bias);

    float32x4_t vt = vmlaq_f32(vx, vn, vminus_ln2_hi);
    vt = vmlaq_f32(vt, vn, vminus_ln2_lo);

    float32x4_t vp = vmlaq_f32(vc4, vc5, vt);
    vp = vmlaq_f32(vc3, vp, vt);
    vp = vmlaq_f32(vc2, vp, vt);
    vp = vmlaq_f32(vc1, vp, vt);

    vt = vmulq_f32(vt, vs);
    float32x4_t vf = vmlaq_f32(vs, vp, vt);

    vf = vreinterpretq_f32_u32(vbicq_u32(vreinterpretq_u32_f32(vf), vcltq_f32(vx, vdenorm_cutoff)));

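    // Store and accumulate only the valid lanes: two lanes first if at least
    // two elements remain, then a single lane for an odd remaining element.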
    float32x2_t vf_lo = vget_low_f32(vf);
    if (elements & (2 * sizeof(float))) {
      vst1_f32(output, vf_lo); output += 2;

      #if XNN_ARCH_ARM64
        vacc_lo += vaddv_f32(vf_lo);
      #else
        vacc_lo = vadd_f32(vacc_lo, vf_lo);
      #endif

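      // Move on to the upper half of vf for a possible third element.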
      vf_lo = vget_high_f32(vf);
    }
    if (elements & (1 * sizeof(float))) {
      vst1_lane_f32(output, vf_lo, 0);

      #if XNN_ARCH_ARM64
        vacc_lo += vget_lane_f32(vf_lo, 0);
      #else
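        // Shift the valid lane into the upper half of the 64-bit pair: the stale
        // second lane is shifted out and replaced with zero, so only the valid
        // value is added to the accumulator.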
        vacc_lo = vadd_f32(vacc_lo, vreinterpret_f32_u64(vshl_n_u64(vreinterpret_u64_f32(vf_lo), 32)));
      #endif
    }
  }
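  // Write out the final sum (on 32-bit ARM, first fold the 2-lane partial sum
  // with a pairwise add).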
#if XNN_ARCH_ARM64
  *sum = vacc_lo;
#else
  vst1_lane_f32(sum, vpadd_f32(vacc_lo, vacc_lo), 0);
#endif
}