// Auto-generated file. Do not edit!
//   Template: src/f32-raddstoreexpminusmax/scalar-lut64-p2.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/common.h>
#include <xnnpack/raddstoreexpminusmax.h>

#include <fp16/bitcasts.h>


// Note: redeclared as uint32_t[] to avoid redundant bitcasts.
extern XNN_INTERNAL const uint32_t xnn_table_exp2_k_over_64[64];
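// Each entry k of the table holds the bit pattern of 2**(k / 64) for k = 0..63. All of these
// values lie in [1.0, 2.0), so their (unbiased) floating-point exponent is 0; the kernel below
// adjusts the exponent field directly to scale them by 2**e.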

void xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2(
    size_t elements,
    const float* input,
    float* output,
    float* sum,
    float vi_max)
{
  assert(elements % sizeof(float) == 0);

  const float vmagic_bias = 0x1.800000p23f;
  // The smallest x for which expf(x) is normalized.
  const float vdenorm_cutoff = -0x1.5D589Ep6f;
  const float vlog2e_x64  = 0x1.715476p6f;
  // Last 13 bits are zeroes
  const float vminus_ln2_o64_hi = -0x1.630000p-7f;
  const float vminus_ln2_o64_lo =  0x1.BD0106p-19f;
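  // (This hi/lo pair implements the Cody-Waite range reduction used below: the last 13 mantissa
  //  bits of vminus_ln2_o64_hi are zero, so its product with the integer-valued n, which stays
  //  below about 8064 in magnitude for in-range x, is exact, and the rounding error of the
  //  reduction is confined to the much smaller lo term.)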

  const float vc2 = 0x1.FFFF0Ap-2f;

  const uint32_t vindex_mask = UINT32_C(0x3F);

  float vacc0 = 0.0f;
  float vacc1 = 0.0f;
  for (; elements >= 2 * sizeof(float); elements -= 2 * sizeof(float)) {
    // Load 2 inputs at a time.
    const float vi0 = input[0];
    const float vi1 = input[1];
    input += 2;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx0 = vi0 - vi_max;
    const float vx1 = vi1 - vi_max;

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, and then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fix up the result for such inputs
    // at the very end of the algorithm.
    float vn0 = vx0 * vlog2e_x64 + vmagic_bias;
    float vn1 = vx1 * vlog2e_x64 + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
    // i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where
    // e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by adding e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const uint32_t ve0 = (fp32_to_bits(vn0) & UINT32_C(0xFFFFFFC0)) << 17;
    const uint32_t ve1 = (fp32_to_bits(vn1) & UINT32_C(0xFFFFFFC0)) << 17;

    // Use bits 0:6 of n, as an integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs0 = fp32_from_bits(xnn_table_exp2_k_over_64[vidx0] + ve0);
    const float vs1 = fp32_from_bits(xnn_table_exp2_k_over_64[vidx1] + ve1);

    // Subtract the large number back to get the final n := round(x * 64 / log(2)) as a floating-point number.
    vn0 -= vmagic_bias;
    vn1 -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use the Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt0 = vn0 * vminus_ln2_o64_hi + vx0;
    float vt1 = vn1 * vminus_ln2_o64_hi + vx1;

    vt0 = vn0 * vminus_ln2_o64_lo + vt0;
    vt1 = vn1 * vminus_ln2_o64_lo + vt1;

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp0 = vt0 * vc2;
    float vp1 = vt1 * vc2;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf0 = vp0 * vs0 + vs0;
    float vf1 = vp1 * vs1 + vs1;
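    // Illustrative worked example (values approximate): for x = -1.0f,
    //   x * 64 / log(2) ~= -92.33, so n rounds to -92, giving e = -2 and idx = 36 (n mod 64);
    //   s = 2**-2 * 2**(36/64) ~= 0.3692, t = x - n * log(2) / 64 ~= -0.00360, p ~= -0.00359,
    //   and f = s + s * p ~= 0.36788 ~= expf(-1.0f).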

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx0 < vdenorm_cutoff) {
      vf0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vx1 < vdenorm_cutoff) {
      vf1 = 0.0f;
    }

    // Store 2 outputs at a time.
    output[0] = vf0;
    output[1] = vf1;
    output += 2;

    // Accumulate computed exponents.
    vacc0 += vf0;
    vacc1 += vf1;
  }
  // Add up all accumulators to vacc0.
  vacc0 += vacc1;

  float vacc = vacc0;
  for (; elements >= sizeof(float); elements -= sizeof(float)) {
    // Load 1 input at a time.
    const float vi = *input++;

    // Subtract maximum input x := i - i_max. This implies x <= 0.
    const float vx = vi - vi_max;

    // Compute reduced argument n := round(x * 64 / log(2)).
    // We do it by adding a large number (magic bias), which causes rounding of the result to an integer, and then
    // subtracting the large number back. The first addition is combined with multiplication by log2e into a single FMA
    // instruction. The trick with adding a large number is valid only within certain bounds
    // (|x * 64 / log(2)| <= 2**22, i.e. |x| <= 0x1.62E43p+15 = 45426.09375), but that is acceptable, because inputs
    // outside of [-87.336540, 0.0] result in denormalized or underflown expf(x). We fix up the result for such inputs
    // at the very end of the algorithm.
    float vn = vx * vlog2e_x64 + vmagic_bias;

    // Create a floating-point number s (scale) such that s := 2**(n / 64) for such inputs that expf(x) is normalized,
    // i.e. -87.33642 <= x <= 0.0. As n has 6 fractional bits, we split s == 2**(n / 64) = 2**e * 2**(n / 64 - e), where
    // e := int(n / 64). We create s in two steps:
    // 1. Fetch 2**(n / 64 - e) = 2**(n % 64) from the table using the 6 low bits of n, as integer. Note that the
    //    fetched values are in the [1.0, 2.0) range, i.e. their floating-point exponent is 0.
    // 2. Adjust the fetched value by adding e to its floating-point exponent. The result is always a normalized
    //    number, because for -87.33642 <= x <= 0.0 (inputs for which expf(x) is normalized) we have -126 <= e <= 0,
    //    and thus the adjusted exponent is not lower than -126.
    //
    // Extract e from bits 6:14 of n and shift it into bits 23:31 (position of floating-point exponent).
    const uint32_t ve = (fp32_to_bits(vn) & UINT32_C(0xFFFFFFC0)) << 17;

    // Use bits 0:6 of n, as an integer, as an index for table lookup of l := 2**(n % 64).
    const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
    // Adjust exponent of the value l fetched from the table to get the final s value.
    const float vs = fp32_from_bits(xnn_table_exp2_k_over_64[vidx] + ve);

    // Subtract the large number back to get the final n := round(x * 64 / log(2)) as a floating-point number.
    vn -= vmagic_bias;

    // Compute reduced argument t := x - n * log(2) / 64.
    // Use the Cody-Waite range reduction method (note the two constants representing log(2) / 64) to improve accuracy.
    float vt = vn * vminus_ln2_o64_hi + vx;
    vt = vn * vminus_ln2_o64_lo + vt;

    // Compute degree-2 polynomial approximation for exp(t) on [-log(2)/128, log(2)/128].
    float vp = vt * vc2;
    vp = vp * vt + vt;

    // Reconstruct the final f value:
    //   f = s * (1 + t * (1 + t * c2))
    //     = s * (1 + t + t * (t * c2))
    //     = s + s * (t + t * (t * c2))
    //     = s + s * p
    float vf = vp * vs + vs;

    // For inputs below denormal cutoff, replace output with +0.0f.
    // Note that for NaN inputs, comparison result is false, and outputs are left unchanged.
    if XNN_UNPREDICTABLE(vx < vdenorm_cutoff) {
      vf = 0.0f;
    }

    // Store 1 output at a time.
    *output++ = vf;

    // Accumulate computed exponents.
    vacc += vf;
  }
  *sum = vacc;
}
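
// Usage sketch (illustrative only, not part of the generated kernel): one way to compute a
// softmax over n > 0 floats with this micro-kernel. The helper name softmax_scalar_example and
// the max-reduction loop are assumptions for the example; only the kernel call itself comes from
// this file. Note that the kernel's first argument is a byte count, not an element count.
void softmax_scalar_example(size_t n, const float* x, float* y) {
  // Reduce to the maximum input; subtracting it keeps every exp() argument <= 0.
  float x_max = x[0];
  for (size_t i = 1; i < n; i++) {
    if (x[i] > x_max) {
      x_max = x[i];
    }
  }

  // Store exp(x[i] - x_max) into y and accumulate the sum of the stored values.
  float sum_exp;
  xnn_f32_raddstoreexpminusmax_ukernel__scalar_lut64_p2_x2_acc2(
      n * sizeof(float), x, y, &sum_exp, x_max);

  // Normalize by the accumulated sum to obtain probabilities.
  const float inv_sum = 1.0f / sum_exp;
  for (size_t i = 0; i < n; i++) {
    y[i] *= inv_sum;
  }
}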