// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

#include <fp16/bitcasts.h>


// 16-entry lookup table of 2**(-k/16) values (as IEEE-754 float bit patterns),
// defined elsewhere in XNNPACK; indexed by the low 4 bits of the rounded exponent.
extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

// Scalar ELU microkernel, 5 elements per main-loop iteration.
//
// For each input element vx:
//   vy = vx * beta                       when vx >= 0
//   vy = alpha * expm1(vx * prescale)    when vx <  0
// where expm1 is approximated via:
//   - rr2: two-step (hi/lo) Cody-Waite range reduction by ln(2)/16,
//   - lut16: a 16-entry table of 2**(-k/16) for the fractional exponent,
//   - p3: a degree-3 polynomial on the reduced argument.
//
// Arguments:
//   n      - number of BYTES to process; must be a non-zero multiple of sizeof(float).
//   x      - input array.
//   y      - output array (may alias x; each element is read before its slot is written).
//   params - precomputed scalar constants for this approximation variant.
void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x5(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  // Adding vmagic_bias shifts round(z*log2e*16)/16 into the low mantissa bits of a float.
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  // Low 4 bits of the biased value select the 2**(-k/16) table entry.
  const uint32_t vindex_mask = UINT32_C(0xF);
  // Inputs at or below this cutoff saturate: exp(z) underflows, so expm1(z) == -1.
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  // -ln(2)/16 split into high/low parts for extra-precision range reduction.
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  // Degree-3 polynomial coefficients for exp(t) - 1 on the reduced interval.
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

  // Main loop: 5 independent lanes unrolled to hide scalar FP latency.
  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    x += 5;

    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;

    // Magic-bias trick: vn now holds round(vz*log2e*16)/16 encoded in its mantissa.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;

    // ven: integer part of the exponent moved into the float exponent field
    // (shift by 19 = 23 mantissa bits - 4 index bits).
    // vidx: fractional 1/16th of the exponent, used to index the 2**(-k/16) table.
    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
    vn3 -= vmagic_bias;
    const uint32_t ven4 = fp32_to_bits(vn4) << 19;
    const uint32_t vidx4 = fp32_to_bits(vn4) & vindex_mask;
    vn4 -= vmagic_bias;

    // Range reduction (hi part): vt = vz - vn*ln2/16, computed as vn*(-ln2_hi) + vz.
    // vs = 2**vn reconstructed by adding the exponent bits to the table entry.
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);
    float vt4 = vn4 * vminus_ln2_hi + vz4;
    float vs4 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx4] + ven4);

    // Range reduction (lo part) to recover precision, then saturate:
    // below the cutoff, force vs = vt = 0 so the result collapses to -alpha
    // (vp + vs == vs - one == -1 after the steps below).
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    vt3 = vn3 * vminus_ln2_lo + vt3;
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    vt4 = vn4 * vminus_ln2_lo + vt4;
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }

    // Degree-3 polynomial: p = (c3*t + c2) * t, evaluated by Horner's scheme.
    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;
    float vp4 = vc3 * vt4 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;

    // Reassemble expm1: vt becomes t*s, vs becomes s-1, then vp = p*t*s + t*s,
    // so vp + vs approximates exp(z) - 1.
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;

    // Negative branch value (alpha * expm1) and positive branch value (beta * x).
    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;

    // Select the ELU branch per element based on the sign of the input.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y += 5;
  }
  // Remainder loop: same algorithm, one element at a time (handles 1-4 leftovers).
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = fp32_to_bits(vn) << 19;
      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}