// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

#include <fp16/bitcasts.h>


// Computes the scaled ELU activation:
//   y = beta * x                          for x >= 0
//   y = alpha * (exp(prescale * x) - 1)   for x < 0
// exp(z) is evaluated with the "rr2-p6" scheme: range reduction z = n*ln2 + t
// using a two-term (hi/lo) split of ln2, a degree-6 polynomial in t, and
// reconstruction via s = 2^n. The main loop processes 5 elements at a time.
void xnn_f32_velu_ukernel__scalar_rr2_p6_x5(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar.prescale;
  const float valpha = params->scalar.alpha;
  const float vbeta = params->scalar.beta;

  const float vmagic_bias = 0x1.8000FEp23f;
  const float vlog2e = 0x1.715476p+0f;
  const float vsat_cutoff = -0x1.154246p+4f;
  const float vminus_ln2_hi = -0x1.62E440p-1f;
  const float vminus_ln2_lo = 0x1.0105C6p-21f;
  const float vc6 = 0x1.6b7338p-10f;
  const float vc5 = 0x1.12278Ep-7f;
  const float vc4 = 0x1.555716p-5f;
  const float vc3 = 0x1.5554B0p-3f;
  const float vc2 = 0x1.FFFFFEp-2f;
  const float vone = 1.0f;

  for (; n >= 5 * sizeof(float); n -= 5 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    float vx4 = x[4];
    x += 5;

    // z := prescale * x is the argument to exp(.) on the negative branch.
    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;
    const float vz4 = vx4 * vprescale;

    // n := round(z / ln2), computed with the magic-bias trick.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;
    float vn4 = vz4 * vlog2e + vmagic_bias;

    // s := 2^n: shift the low bits of the magic-biased n into the float
    // exponent field, then subtract the bias to recover n itself.
    float vs0 = fp32_from_bits(fp32_to_bits(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = fp32_from_bits(fp32_to_bits(vn1) << 23);
    vn1 -= vmagic_bias;
    float vs2 = fp32_from_bits(fp32_to_bits(vn2) << 23);
    vn2 -= vmagic_bias;
    float vs3 = fp32_from_bits(fp32_to_bits(vn3) << 23);
    vn3 -= vmagic_bias;
    float vs4 = fp32_from_bits(fp32_to_bits(vn4) << 23);
    vn4 -= vmagic_bias;

    // t := z - n*ln2, with ln2 split into hi and lo parts (Cody-Waite).
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vt4 = vn4 * vminus_ln2_hi + vz4;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;
    vt2 = vn2 * vminus_ln2_lo + vt2;
    vt3 = vn3 * vminus_ln2_lo + vt3;
    vt4 = vn4 * vminus_ln2_lo + vt4;

    // Below the cutoff, exp(z) - 1 saturates to -1; zeroing s and t makes
    // the reconstruction below produce exactly that (so y = -alpha).
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz4 <= vsat_cutoff) {
      vs4 = 0.0f;
      vt4 = 0.0f;
    }

    // p := t*(c2 + t*(c3 + t*(c4 + t*(c5 + t*c6)))), by Horner's scheme,
    // so that exp(t) - 1 ~ t*(1 + p).
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;
    float vp2 = vc6 * vt2 + vc5;
    float vp3 = vc6 * vt3 + vc5;
    float vp4 = vc6 * vt4 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;
    vp2 = vp2 * vt2 + vc4;
    vp3 = vp3 * vt3 + vc4;
    vp4 = vp4 * vt4 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;
    vp2 = vp2 * vt2 + vc3;
    vp3 = vp3 * vt3 + vc3;
    vp4 = vp4 * vt4 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;
    vp2 = vp2 * vt2 + vc2;
    vp3 = vp3 * vt3 + vc2;
    vp4 = vp4 * vt4 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;
    vp4 *= vt4;

    // Reconstruct exp(z) - 1 = (s - 1) + s*t*(1 + p).
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;
    vt4 *= vs4;
    vs4 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;
    vp4 = vp4 * vt4 + vt4;

    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;
    const float ve4 = (vp4 + vs4) * valpha;
    float vy4 = vx4 * vbeta;

    // Select the branch: y = beta*x for x >= 0, y = alpha*(exp(z) - 1) for x < 0.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }
    if XNN_UNPREDICTABLE(vx4 < 0.0f) {
      vy4 = ve4;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y[4] = vy4;
    y += 5;
  }
  // Scalar tail: same algorithm as the main loop, applied to the remaining
  // 1-4 elements one at a time.
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      float vs = fp32_from_bits(fp32_to_bits(vn) << 23);
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      vt = vn * vminus_ln2_lo + vt;

      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc6 * vt + vc5;
      vp = vp * vt + vc4;
      vp = vp * vt + vc3;
      vp = vp * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}
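
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only; not part of the generated kernel). It
// assumes the scalar params layout read above (prescale/alpha/beta) and fills
// the union by hand; production code should go through XNNPACK's parameter
// initialization helpers instead.
//
//   union xnn_f32_elu_params params;
//   params.scalar.prescale = 1.0f;  // scale applied to the input
//   params.scalar.alpha = 1.0f;     // ELU alpha (negative-branch scale)
//   params.scalar.beta = 1.0f;      // scale applied on the non-negative branch
//
//   float x[7] = {-2.0f, -1.0f, -0.5f, 0.0f, 0.5f, 1.0f, 2.0f};
//   float y[7];
//   // The first argument is a byte count and must be a multiple of
//   // sizeof(float), as asserted by the kernel.
//   xnn_f32_velu_ukernel__scalar_rr2_p6_x5(sizeof(x), x, y, &params);
// ---------------------------------------------------------------------------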