// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-lut16-p3.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>

#include <fp16/bitcasts.h>


extern XNN_INTERNAL const uint32_t xnn_table_exp2minus_k_over_16[16];

void xnn_f32_velu_ukernel__scalar_rr2_lut16_p3_x4(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_lut16_p3.prescale;
  const float valpha = params->scalar_rr2_lut16_p3.alpha;
  const float vbeta = params->scalar_rr2_lut16_p3.beta;
  const float vmagic_bias = params->scalar_rr2_lut16_p3.magic_bias;
  const float vlog2e = params->scalar_rr2_lut16_p3.log2e;
  const uint32_t vindex_mask = UINT32_C(0xF);
  const float vsat_cutoff = params->scalar_rr2_lut16_p3.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_lut16_p3.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_lut16_p3.minus_ln2_lo;
  const float vc3 = params->scalar_rr2_lut16_p3.c3;
  const float vc2 = params->scalar_rr2_lut16_p3.c2;
  const float vone = params->scalar_rr2_lut16_p3.one;

  // Per-element algorithm (names match the variables below):
  //   vz = vprescale * vx
  //   vn = round(vz * vlog2e), rounded via the magic-bias trick; the low 4 bits of its
  //        bit pattern index the 16-entry table, the remaining bits form the exponent of vs
  //   vs = 2**(vn / 16), reconstructed from the table entry and the exponent bits
  //   vt = vz + vn * vminus_ln2 (two-step "rr2" range reduction, hi + lo parts)
  //   exp(vz) - 1 ~= (vs - 1) + vs * (vt + vc2 * vt^2 + vc3 * vt^3)
  // For vx >= 0 the output is vbeta * vx; for vx < 0 it is valpha * (exp(vz) - 1).
  for (; n >= 4 * sizeof(float); n -= 4 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    float vx2 = x[2];
    float vx3 = x[3];
    x += 4;

    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;
    const float vz2 = vx2 * vprescale;
    const float vz3 = vx3 * vprescale;

    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;
    float vn2 = vz2 * vlog2e + vmagic_bias;
    float vn3 = vz3 * vlog2e + vmagic_bias;

    const uint32_t ven0 = fp32_to_bits(vn0) << 19;
    const uint32_t vidx0 = fp32_to_bits(vn0) & vindex_mask;
    vn0 -= vmagic_bias;
    const uint32_t ven1 = fp32_to_bits(vn1) << 19;
    const uint32_t vidx1 = fp32_to_bits(vn1) & vindex_mask;
    vn1 -= vmagic_bias;
    const uint32_t ven2 = fp32_to_bits(vn2) << 19;
    const uint32_t vidx2 = fp32_to_bits(vn2) & vindex_mask;
    vn2 -= vmagic_bias;
    const uint32_t ven3 = fp32_to_bits(vn3) << 19;
    const uint32_t vidx3 = fp32_to_bits(vn3) & vindex_mask;
    vn3 -= vmagic_bias;

    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vs0 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx0] + ven0);
    float vt1 = vn1 * vminus_ln2_hi + vz1;
    float vs1 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx1] + ven1);
    float vt2 = vn2 * vminus_ln2_hi + vz2;
    float vs2 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx2] + ven2);
    float vt3 = vn3 * vminus_ln2_hi + vz3;
    float vs3 = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx3] + ven3);

    // Saturation: for vz below the cutoff the result is valpha * (exp(vz) - 1) ~= -valpha,
    // obtained by forcing vs and vt to zero.
    vt0 = vn0 * vminus_ln2_lo + vt0;
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    vt1 = vn1 * vminus_ln2_lo + vt1;
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }
    vt2 = vn2 * vminus_ln2_lo + vt2;
    if XNN_UNPREDICTABLE(vz2 <= vsat_cutoff) {
      vs2 = 0.0f;
      vt2 = 0.0f;
    }
    vt3 = vn3 * vminus_ln2_lo + vt3;
    if XNN_UNPREDICTABLE(vz3 <= vsat_cutoff) {
      vs3 = 0.0f;
      vt3 = 0.0f;
    }

    float vp0 = vc3 * vt0 + vc2;
    float vp1 = vc3 * vt1 + vc2;
    float vp2 = vc3 * vt2 + vc2;
    float vp3 = vc3 * vt3 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;
    vp2 *= vt2;
    vp3 *= vt3;

    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;
    vt2 *= vs2;
    vs2 -= vone;
    vt3 *= vs3;
    vs3 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;
    vp2 = vp2 * vt2 + vt2;
    vp3 = vp3 * vt3 + vt3;

    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;
    const float ve2 = (vp2 + vs2) * valpha;
    float vy2 = vx2 * vbeta;
    const float ve3 = (vp3 + vs3) * valpha;
    float vy3 = vx3 * vbeta;

    // Select the exponential branch for negative inputs.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }
    if XNN_UNPREDICTABLE(vx2 < 0.0f) {
      vy2 = ve2;
    }
    if XNN_UNPREDICTABLE(vx3 < 0.0f) {
      vy3 = ve3;
    }

    y[0] = vy0;
    y[1] = vy1;
    y[2] = vy2;
    y[3] = vy3;
    y += 4;
  }
  // Handle the remaining 1-3 elements one at a time.
  if XNN_UNLIKELY(n != 0) {
    do {
      float vx = *x++;

      const float vz = vx * vprescale;

      float vn = vz * vlog2e + vmagic_bias;
      const uint32_t ven = fp32_to_bits(vn) << 19;
      const uint32_t vidx = fp32_to_bits(vn) & vindex_mask;
      vn -= vmagic_bias;

      float vt = vn * vminus_ln2_hi + vz;
      float vs = fp32_from_bits(xnn_table_exp2minus_k_over_16[vidx] + ven);

      vt = vn * vminus_ln2_lo + vt;
      if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
        vs = 0.0f;
        vt = 0.0f;
      }

      float vp = vc3 * vt + vc2;
      vp *= vt;

      vt *= vs;
      vs -= vone;
      vp = vp * vt + vt;
      const float ve = (vp + vs) * valpha;

      float vy = vx * vbeta;
      if XNN_UNPREDICTABLE(vx < 0.0f) {
        vy = ve;
      }

      *y++ = vy;

      n -= sizeof(float);
    } while (n != 0);
  }
}