// Auto-generated file. Do not edit!
//   Template: src/f32-velu/scalar-rr2-p6.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


// Scalar ELU micro-kernel processing 2 elements per main-loop iteration:
//   y = beta * x                         for x >= 0
//   y = alpha * (exp(prescale * x) - 1)  for x <  0
// exp() is evaluated via Cody-Waite style range reduction (ln2 split into
// high and low words) and a degree-6 polynomial approximation.
void xnn_f32_velu_ukernel__scalar_rr2_p6_x2(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const float vprescale = params->scalar_rr2_p6.prescale;
  const float valpha = params->scalar_rr2_p6.alpha;
  const float vbeta = params->scalar_rr2_p6.beta;
  const float vmagic_bias = params->scalar_rr2_p6.magic_bias;
  const float vlog2e = params->scalar_rr2_p6.log2e;
  const float vsat_cutoff = params->scalar_rr2_p6.sat_cutoff;
  const float vminus_ln2_hi = params->scalar_rr2_p6.minus_ln2_hi;
  const float vminus_ln2_lo = params->scalar_rr2_p6.minus_ln2_lo;
  const float vc6 = params->scalar_rr2_p6.c6;
  const float vc5 = params->scalar_rr2_p6.c5;
  const float vc4 = params->scalar_rr2_p6.c4;
  const float vc3 = params->scalar_rr2_p6.c3;
  const float vc2 = params->scalar_rr2_p6.c2;
  const float vone = params->scalar_rr2_p6.one;

  for (; n >= 2 * sizeof(float); n -= 2 * sizeof(float)) {
    float vx0 = x[0];
    float vx1 = x[1];
    x += 2;

    const float vz0 = vx0 * vprescale;
    const float vz1 = vx1 * vprescale;

    // Round z / ln(2) to the nearest integer n with the magic-bias trick.
    float vn0 = vz0 * vlog2e + vmagic_bias;
    float vn1 = vz1 * vlog2e + vmagic_bias;

    // Build s = 2**n by shifting the low bits of the biased value into the exponent field.
    float vs0 = uint32_as_float(float_as_uint32(vn0) << 23);
    vn0 -= vmagic_bias;
    float vs1 = uint32_as_float(float_as_uint32(vn1) << 23);
    vn1 -= vmagic_bias;

    // Reduced argument t = z - n * ln(2), computed in two steps (hi + lo) for accuracy.
    float vt0 = vn0 * vminus_ln2_hi + vz0;
    float vt1 = vn1 * vminus_ln2_hi + vz1;

    vt0 = vn0 * vminus_ln2_lo + vt0;
    vt1 = vn1 * vminus_ln2_lo + vt1;

    // Below the saturation cutoff the exponent trick can no longer represent 2**n;
    // force s = t = 0 so that the final result saturates to -alpha.
    if XNN_UNPREDICTABLE(vz0 <= vsat_cutoff) {
      vs0 = 0.0f;
      vt0 = 0.0f;
    }
    if XNN_UNPREDICTABLE(vz1 <= vsat_cutoff) {
      vs1 = 0.0f;
      vt1 = 0.0f;
    }

    // Horner evaluation of p = c2*t + c3*t^2 + ... + c6*t^5, so that t*p ~= exp(t) - 1 - t.
    float vp0 = vc6 * vt0 + vc5;
    float vp1 = vc6 * vt1 + vc5;

    vp0 = vp0 * vt0 + vc4;
    vp1 = vp1 * vt1 + vc4;

    vp0 = vp0 * vt0 + vc3;
    vp1 = vp1 * vt1 + vc3;

    vp0 = vp0 * vt0 + vc2;
    vp1 = vp1 * vt1 + vc2;

    vp0 *= vt0;
    vp1 *= vt1;

    // Reconstruct expm1(z) = s*t*(1 + p) + (s - 1).
    vt0 *= vs0;
    vs0 -= vone;
    vt1 *= vs1;
    vs1 -= vone;

    vp0 = vp0 * vt0 + vt0;
    vp1 = vp1 * vt1 + vt1;

    const float ve0 = (vp0 + vs0) * valpha;
    float vy0 = vx0 * vbeta;
    const float ve1 = (vp1 + vs1) * valpha;
    float vy1 = vx1 * vbeta;

    // Select the ELU branch: alpha*expm1(z) for negative inputs, beta*x otherwise.
    if XNN_UNPREDICTABLE(vx0 < 0.0f) {
      vy0 = ve0;
    }
    if XNN_UNPREDICTABLE(vx1 < 0.0f) {
      vy1 = ve1;
    }

    y[0] = vy0;
    y[1] = vy1;
    y += 2;
  }
  // Remainder: at most one trailing element when the element count is odd.
  if XNN_UNLIKELY(n != 0) {
    float vx = *x;

    const float vz = vx * vprescale;

    float vn = vz * vlog2e + vmagic_bias;
    float vs = uint32_as_float(float_as_uint32(vn) << 23);
    vn -= vmagic_bias;

    float vt = vn * vminus_ln2_hi + vz;
    vt = vn * vminus_ln2_lo + vt;

    if XNN_UNPREDICTABLE(vz <= vsat_cutoff) {
      vs = 0.0f;
      vt = 0.0f;
    }

    float vp = vc6 * vt + vc5;
    vp = vp * vt + vc4;
    vp = vp * vt + vc3;
    vp = vp * vt + vc2;
    vp *= vt;
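    // Reconstruct expm1(z) for the trailing element exactly as in the main loop:
    // alpha * (s*t*(1 + p) + (s - 1)) gives the negative-branch ELU value.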
    vt *= vs;
    vs -= vone;
    vp = vp * vt + vt;
    const float ve = (vp + vs) * valpha;

    float vy = vx * vbeta;
    if XNN_UNPREDICTABLE(vx < 0.0f) {
      vy = ve;
    }

    *y = vy;
  }
}