// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


void xnn_f32_velu_ukernel__avx2_rr1_p6_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vprescale = _mm256_load_ps(params->avx2_rr1_p6.prescale);
  const __m256 valpha = _mm256_load_ps(params->avx2_rr1_p6.alpha);
  const __m256 vbeta = _mm256_load_ps(params->avx2_rr1_p6.beta);
  const __m256 vsat_cutoff = _mm256_load_ps(params->avx2_rr1_p6.sat_cutoff);
  const __m256 vmagic_bias = _mm256_load_ps(params->avx2_rr1_p6.magic_bias);
  const __m256 vlog2e = _mm256_load_ps(params->avx2_rr1_p6.log2e);
  const __m256 vminus_ln2 = _mm256_load_ps(params->avx2_rr1_p6.minus_ln2);
  const __m256 vc6 = _mm256_load_ps(params->avx2_rr1_p6.c6);
  const __m256 vc5 = _mm256_load_ps(params->avx2_rr1_p6.c5);
  const __m256 vc4 = _mm256_load_ps(params->avx2_rr1_p6.c4);
  const __m256 vc3 = _mm256_load_ps(params->avx2_rr1_p6.c3);
  const __m256 vc2 = _mm256_load_ps(params->avx2_rr1_p6.c2);

  $if BATCH_TILE > 8:
    // Main loop: the same algorithm as the 8-element loop below, unrolled to
    // process ${BATCH_TILE} elements (${SIMD_TILE} AVX2 vectors) per iteration.
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m256 vx0 = _mm256_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));

      $for N in range(SIMD_TILE):
        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);

      $for N in range(SIMD_TILE):
        __m256 vs${N} = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn${N}), 23));
        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);

      $for N in range(SIMD_TILE):
        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});

      $for N in range(SIMD_TILE):
        __m256 vp${N} = _mm256_fmadd_ps(vc6, vt${N}, vc5);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc4);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc3);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
        vt${N} = _mm256_mul_ps(vt${N}, vs${N});

      $for N in range(SIMD_TILE):
        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});

      $for N in range(SIMD_TILE):
        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
        vx${N} = _mm256_mul_ps(vx${N}, vbeta);

      $for N in range(SIMD_TILE):
        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});

      _mm256_storeu_ps(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(y + ${N * 8}, vy${N});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    // Pre-scale the input and clamp it from below at the saturation cutoff,
    // beyond which expm1f(z) == -1.0f in single precision.
    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

    // n = round(z / log(2)) via the magic-bias trick; s = 2**n is then
    // reconstructed by shifting the low mantissa bits of vn into the exponent.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // Reduced argument t = z - n * log(2).
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    // Degree-6 polynomial approximation, p ~ (exp(t) - 1 - t) / t, evaluated
    // with Horner's scheme on the reduced range.
    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
    vp = _mm256_fmadd_ps(vp, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);

    // Reconstruct e = alpha * (exp(z) - 1) = alpha * (s * exp(t) - 1):
    // p becomes s * (exp(t) - 1) and s becomes alpha * (s - 1).
    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_fmsub_ps(vs, valpha, valpha);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);

    // y = x > 0 ? beta * x : e; the blend selects ve in lanes where the sign
    // bit of the original vx is set.
    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    // Load a mask that covers exactly the n / sizeof(float) remainder lanes.
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx2_rr1_p6.mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    __m256 vs = _mm256_castsi256_ps(_mm256_slli_epi32(_mm256_castps_si256(vn), 23));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    __m256 vp = _mm256_fmadd_ps(vc6, vt, vc5);
    vp = _mm256_fmadd_ps(vp, vt, vc4);
    vp = _mm256_fmadd_ps(vp, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);

    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_fmsub_ps(vs, valpha, valpha);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);

    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    // Store the remaining 1-7 elements in 4-, 2-, and 1-element pieces.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
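
// Illustrative usage sketch (not part of the kernel): after template expansion
// with BATCH_TILE=8, the generated ukernel could be driven directly as below.
// The init helper xnn_init_f32_elu_avx2_rr1_p6_params and the example function
// run_elu_example are assumptions for illustration only; the kernel itself only
// requires that every params->avx2_rr1_p6 field read above is filled in first.
//
//   void run_elu_example(void) {
//     float input[12], output[12];
//     for (size_t i = 0; i < 12; i++) {
//       input[i] = (float) i - 6.0f;  // mix of negative and positive inputs
//     }
//     union xnn_f32_elu_params params;
//     // prescale = alpha = beta = 1.0f gives the textbook ELU:
//     // y = x > 0 ? x : expm1(x). (Hypothetical init call.)
//     xnn_init_f32_elu_avx2_rr1_p6_params(&params, 1.0f, 1.0f, 1.0f);
//     // n is in bytes; 12 floats exercise both the 8-element vector loop
//     // and the masked 1-7 element remainder path.
//     xnn_f32_velu_ukernel__avx2_rr1_p6_x8(12 * sizeof(float), input, output, &params);
//   }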