// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/vunary.h>


static const int32_t mask_table[14] = {-1, -1, -1, -1, -1, -1, -1, 0, 0, 0, 0, 0, 0, 0};

void xnn_f32_velu_ukernel__avx2_rr1_lut8_p4_perm_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_elu_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n % sizeof(float) == 0);

  const __m256 vprescale = _mm256_broadcast_ps((const __m128*) params->sse.prescale);
  const __m256 valpha = _mm256_broadcast_ps((const __m128*) params->sse.alpha);
  const __m256 vbeta = _mm256_broadcast_ps((const __m128*) params->sse.beta);

  const __m256 vsat_cutoff = _mm256_set1_ps(-0x1.154246p+4f);
  const __m256 vmagic_bias = _mm256_set1_ps(0x1.800000p20f);
  const __m256 vlog2e = _mm256_set1_ps(0x1.715476p+0f);
  const __m256i vtable = _mm256_set_epi32(
    0x3F7AC0C7, 0x3F7744FD, 0x3F75672A, 0x3F7504F3, 0x3F75FED7, 0x3F7837F0, 0x3F7B95C2, 0x3F800000);
  const __m256 vminus_ln2 = _mm256_set1_ps(-0x1.62E43p-1f);
  const __m256 vc4 = _mm256_set1_ps(0x1.5558ECp-5f);
  const __m256 vc3 = _mm256_set1_ps(0x1.555C20p-3f);
  const __m256 vc2 = _mm256_set1_ps(0x1.000000p-1f);

  $if BATCH_TILE > 8:
    // Main loop: ${BATCH_TILE} elements per iteration; the math mirrors the 8-element loop below.
    for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
      __m256 vx0 = _mm256_loadu_ps(x);
      $for N in range(1, SIMD_TILE):
        __m256 vx${N} = _mm256_loadu_ps(x + ${N * 8});
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        const __m256 vz${N} = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx${N}, vprescale));

      $for N in range(SIMD_TILE):
        __m256 vn${N} = _mm256_fmadd_ps(vz${N}, vlog2e, vmagic_bias);

      $for N in range(SIMD_TILE):
        const __m256i ven${N} = _mm256_slli_epi32(_mm256_castps_si256(vn${N}), 20);
        const __m256i vl${N} = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn${N}));
        vn${N} = _mm256_sub_ps(vn${N}, vmagic_bias);

      $for N in range(SIMD_TILE):
        __m256 vs${N} = _mm256_castsi256_ps(_mm256_add_epi32(vl${N}, ven${N}));
        __m256 vt${N} = _mm256_fmadd_ps(vn${N}, vminus_ln2, vz${N});

      $for N in range(SIMD_TILE):
        __m256 vp${N} = _mm256_fmadd_ps(vc4, vt${N}, vc3);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vc2);

      $for N in range(SIMD_TILE):
        vp${N} = _mm256_mul_ps(vp${N}, vt${N});
        vt${N} = _mm256_mul_ps(vt${N}, vs${N});

      $for N in range(SIMD_TILE):
        vs${N} = _mm256_fmsub_ps(vs${N}, valpha, valpha);
        vp${N} = _mm256_fmadd_ps(vp${N}, vt${N}, vt${N});

      $for N in range(SIMD_TILE):
        const __m256 ve${N} = _mm256_fmadd_ps(vp${N}, valpha, vs${N});
        vx${N} = _mm256_mul_ps(vx${N}, vbeta);

      $for N in range(SIMD_TILE):
        const __m256 vy${N} = _mm256_blendv_ps(vx${N}, ve${N}, vx${N});

      _mm256_storeu_ps(y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm256_storeu_ps(y + ${N * 8}, vy${N});
      y += ${BATCH_TILE};
    }
  for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
    __m256 vx = _mm256_loadu_ps(x);
    x += 8;

    // z := prescale * x, clamped at the saturation cutoff; for smaller z the output would be the constant -alpha.
    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

    // n := z / log(2), rounded to a multiple of 1/8 via the magic-bias trick.
    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    // Reconstruct s := 2**n: the 3 low bits of the rounded value select a table entry,
    // the remaining bits are shifted towards the floating-point exponent field.
    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
    // Undo the magic bias to recover n as a float.
    vn = _mm256_sub_ps(vn, vmagic_bias);

    // t := z - n * log(2), the reduced argument.
    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    // p := c2*t + c3*t^2 + c4*t^3, so that exp(t) ~ 1 + t + t*p.
    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);

    // e := alpha * expm1(z), reconstructed as alpha * (s - 1) + alpha * s * t * (1 + p).
    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_fmsub_ps(vs, valpha, valpha);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);

    // Select alpha * expm1(prescale * x) where the sign bit of beta * x is set (negative inputs), beta * x otherwise.
    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    _mm256_storeu_ps(y, vy);
    y += 8;
  }
  // Handle the last 1-7 elements with a masked load and partial stores.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &mask_table[7] - n));

    __m256 vx = _mm256_maskload_ps(x, vmask);

    const __m256 vz = _mm256_max_ps(vsat_cutoff, _mm256_mul_ps(vx, vprescale));

    __m256 vn = _mm256_fmadd_ps(vz, vlog2e, vmagic_bias);
    const __m256i ven = _mm256_slli_epi32(_mm256_castps_si256(vn), 20);
    const __m256i vl = _mm256_permutevar8x32_epi32(vtable, _mm256_castps_si256(vn));
    __m256 vs = _mm256_castsi256_ps(_mm256_add_epi32(vl, ven));
    vn = _mm256_sub_ps(vn, vmagic_bias);

    __m256 vt = _mm256_fmadd_ps(vn, vminus_ln2, vz);

    __m256 vp = _mm256_fmadd_ps(vc4, vt, vc3);
    vp = _mm256_fmadd_ps(vp, vt, vc2);
    vp = _mm256_mul_ps(vp, vt);

    vt = _mm256_mul_ps(vt, vs);
    vs = _mm256_fmsub_ps(vs, valpha, valpha);
    vp = _mm256_fmadd_ps(vp, vt, vt);
    const __m256 ve = _mm256_fmadd_ps(vp, valpha, vs);

    vx = _mm256_mul_ps(vx, vbeta);
    const __m256 vy = _mm256_blendv_ps(vx, ve, vx);

    // _mm256_maskstore_ps(y, vmask, vy) could be used here, but triggers msan failures (probably an msan bug).
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}