// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["RNDNE", "RNDZ", "RNDU", "RNDD"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/vunary.h>


$_MM_FROUND_TO_FLAG = {
$  "RNDNE": "_MM_FROUND_TO_NEAREST_INT",
$  "RNDZ": "_MM_FROUND_TO_ZERO",
$  "RNDU": "_MM_FROUND_TO_POS_INF",
$  "RNDD": "_MM_FROUND_TO_NEG_INF",
$}[OP]
void xnn_f32_v${OP.lower()}_ukernel__avx_x${BATCH_TILE}(
    size_t n,
    const float* x,
    float* y,
    const union xnn_f32_rnd_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);

  // Main loop: round BATCH_TILE floats per iteration.
  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m256 vx${ABC[0:8]} = _mm256_loadu_ps(x);
    $for N in range(8, BATCH_TILE, 8):
      const __m256 vx${ABC[N:N+8]} = _mm256_loadu_ps(x + ${N});
    x += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 8):
      const __m256 vy${ABC[N:N+8]} = _mm256_round_ps(vx${ABC[N:N+8]}, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    _mm256_storeu_ps(y, vy${ABC[0:8]});
    $for N in range(8, BATCH_TILE, 8):
      _mm256_storeu_ps(y + ${N}, vy${ABC[N:N+8]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 8:
    // Secondary loop: round 8 floats per iteration.
    for (; n >= 8 * sizeof(float); n -= 8 * sizeof(float)) {
      const __m256 vx = _mm256_loadu_ps(x);
      x += 8;

      const __m256 vy = _mm256_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

      _mm256_storeu_ps(y, vy);
      y += 8;
    }
  // Tail: 1 to 7 remaining floats, loaded with a mask to avoid reading past the end.
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 7 * sizeof(float));
    const __m256i vmask = _mm256_loadu_si256((const __m256i*) ((uintptr_t) &params->avx.mask_table[7] - n));

    const __m256 vx = _mm256_maskload_ps(x, vmask);
    const __m256 vy = _mm256_round_ps(vx, ${_MM_FROUND_TO_FLAG} | _MM_FROUND_NO_EXC);

    // Store the rounded tail 4/2/1 elements at a time.
    __m128 vy_lo = _mm256_castps256_ps128(vy);
    if (n & (4 * sizeof(float))) {
      _mm_storeu_ps(y, vy_lo);
      vy_lo = _mm256_extractf128_ps(vy, 1);
      y += 4;
    }
    if (n & (2 * sizeof(float))) {
      _mm_storel_pi((__m64*) y, vy_lo);
      vy_lo = _mm_movehl_ps(vy_lo, vy_lo);
      y += 2;
    }
    if (n & (1 * sizeof(float))) {
      _mm_store_ss(y, vy_lo);
    }
  }
}
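
// Illustration (not part of the template): instantiated with OP=RNDZ and
// BATCH_TILE=16, the generator produces xnn_f32_vrndz_ukernel__avx_x16, whose
// main-loop body above expands to:
//
//   const __m256 vx01234567 = _mm256_loadu_ps(x);
//   const __m256 vx89ABCDEF = _mm256_loadu_ps(x + 8);
//   x += 16;
//
//   const __m256 vy01234567 = _mm256_round_ps(vx01234567, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
//   const __m256 vy89ABCDEF = _mm256_round_ps(vx89ABCDEF, _MM_FROUND_TO_ZERO | _MM_FROUND_NO_EXC);
//
//   _mm256_storeu_ps(y, vy01234567);
//   _mm256_storeu_ps(y + 8, vy89ABCDEF);
//   y += 16;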
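
// Usage sketch (a minimal example under stated assumptions: the OP=RNDNE,
// BATCH_TILE=8 instantiation xnn_f32_vrndne_ukernel__avx_x8 is built, and
// params->avx.mask_table is laid out as the tail code above implies, i.e.
// consecutive int32 entries with all-ones before index 7 and zeros from
// index 7 on, so the 8 entries loaded at byte offset &mask_table[7] - n
// enable exactly the first n/sizeof(float) lanes). In XNNPACK these params
// are normally filled in by the library's init code, not by hand:
//
//   float x[11], y[11];
//   union xnn_f32_rnd_params params;
//   for (int i = 0; i < 7; i++) {
//     params.avx.mask_table[i] = -1;     // sign bit set: lane is loaded
//     params.avx.mask_table[i + 7] = 0;  // sign bit clear: lane reads as 0.0f
//   }
//   // ... fill x ...
//   xnn_f32_vrndne_ukernel__avx_x8(11 * sizeof(float), x, y, &params);
//   // Now y[i] holds x[i] rounded to the nearest integer (ties to even).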