// Copyright 2022 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 8 == 0
$assert BATCH_TILE >= 8
$SIMD_TILE = BATCH_TILE // 8
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ABS", "NEG"]
#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>
#include <xnnpack/vunary.h>


$_MM_OP_SI128 = {
$  "ABS": lambda x: "_mm_and_si128(%s, vnonsign_mask)" % x,
$  "NEG": lambda x: "_mm_xor_si128(%s, vsign_mask)" % x,
$}[OP]
$PARAMS = {
$  "ABS": "xnn_f16_abs_params",
$  "NEG": "xnn_f16_neg_params",
$}[OP]
void xnn_f16_v${OP.lower()}_ukernel__sse2_x${BATCH_TILE}(
    size_t n,
    const void* input,
    void* output,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(n != 0);
  assert(n % sizeof(uint16_t) == 0);
  assert(input != NULL);
  assert(output != NULL);

  const uint16_t* i = (const uint16_t*) input;
  uint16_t* o = (uint16_t*) output;
  // The op only touches the FP16 sign bit: ABS clears it (AND), NEG flips it (XOR).
  $if OP == "ABS":
    const __m128i vnonsign_mask = _mm_load_si128((const __m128i*) params->sse.nonsign_mask);
  $elif OP == "NEG":
    const __m128i vsign_mask = _mm_load_si128((const __m128i*) params->sse.sign_mask);
  $if BATCH_TILE > 8:
    // Main loop: process ${BATCH_TILE} half-precision elements (${SIMD_TILE} XMM registers) per iteration.
    for (; n >= ${BATCH_TILE} * sizeof(uint16_t); n -= ${BATCH_TILE} * sizeof(uint16_t)) {
      __m128i vacc${ABC[0]} = _mm_loadu_si128((const __m128i*) i);
      $for N in range(1, SIMD_TILE):
        __m128i vacc${ABC[N]} = _mm_loadu_si128((const __m128i*) (i + ${N*8}));
      i += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        vacc${ABC[N]} = ${_MM_OP_SI128("vacc" + ABC[N])};

      _mm_storeu_si128((__m128i*) o, vacc${ABC[0]});
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (o + ${N*8}), vacc${ABC[N]});
      o += ${BATCH_TILE};
    }
  // Process 8 elements (one XMM register) at a time.
  for (; n >= 8 * sizeof(uint16_t); n -= 8 * sizeof(uint16_t)) {
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    i += 8;
    vacc = ${_MM_OP_SI128("vacc")};
    _mm_storeu_si128((__m128i*) o, vacc);
    o += 8;
  }
  if XNN_UNLIKELY(n != 0) {
    // Remainder of 1..7 elements: load a full vector (XNN_OOB_READS allows reading past the
    // end of the input), apply the op, then store only the valid lanes, 4/2/1 at a time.
    __m128i vacc = _mm_loadu_si128((const __m128i*) i);
    vacc = ${_MM_OP_SI128("vacc")};
    if (n & (4 * sizeof(uint16_t))) {
      _mm_storel_epi64((__m128i*) o, vacc);
      o += 4;
      vacc = _mm_unpackhi_epi64(vacc, vacc);
    }
    if (n & (2 * sizeof(uint16_t))) {
      unaligned_store_u32(o, (uint32_t) _mm_cvtsi128_si32(vacc));
      o += 2;
      vacc = _mm_srli_epi64(vacc, 32);
    }
    if (n & (1 * sizeof(uint16_t))) {
      *o = (uint16_t) _mm_extract_epi16(vacc, 0);
    }
  }
}