// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE >= 16
$assert BATCH_TILE % 16 == 0
$SIMD_TILE = BATCH_TILE // 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

$if AVX:
  #include <immintrin.h>
$else:
  #include <tmmintrin.h>

$if AVX:
  #include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/lut.h>
#include <xnnpack/common.h>


void xnn_x8_lut_ukernel__${"avx" if AVX else "ssse3"}_x${BATCH_TILE}(
    size_t n,
    const uint8_t* x,
    uint8_t* y,
    const uint8_t t[restrict XNN_MIN_ELEMENTS(256)])
{
  assert(n != 0);
  assert(x != NULL);
  assert(y != NULL);

  // Load the 256-entry lookup table as 16 rows of 16 bytes.
  const __m128i vt0 = _mm_load_si128((const __m128i*) t);
  $for T in range(1, 16):
    const __m128i vt${ABC[T]} = _mm_load_si128((const __m128i*) (t + ${T * 16}));

  // PSHUFB indexes only 16 bytes, so the 256-entry lookup is decomposed into
  // 16 shuffles whose results are combined with XOR. The tables are pre-XORed
  // so that, for an input byte with high nibble K, the shuffles that hit an
  // in-range index telescope to the original sub-table vtK.
  const __m128i vtable0 = vt0;
  $for T in range(1, 8):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]});
  $for T in range(8, 16):
    const __m128i vtable${ABC[T]} = _mm_xor_si128(_mm_xor_si128(vt${ABC[T-1]}, vt${ABC[T]}), vtable${ABC[T-8]});

  // Between shuffles every index is stepped down by 16. PSHUFB zeroes lanes
  // whose index byte has bit 7 set, so each lane accumulates only the tables
  // its index actually reaches. Steps 1-8 use wrapping subtraction: inputs
  // >= 128 are negative as signed bytes, and saturating arithmetic would pin
  // them at -128 before they descend into the active 0..127 range. After 8
  // steps every index is either in range or exhausted, so steps 9-15 switch
  // to saturating subtraction, which keeps exhausted indices stuck at -128
  // (bit 7 set) instead of letting them wrap back into range.
  const __m128i voffset = _mm_set1_epi8(16);
  $if BATCH_TILE > 16:
    for (; n >= ${BATCH_TILE} * sizeof(uint8_t); n -= ${BATCH_TILE} * sizeof(uint8_t)) {
      __m128i vx0 = _mm_loadu_si128((const __m128i*) x);
      $for N in range(1, SIMD_TILE):
        __m128i vx${N} = _mm_loadu_si128((const __m128i*) (x + ${N * 16}));
      x += ${BATCH_TILE};

      $for N in range(SIMD_TILE):
        __m128i vy${N} = _mm_shuffle_epi8(vtable0, vx${N});

      $for T in range(1, 9):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_sub_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      $for T in range(9, 16):
        $for N in range(SIMD_TILE):
          vx${N} = _mm_subs_epi8(vx${N}, voffset);
        $for N in range(SIMD_TILE):
          vy${N} = _mm_xor_si128(vy${N}, _mm_shuffle_epi8(vtable${ABC[T]}, vx${N}));

      _mm_storeu_si128((__m128i*) y, vy0);
      $for N in range(1, SIMD_TILE):
        _mm_storeu_si128((__m128i*) (y + ${N * 16}), vy${N});
      y += ${BATCH_TILE};
    }
  for (; n >= 16 * sizeof(uint8_t); n -= 16 * sizeof(uint8_t)) {
    __m128i vx = _mm_loadu_si128((const __m128i*) x);
    x += 16;

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    _mm_storeu_si128((__m128i*) y, vy);
    y += 16;
  }
  if XNN_UNLIKELY(n != 0) {
    // Final 1-15 bytes: compute a full 16-byte lookup, then store it piecewise.
    __m128i vx = _mm_loadu_si128((const __m128i*) x);

    __m128i vy = _mm_shuffle_epi8(vtable0, vx);

    $for T in range(1, 9):
      vx = _mm_sub_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    $for T in range(9, 16):
      vx = _mm_subs_epi8(vx, voffset);
      vy = _mm_xor_si128(vy, _mm_shuffle_epi8(vtable${ABC[T]}, vx));

    if (n & (8 * sizeof(uint8_t))) {
      _mm_storel_epi64((__m128i*) y, vy);
      vy = _mm_unpackhi_epi64(vy, vy);
      y += 8;
    }
    if (n & (4 * sizeof(uint8_t))) {
      $if AVX:
        _mm_storeu_si32(y, vy);
      $else:
        *((uint32_t*) y) = (uint32_t) _mm_cvtsi128_si32(vy);
      vy = _mm_srli_epi64(vy, 32);
      y += 4;
    }
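    // The remaining sub-4-byte stores differ by ISA: AVX implies SSE4.1, so
    // the last bytes can be extracted directly from the vector (PEXTRB for
    // the final byte); plain SSSE3 has no PEXTRB, so the low 32 bits are
    // moved to a scalar register and stored piecewise.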
    $if AVX:
      if (n & (2 * sizeof(uint8_t))) {
        *((uint16_t*) y) = (uint16_t) _mm_extract_epi16(vy, 0);
        vy = _mm_srli_epi32(vy, 16);
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) _mm_extract_epi8(vy, 0);
      }
    $else:
      uint32_t vy_lo = (uint32_t) _mm_cvtsi128_si32(vy);
      if (n & (2 * sizeof(uint8_t))) {
        *((uint16_t*) y) = (uint16_t) vy_lo;
        vy_lo >>= 16;
        y += 2;
      }
      if (n & (1 * sizeof(uint8_t))) {
        *y = (uint8_t) vy_lo;
      }
  }
}