// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$SSE_HEADER = {2: "emmintrin.h", 3: "tmmintrin.h", 4: "smmintrin.h"}[SSE]
$assert CHANNEL_TILE % 8 == 0
$assert CHANNEL_TILE >= 8
$assert ROW_TILE >= 2
$assert ROW_SUBTILE >= 2
$assert ROW_SUBTILE <= ROW_TILE
$assert ACCUMULATORS >= 1
$assert ROW_TILE >= ACCUMULATORS * 2
$assert ROW_SUBTILE >= ACCUMULATORS * 2
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <${SSE_HEADER}>

#include <xnnpack/gavgpool.h>
#include <xnnpack/math.h>


$ISA = {2: "sse2", 3: "ssse3", 4: "sse41"}[SSE]
void xnn_qs8_gavgpool_minmax_ukernel_${ROW_TILE}p${ROW_SUBTILE}x__${ISA}_c${CHANNEL_TILE}${"" if ACCUMULATORS == 1 else "_acc%d" % ACCUMULATORS}(
    size_t rows,
    size_t channels,
    const int8_t* input,
    size_t input_stride,
    const int8_t* zero,
    int32_t* buffer,
    int8_t* output,
    const union xnn_qs8_avgpool_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows > ${ROW_TILE});
  assert(channels != 0);

  const int8_t* i0 = input;
  $for M in range(1, ROW_TILE):
    const int8_t* i${M} = (const int8_t*) ((uintptr_t) i${M-1} + input_stride);
  $if CHANNEL_TILE <= 16:
    const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, ${CHANNEL_TILE});
  $else:
    const size_t input_increment = ${ROW_TILE} * input_stride - round_up_po2(channels, 8);

  const __m128i vbias = _mm_load_si128((const __m128i*) params->sse2.bias);
  int32_t* b = buffer;
  size_t c = channels;
  for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 16 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 16 else "c = doz(c, %d)") % CHANNEL_TILE}) {
    $for M in range(ROW_TILE):
      $if SSE >= 4:
        const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
        $for C in range(8, CHANNEL_TILE, 8):
          const __m128i vxi${M}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i${M} + ${C})));
      $else:
        const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
        $for C in range(8, CHANNEL_TILE, 8):
          const __m128i vi${M}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${M} + ${C}));
      i${M} += ${CHANNEL_TILE};

    $if SSE < 4:
      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 8):
          const __m128i vxi${M}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${M}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[C:C+8]}));

    $for A in range(ACCUMULATORS):
      $for C in range(0, CHANNEL_TILE, 8):
        __m128i vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vxi${A*2}x${ABC[C:C+8]}, vxi${A*2+1}x${ABC[C:C+8]});
    $for M in range(ACCUMULATORS * 2, ROW_TILE):
      $for C in range(0, CHANNEL_TILE, 8):
        vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vxi${M}x${ABC[C:C+8]});

    $if ACCUMULATORS > 1:
      // Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
      $ACC_SLICE = 1
      $while ACC_SLICE < ACCUMULATORS:
        $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
          $if A + ACC_SLICE < ACCUMULATORS:
            $for C in range(0, CHANNEL_TILE, 8):
              vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
        $ACC_SLICE *= 2

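    // Sign-extend the 16-bit row sums to 32 bits, add the bias, and store the partial sums to the buffer.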
    $for C in range(0, CHANNEL_TILE, 8):
      $if SSE >= 4:
        const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(vbias, _mm_cvtepi16_epi32(vacc0x${ABC[C:C+8]}));
        const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]})));
      $else:
        const __m128i vsgnacc0x${ABC[C:C+8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]});
        const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}));
        const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}));

    _mm_store_si128((__m128i*) b, vacc${ABC[0:4]});
    $for C in range(4, CHANNEL_TILE, 4):
      _mm_store_si128((__m128i*) (b + ${C}), vacc${ABC[C:C+4]});
    b += ${CHANNEL_TILE};
  }
  $if CHANNEL_TILE > 16:
    if XNN_UNLIKELY(c != 0) {
      do {
        $for M in range(ROW_TILE):
          $if SSE >= 4:
            const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
          $else:
            const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
          i${M} += 8;

        $if SSE < 4:
          $for M in range(ROW_TILE):
            const __m128i vxi${M}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${M}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[0:8]}));

        $for A in range(ACCUMULATORS):
          __m128i vacc${A}x${ABC[0:8]} = _mm_add_epi16(vxi${A*2}x${ABC[0:8]}, vxi${A*2+1}x${ABC[0:8]});
        $for M in range(ACCUMULATORS * 2, ROW_TILE):
          vacc${M % ACCUMULATORS}x${ABC[0:8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vxi${M}x${ABC[0:8]});

        $if ACCUMULATORS > 1:
          // Add up all accumulators to vacc0x${ABC[0:8]}
          $ACC_SLICE = 1
          $while ACC_SLICE < ACCUMULATORS:
            $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
              $if A + ACC_SLICE < ACCUMULATORS:
                vacc${A}x${ABC[0:8]} = _mm_add_epi16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
            $ACC_SLICE *= 2

        $if SSE >= 4:
          const __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_cvtepi16_epi32(vacc0x${ABC[0:8]}));
          const __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vacc0x${ABC[0:8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]})));
        $else:
          const __m128i vsgnacc0x${ABC[0:8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]});
          const __m128i vacc${ABC[0:4]} = _mm_add_epi32(vbias, _mm_unpacklo_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}));
          const __m128i vacc${ABC[4:8]} = _mm_add_epi32(vbias, _mm_unpackhi_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}));

        _mm_store_si128((__m128i*) b, vacc${ABC[0:4]});
        _mm_store_si128((__m128i*) (b + 4), vacc${ABC[4:8]});
        b += 8;

        c = doz(c, 8);
      } while (c != 0);
    }

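  // Middle passes: accumulate ${ROW_SUBTILE} more rows at a time onto the 32-bit partial sums in the buffer.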
  for (rows -= ${ROW_TILE}; rows > ${ROW_SUBTILE}; rows -= ${ROW_SUBTILE}) {
    $for M in range(ROW_SUBTILE):
      i${M} = (const int8_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);

    int32_t* b = buffer;
    size_t c = channels;
    for (; ${"c >= %d" % CHANNEL_TILE if CHANNEL_TILE > 16 else "c != 0"}; ${("c -= %d" if CHANNEL_TILE > 16 else "c = doz(c, %d)") % CHANNEL_TILE}) {
      $for M in range(ROW_SUBTILE):
        $if SSE >= 4:
          const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
          $for C in range(8, CHANNEL_TILE, 8):
            const __m128i vxi${M}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i${M} + ${C})));
        $else:
          const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
          $for C in range(8, CHANNEL_TILE, 8):
            const __m128i vi${M}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${M} + ${C}));
        i${M} += ${CHANNEL_TILE};

      $if SSE < 4:
        $for M in range(ROW_SUBTILE):
          $for C in range(0, CHANNEL_TILE, 8):
            const __m128i vxi${M}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${M}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[C:C+8]}));

      $for A in range(ACCUMULATORS):
        $for C in range(0, CHANNEL_TILE, 8):
          __m128i vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vxi${A*2}x${ABC[C:C+8]}, vxi${A*2+1}x${ABC[C:C+8]});
      $for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
        $for C in range(0, CHANNEL_TILE, 8):
          vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vxi${M}x${ABC[C:C+8]});

      $if ACCUMULATORS > 1:
        // Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
        $ACC_SLICE = 1
        $while ACC_SLICE < ACCUMULATORS:
          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
            $if A + ACC_SLICE < ACCUMULATORS:
              $for C in range(0, CHANNEL_TILE, 8):
                vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
          $ACC_SLICE *= 2

      $for C in range(0, CHANNEL_TILE, 8):
        $if SSE >= 4:
          const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(_mm_cvtepi16_epi32(vacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (b + ${C})));
          const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]})), _mm_load_si128((const __m128i*) (b + ${C+4})));
        $else:
          const __m128i vsgnacc0x${ABC[C:C+8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]});
          const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (b + ${C})));
          const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (b + ${C+4})));

      _mm_store_si128((__m128i*) b, vacc${ABC[0:4]});
      $for C in range(4, CHANNEL_TILE, 4):
        _mm_store_si128((__m128i*) (b + ${C}), vacc${ABC[C:C+4]});
      b += ${CHANNEL_TILE};
    }
    $if CHANNEL_TILE > 16:
      if XNN_UNLIKELY(c != 0) {
        do {
          $for M in range(ROW_SUBTILE):
            $if SSE >= 4:
              const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
            $else:
              const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
            i${M} += 8;

          $if SSE < 4:
            $for M in range(ROW_SUBTILE):
              const __m128i vxi${M}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${M}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[0:8]}));

          $for A in range(ACCUMULATORS):
            __m128i vacc${A}x${ABC[0:8]} = _mm_add_epi16(vxi${A*2}x${ABC[0:8]}, vxi${A*2+1}x${ABC[0:8]});
          $for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
            vacc${M % ACCUMULATORS}x${ABC[0:8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vxi${M}x${ABC[0:8]});

          $if ACCUMULATORS > 1:
            // Add up all accumulators to vacc0x${ABC[0:8]}
            $ACC_SLICE = 1
            $while ACC_SLICE < ACCUMULATORS:
              $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
                $if A + ACC_SLICE < ACCUMULATORS:
                  vacc${A}x${ABC[0:8]} = _mm_add_epi16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
              $ACC_SLICE *= 2

          $if SSE >= 4:
            const __m128i vacc${ABC[0:4]} = _mm_add_epi32(_mm_cvtepi16_epi32(vacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) b));
            const __m128i vacc${ABC[4:8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[0:8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]})), _mm_load_si128((const __m128i*) (b + 4)));
          $else:
            const __m128i vsgnacc0x${ABC[0:8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]});
            const __m128i vacc${ABC[0:4]} = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) b));
            const __m128i vacc${ABC[4:8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) (b + 4)));

          _mm_store_si128((__m128i*) b, vacc${ABC[0:4]});
          _mm_store_si128((__m128i*) (b + 4), vacc${ABC[4:8]});
          b += 8;

          c = doz(c, 8);
        } while (c != 0);
      }
  }

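  // Final pass: accumulate the last rows, requantize, and write out the int8 values.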
  i0 = (const int8_t*) ((uintptr_t) i${ROW_TILE - ROW_SUBTILE} + input_increment);
  $for M in range(1, ROW_SUBTILE):
    i${M} = (const int8_t*) ((uintptr_t) i${M + ROW_TILE - ROW_SUBTILE} + input_increment);
    $if M % 2 == 1:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = zero;
      }
    $else:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = zero;
      }

  const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
  const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);
  const __m128i vshift = _mm_loadl_epi64((const __m128i*) params->sse2.shift);
  while (channels >= ${CHANNEL_TILE}) {
    $for M in range(ROW_SUBTILE):
      $if SSE >= 4:
        const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
        $for C in range(8, CHANNEL_TILE, 8):
          const __m128i vxi${M}x${ABC[C:C+8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) (i${M} + ${C})));
      $else:
        const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
        $for C in range(8, CHANNEL_TILE, 8):
          const __m128i vi${M}x${ABC[C:C+8]} = _mm_loadl_epi64((const __m128i*) (i${M} + ${C}));
      i${M} += ${CHANNEL_TILE};

    $if SSE < 4:
      $for M in range(ROW_SUBTILE):
        $for C in range(0, CHANNEL_TILE, 8):
          const __m128i vxi${M}x${ABC[C:C+8]} = _mm_unpacklo_epi8(vi${M}x${ABC[C:C+8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[C:C+8]}));

    $for A in range(ACCUMULATORS):
      $for C in range(0, CHANNEL_TILE, 8):
        __m128i vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vxi${A*2}x${ABC[C:C+8]}, vxi${A*2+1}x${ABC[C:C+8]});
    $for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
      $for C in range(0, CHANNEL_TILE, 8):
        vacc${M % ACCUMULATORS}x${ABC[C:C+8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[C:C+8]}, vxi${M}x${ABC[C:C+8]});

    $if ACCUMULATORS > 1:
      // Add up all accumulators to vacc0x${ABC[0:CHANNEL_TILE]}
      $ACC_SLICE = 1
      $while ACC_SLICE < ACCUMULATORS:
        $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
          $if A + ACC_SLICE < ACCUMULATORS:
            $for C in range(0, CHANNEL_TILE, 8):
              vacc${A}x${ABC[C:C+8]} = _mm_add_epi16(vacc${A}x${ABC[C:C+8]}, vacc${A + ACC_SLICE}x${ABC[C:C+8]});
        $ACC_SLICE *= 2

    $for C in range(0, CHANNEL_TILE, 8):
      $if SSE >= 4:
        const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(_mm_cvtepi16_epi32(vacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (buffer + ${C})));
        const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]})), _mm_load_si128((const __m128i*) (buffer + ${C+4})));
      $else:
        const __m128i vsgnacc0x${ABC[C:C+8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[C:C+8]});
        const __m128i vacc${ABC[C:C+4]} = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (buffer + ${C})));
        const __m128i vacc${ABC[C+4:C+8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[C:C+8]}, vsgnacc0x${ABC[C:C+8]}), _mm_load_si128((const __m128i*) (buffer + ${C+4})));
    buffer += ${CHANNEL_TILE};

    $if SSE >= 3:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vabsacc${ABC[C:C+4]} = _mm_abs_epi32(vacc${ABC[C:C+4]});
    $else:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vsgnacc${ABC[C:C+4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[C:C+4]});
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vabsacc${ABC[C:C+4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[C:C+4]}, vsgnacc${ABC[C:C+4]}), vsgnacc${ABC[C:C+4]});

    $for C in range(0, CHANNEL_TILE, 4):
      const __m128i vabsacc${ABC[C+1:C+4:2]} = _mm_shuffle_epi32(vabsacc${ABC[C:C+4]}, _MM_SHUFFLE(3, 3, 1, 1));

    $for C in range(0, CHANNEL_TILE, 4):
      const __m128i vabsprod${ABC[C:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C:C+4]}, vmultiplier);
      const __m128i vabsprod${ABC[C+1:C+4:2]} = _mm_mul_epu32(vabsacc${ABC[C+1:C+4:2]}, vmultiplier);

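    // Shift the 64-bit products right with rounding to get the absolute values of the scaled sums.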
    $for C in range(0, CHANNEL_TILE, 4):
      const __m128i vabsout${ABC[C:C+4:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[C:C+4:2]}, vrounding), vshift);
      const __m128i vabsout${ABC[C+1:C+4:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[C+1:C+4:2]}, vrounding), vshift);

    $if SSE >= 4:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vabsout${ABC[C:C+4]} = _mm_blend_epi16(vabsout${ABC[C:C+4:2]}, _mm_shuffle_epi32(vabsout${ABC[C+1:C+4:2]}, _MM_SHUFFLE(2, 2, 0, 0)), 0xCC);
    $else:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vabsout${ABC[C:C+4:2]}${ABC[C+1:C+4:2]} = _mm_castps_si128(
          _mm_shuffle_ps(_mm_castsi128_ps(vabsout${ABC[C:C+4:2]}), _mm_castsi128_ps(vabsout${ABC[C+1:C+4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vabsout${ABC[C:C+4]} = _mm_shuffle_epi32(vabsout${ABC[C:C+4:2]}${ABC[C+1:C+4:2]}, _MM_SHUFFLE(3, 1, 2, 0));

    $if SSE >= 3:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vout${ABC[C:C+4]} = _mm_sign_epi32(vabsout${ABC[C:C+4]}, vacc${ABC[C:C+4]});
    $else:
      $for C in range(0, CHANNEL_TILE, 4):
        const __m128i vout${ABC[C:C+4]} = _mm_sub_epi32(_mm_xor_si128(vabsout${ABC[C:C+4]}, vsgnacc${ABC[C:C+4]}), vsgnacc${ABC[C:C+4]});

    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    $for C in range(0, CHANNEL_TILE, 8):
      __m128i vout${ABC[C:C+8]} = _mm_adds_epi16(_mm_packs_epi32(vout${ABC[C:C+4]}, vout${ABC[C+4:C+8]}), voutput_zero_point);

    const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
    const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
    $for C in range(0, CHANNEL_TILE, 8):
      vout${ABC[C:C+8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[C:C+8]}, voutput_min), voutput_max);

    $for C in range(0, CHANNEL_TILE, 16):
      $if C + 8 < CHANNEL_TILE:
        __m128i vout${ABC[C:C+16]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C+8:C+16]});
      $else:
        __m128i vout${ABC[C:C+8]}${ABC[C:C+8]} = _mm_packs_epi16(vout${ABC[C:C+8]}, vout${ABC[C:C+8]});

    $if CHANNEL_TILE > 8:
      _mm_storeu_si128((__m128i*) output, vout${ABC[0:16]});
    $else:
      _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
    $for C in range(16, CHANNEL_TILE, 16):
      $if C + 8 < CHANNEL_TILE:
        _mm_storeu_si128((__m128i*) (output + ${C}), vout${ABC[C:C+16]});
      $else:
        _mm_storel_epi64((__m128i*) (output + ${C}), vout${ABC[C:C+8]}${ABC[C:C+8]});
    output += ${CHANNEL_TILE};
    channels -= ${CHANNEL_TILE};
  }

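  // Process the remaining channels (fewer than ${CHANNEL_TILE}).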
  if XNN_UNLIKELY(channels != 0) {
    ${"do " if CHANNEL_TILE > 8 else ""}{
      $for M in range(ROW_SUBTILE):
        $if SSE >= 4:
          const __m128i vxi${M}x${ABC[0:8]} = _mm_cvtepi8_epi16(_mm_loadl_epi64((const __m128i*) i${M}));
        $else:
          const __m128i vi${M}x${ABC[0:8]} = _mm_loadl_epi64((const __m128i*) i${M});
        i${M} += 8;

      $if SSE < 4:
        $for M in range(ROW_SUBTILE):
          const __m128i vxi${M}x${ABC[0:8]} = _mm_unpacklo_epi8(vi${M}x${ABC[0:8]}, _mm_cmpgt_epi8(_mm_setzero_si128(), vi${M}x${ABC[0:8]}));

      $for A in range(ACCUMULATORS):
        __m128i vacc${A}x${ABC[0:8]} = _mm_add_epi16(vxi${A*2}x${ABC[0:8]}, vxi${A*2+1}x${ABC[0:8]});
      $for M in range(ACCUMULATORS * 2, ROW_SUBTILE):
        vacc${M % ACCUMULATORS}x${ABC[0:8]} = _mm_add_epi16(vacc${M % ACCUMULATORS}x${ABC[0:8]}, vxi${M}x${ABC[0:8]});

      $if ACCUMULATORS > 1:
        // Add up all accumulators to vacc0x${ABC[0:8]}
        $ACC_SLICE = 1
        $while ACC_SLICE < ACCUMULATORS:
          $for A in range(0, ACCUMULATORS, ACC_SLICE * 2):
            $if A + ACC_SLICE < ACCUMULATORS:
              vacc${A}x${ABC[0:8]} = _mm_add_epi16(vacc${A}x${ABC[0:8]}, vacc${A + ACC_SLICE}x${ABC[0:8]});
          $ACC_SLICE *= 2

      $if SSE >= 4:
        const __m128i vacc${ABC[0:4]} = _mm_add_epi32(_mm_cvtepi16_epi32(vacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) buffer));
        const __m128i vacc${ABC[4:8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[0:8]}, _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]})), _mm_load_si128((const __m128i*) (buffer + 4)));
      $else:
        const __m128i vsgnacc0x${ABC[0:8]} = _mm_cmpgt_epi16(_mm_setzero_si128(), vacc0x${ABC[0:8]});
        const __m128i vacc${ABC[0:4]} = _mm_add_epi32(_mm_unpacklo_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) buffer));
        const __m128i vacc${ABC[4:8]} = _mm_add_epi32(_mm_unpackhi_epi16(vacc0x${ABC[0:8]}, vsgnacc0x${ABC[0:8]}), _mm_load_si128((const __m128i*) (buffer + 4)));
      buffer += 8;

      $if SSE >= 3:
        const __m128i vabsacc${ABC[0:4]} = _mm_abs_epi32(vacc${ABC[0:4]});
        const __m128i vabsacc${ABC[4:8]} = _mm_abs_epi32(vacc${ABC[4:8]});
      $else:
        const __m128i vsgnacc${ABC[0:4]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[0:4]});
        const __m128i vsgnacc${ABC[4:8]} = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc${ABC[4:8]});
        const __m128i vabsacc${ABC[0:4]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[0:4]}, vsgnacc${ABC[0:4]}), vsgnacc${ABC[0:4]});
        const __m128i vabsacc${ABC[4:8]} = _mm_sub_epi32(_mm_xor_si128(vacc${ABC[4:8]}, vsgnacc${ABC[4:8]}), vsgnacc${ABC[4:8]});

      const __m128i vabsacc${ABC[1:4:2]} = _mm_shuffle_epi32(vabsacc${ABC[0:4]}, _MM_SHUFFLE(3, 3, 1, 1));
      const __m128i vabsacc${ABC[5:8:2]} = _mm_shuffle_epi32(vabsacc${ABC[4:8]}, _MM_SHUFFLE(3, 3, 1, 1));

      const __m128i vabsprod${ABC[0:4:2]} = _mm_mul_epu32(vabsacc${ABC[0:4]}, vmultiplier);
      const __m128i vabsprod${ABC[1:4:2]} = _mm_mul_epu32(vabsacc${ABC[1:4:2]}, vmultiplier);
      const __m128i vabsprod${ABC[4:8:2]} = _mm_mul_epu32(vabsacc${ABC[4:8]}, vmultiplier);
      const __m128i vabsprod${ABC[5:8:2]} = _mm_mul_epu32(vabsacc${ABC[5:8:2]}, vmultiplier);

      const __m128i vabsout${ABC[0:4:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[0:4:2]}, vrounding), vshift);
      const __m128i vabsout${ABC[1:4:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[1:4:2]}, vrounding), vshift);
      const __m128i vabsout${ABC[4:8:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[4:8:2]}, vrounding), vshift);
      const __m128i vabsout${ABC[5:8:2]} = _mm_srl_epi64(_mm_add_epi64(vabsprod${ABC[5:8:2]}, vrounding), vshift);

      $if SSE >= 4:
        const __m128i vabsout${ABC[0:4]} = _mm_blend_epi16(vabsout${ABC[0:4:2]}, _mm_shuffle_epi32(vabsout${ABC[1:4:2]}, _MM_SHUFFLE(2, 2, 0, 0)), 0xCC);
        const __m128i vabsout${ABC[4:8]} = _mm_blend_epi16(vabsout${ABC[4:8:2]}, _mm_shuffle_epi32(vabsout${ABC[5:8:2]}, _MM_SHUFFLE(2, 2, 0, 0)), 0xCC);
      $else:
        const __m128i vabsout${ABC[0:4:2]}${ABC[1:4:2]} = _mm_castps_si128(
          _mm_shuffle_ps(_mm_castsi128_ps(vabsout${ABC[0:4:2]}), _mm_castsi128_ps(vabsout${ABC[1:4:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
        const __m128i vabsout${ABC[4:8:2]}${ABC[5:8:2]} = _mm_castps_si128(
          _mm_shuffle_ps(_mm_castsi128_ps(vabsout${ABC[4:8:2]}), _mm_castsi128_ps(vabsout${ABC[5:8:2]}), _MM_SHUFFLE(2, 0, 2, 0)));
        const __m128i vabsout${ABC[0:4]} = _mm_shuffle_epi32(vabsout${ABC[0:4:2]}${ABC[1:4:2]}, _MM_SHUFFLE(3, 1, 2, 0));
        const __m128i vabsout${ABC[4:8]} = _mm_shuffle_epi32(vabsout${ABC[4:8:2]}${ABC[5:8:2]}, _MM_SHUFFLE(3, 1, 2, 0));

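      // Restore the signs of the requantized values.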
      $if SSE >= 3:
        const __m128i vout${ABC[0:4]} = _mm_sign_epi32(vabsout${ABC[0:4]}, vacc${ABC[0:4]});
        const __m128i vout${ABC[4:8]} = _mm_sign_epi32(vabsout${ABC[4:8]}, vacc${ABC[4:8]});
      $else:
        const __m128i vout${ABC[0:4]} = _mm_sub_epi32(_mm_xor_si128(vabsout${ABC[0:4]}, vsgnacc${ABC[0:4]}), vsgnacc${ABC[0:4]});
        const __m128i vout${ABC[4:8]} = _mm_sub_epi32(_mm_xor_si128(vabsout${ABC[4:8]}, vsgnacc${ABC[4:8]}), vsgnacc${ABC[4:8]});

      const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
      __m128i vout${ABC[0:8]} = _mm_adds_epi16(_mm_packs_epi32(vout${ABC[0:4]}, vout${ABC[4:8]}), voutput_zero_point);

      const __m128i voutput_min = _mm_load_si128((const __m128i*) params->sse2.output_min);
      const __m128i voutput_max = _mm_load_si128((const __m128i*) params->sse2.output_max);
      vout${ABC[0:8]} = _mm_min_epi16(_mm_max_epi16(vout${ABC[0:8]}, voutput_min), voutput_max);

      __m128i vout${ABC[0:8]}${ABC[0:8]} = _mm_packs_epi16(vout${ABC[0:8]}, vout${ABC[0:8]});

      $if CHANNEL_TILE > 8:
        if XNN_LIKELY(channels >= 8) {
          _mm_storel_epi64((__m128i*) output, vout${ABC[0:8]}${ABC[0:8]});
          output += 8;
          channels -= 8;
        } else {
          if (channels & 4) {
            *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
            output += 4;
          }
          if (channels & 2) {
            *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
            vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
            output += 2;
          }
          if (channels & 1) {
            $if SSE >= 4:
              *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
            $else:
              *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
            output += 1;
          }
          channels = 0;
        }
      $else:
        if (channels & 4) {
          *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi64(vout${ABC[0:8]}${ABC[0:8]}, 32);
          output += 4;
        }
        if (channels & 2) {
          *((uint16_t*) output) = (uint16_t) _mm_extract_epi16(vout${ABC[0:8]}${ABC[0:8]}, 0);
          vout${ABC[0:8]}${ABC[0:8]} = _mm_srli_epi32(vout${ABC[0:8]}${ABC[0:8]}, 16);
          output += 2;
        }
        if (channels & 1) {
          $if SSE >= 4:
            *output = (int8_t) _mm_extract_epi8(vout${ABC[0:8]}${ABC[0:8]}, 0);
          $else:
            *output = (int32_t) _mm_cvtsi128_si32(vout${ABC[0:8]}${ABC[0:8]});
        }
    }${" while (channels != 0);" if CHANNEL_TILE > 8 else ""}
  }
}