// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert BATCH_TILE % 16 == 0
$assert BATCH_TILE >= 16
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
$assert OP in ["ADD", "DIV", "MAX", "MIN", "MUL", "SUB", "SQRDIFF"]
$assert ACTIVATION in ["LINEAR", "MINMAX"]
#include <assert.h>

#include <immintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/intrinsics-polyfill.h>
#include <xnnpack/vbinary.h>


$_MM512_OP_PS = {
$  "ADD": lambda x, y: "_mm512_add_ps(%s, %s)" % (x, y),
$  "DIV": lambda x, y: "_mm512_div_ps(%s, %s)" % (x, y),
$  "MAX": lambda x, y: "_mm512_max_ps(%s, %s)" % (x, y),
$  "MIN": lambda x, y: "_mm512_min_ps(%s, %s)" % (x, y),
$  "MUL": lambda x, y: "_mm512_mul_ps(%s, %s)" % (x, y),
$  "SUB": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
$  "SQRDIFF": lambda x, y: "_mm512_sub_ps(%s, %s)" % (x, y),
$}[OP]
$SUFFIX = {"LINEAR": "", "MINMAX": "_minmax"}[ACTIVATION]
$PARAMS = {"LINEAR": "xnn_f32_default_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_v${OP.lower()}${SUFFIX}_ukernel__avx512f_x${BATCH_TILE}(
    size_t n,
    const float* a,
    const float* b,
    float* y,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(n != 0);
  assert(n % sizeof(float) == 0);
  assert(a != NULL);
  assert(b != NULL);
  assert(y != NULL);

  $if ACTIVATION == "MINMAX":
    const __m512 vy_min = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.min));
    const __m512 vy_max = _mm512_broadcast_f32x4(_mm_load_ps(params->sse.max));

  for (; n >= ${BATCH_TILE} * sizeof(float); n -= ${BATCH_TILE} * sizeof(float)) {
    const __m512 va${ABC[0:16]} = _mm512_loadu_ps(a);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 va${ABC[N:N+16]} = _mm512_loadu_ps(a + ${N});
    a += ${BATCH_TILE};

    const __m512 vb${ABC[0:16]} = _mm512_loadu_ps(b);
    $for N in range(16, BATCH_TILE, 16):
      const __m512 vb${ABC[N:N+16]} = _mm512_loadu_ps(b + ${N});
    b += ${BATCH_TILE};

    $for N in range(0, BATCH_TILE, 16):
      __m512 vy${ABC[N:N+16]} = ${_MM512_OP_PS("va" + ABC[N:N+16], "vb" + ABC[N:N+16])};

    $if OP == "SQRDIFF":
      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_mul_ps(vy${ABC[N:N+16]}, vy${ABC[N:N+16]});

    $if ACTIVATION == "MINMAX":
      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_max_ps(vy${ABC[N:N+16]}, vy_min);

      $for N in range(0, BATCH_TILE, 16):
        vy${ABC[N:N+16]} = _mm512_min_ps(vy${ABC[N:N+16]}, vy_max);

    _mm512_storeu_ps(y, vy${ABC[0:16]});
    $for N in range(16, BATCH_TILE, 16):
      _mm512_storeu_ps(y + ${N}, vy${ABC[N:N+16]});
    y += ${BATCH_TILE};
  }
  $if BATCH_TILE > 16:
    for (; n >= 16 * sizeof(float); n -= 16 * sizeof(float)) {
      const __m512 va = _mm512_loadu_ps(a);
      a += 16;

      const __m512 vb = _mm512_loadu_ps(b);
      b += 16;

      __m512 vy = ${_MM512_OP_PS("va", "vb")};
      $if OP == "SQRDIFF":
        vy = _mm512_mul_ps(vy, vy);
      $if ACTIVATION == "MINMAX":
        vy = _mm512_max_ps(vy, vy_min);
        vy = _mm512_min_ps(vy, vy_max);
      _mm512_storeu_ps(y, vy);
      y += 16;
    }
  if XNN_UNLIKELY(n != 0) {
    assert(n >= 1 * sizeof(float));
    assert(n <= 15 * sizeof(float));
    // Prepare mask for valid 32-bit elements (depends on n).
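    // n is still a byte count at this point; shifting by log2(sizeof(float))
    // converts it to an element count, and ((1 << n) - 1) then sets one mask
    // bit per remaining element (e.g. 3 leftover floats give the mask
    // 0b0000000000000111), so the masked load/store below only touch valid elements.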
    n >>= 2 /* log2(sizeof(float)) */;
    const __mmask16 vmask = _cvtu32_mask16((uint16_t) ((uint32_t) (UINT32_C(1) << n) - UINT32_C(1)));

    const __m512 va = _mm512_maskz_loadu_ps(vmask, a);
    const __m512 vb = _mm512_maskz_loadu_ps(vmask, b);

    __m512 vy = ${_MM512_OP_PS("va", "vb")};
    $if OP == "SQRDIFF":
      vy = _mm512_mul_ps(vy, vy);
    $if ACTIVATION == "MINMAX":
      vy = _mm512_max_ps(vy, vy_min);
      vy = _mm512_min_ps(vy, vy_max);
    _mm512_mask_storeu_ps(y, vmask, vy);
  }
}