// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert REQUANTIZATION == "FP32"
$assert DATATYPE in ["QC8", "QS8", "QU8"]
$assert VARIANT in ["LD64", "LD128"]
$assert MR <= 4
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>
#include <xnnpack/math.h>


$PARAMS_UNION = "xnn_qs8_minmax_params" if DATATYPE == "QC8" else "xnn_%s_conv_minmax_params" % DATATYPE.lower()
$PARAMS_STRUCT = "wasmsimd" if DATATYPE == "QC8" else "fp32_wasmsimd"
$XINT8_T = "uint8_t" if DATATYPE == "QU8" else "int8_t"
$WASM_X16X8_LOAD8X8 = "wasm_u16x8_load8x8" if DATATYPE == "QU8" else "wasm_i16x8_load8x8"
$WASM_X8X16_NARROW_I16X8 = "wasm_u8x16_narrow_i16x8" if DATATYPE == "QU8" else "wasm_i8x16_narrow_i16x8"
$WASM_X8X16_MIN = "wasm_u8x16_min" if DATATYPE == "QU8" else "wasm_i8x16_min"
void xnn_${DATATYPE.lower()}_igemm_minmax_fp32_ukernel_${MR}x4c2__wasmsimd_dot16x2_${VARIANT.lower()}(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const ${XINT8_T}** restrict a,
    const void* restrict w,
    ${XINT8_T}* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const ${XINT8_T}* zero,
    const union ${PARAMS_UNION} params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(${XINT8_T}) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // The "c2" layout processes input channels in pairs: round kc up to a multiple of 2.
  kc = round_up_po2(kc, 2);
  ${XINT8_T}* c0 = c;
  $for M in range(1, MR):
    ${XINT8_T}* c${M} = (${XINT8_T}*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  do {
    // Initialize the accumulators with the bias values packed at the start of w.
    v128_t vacc0x0123 = wasm_v128_load(w);
    $for M in range(1, MR):
      v128_t vacc${M}x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t p = ks;
    do {
      // Fetch the next ${MR} input-row pointers from the indirection buffer;
      // rows pointing at the zero buffer are not adjusted by a_offset.
      $for M in range(MR):
        const ${XINT8_T}* restrict a${M} = a[${M}];
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      size_t k = kc;
      $if DATATYPE == "QU8":
        const v128_t vb_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.kernel_zero_point);
      // Main loop: process 8 input channels per iteration.
      while (k >= 8 * sizeof(${XINT8_T})) {
        $for M in range(MR):
          const v128_t vxa${M} = ${WASM_X16X8_LOAD8X8}(a${M});
          a${M} += 8;

        $if VARIANT == "LD128":
          $for K in range(0, 4, 2):
            $if K == 0:
              const v128_t vb${K}${K+1} = wasm_v128_load(w);
            $else:
              const v128_t vb${K}${K+1} = wasm_v128_load((const ${XINT8_T}*) w + ${K * 8});
            $if DATATYPE == "QU8":
              const v128_t vxb${K} = wasm_i16x8_sub(wasm_u16x8_extend_low_u8x16(vb${K}${K+1}), vb_zero_point);
              const v128_t vxb${K+1} = wasm_i16x8_sub(wasm_u16x8_extend_high_u8x16(vb${K}${K+1}), vb_zero_point);
            $else:
              const v128_t vxb${K} = wasm_i16x8_extend_low_i8x16(vb${K}${K+1});
              const v128_t vxb${K+1} = wasm_i16x8_extend_high_i8x16(vb${K}${K+1});

            $for M in range(MR):
              vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
                wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, ${K}, ${K}, ${K}, ${K}), vxb${K}));

            $for M in range(MR):
              vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
                wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, ${K+1}, ${K+1}, ${K+1}, ${K+1}), vxb${K+1}));
        $else:
          $for K in range(4):
            $if K == 0:
              $if DATATYPE == "QU8":
                const v128_t vxb${K} = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
              $else:
                const v128_t vxb${K} = wasm_i16x8_load8x8(w);
            $else:
              $if DATATYPE == "QU8":
                const v128_t vxb${K} = wasm_i16x8_sub(wasm_u16x8_load8x8((const ${XINT8_T}*) w + ${K * 8}), vb_zero_point);
              $else:
                const v128_t vxb${K} = wasm_i16x8_load8x8((const ${XINT8_T}*) w + ${K * 8});

            $for M in range(MR):
              vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
                wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, ${K}, ${K}, ${K}, ${K}), vxb${K}));

        w = (const void*) ((const ${XINT8_T}*) w + 32);
        k -= 8 * sizeof(${XINT8_T});
      }
      // Remainder: kc is a multiple of 2, so 2, 4, or 6 channels may remain.
      if (k != 0) {
        $for M in range(MR):
          const v128_t vxa${M} = ${WASM_X16X8_LOAD8X8}(a${M});
          a${M} = (const ${XINT8_T}*) ((uintptr_t) a${M} + k);

        $if DATATYPE == "QU8":
          const v128_t vxb0 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
        $else:
          const v128_t vxb0 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const ${XINT8_T}*) w + 8);

        $for M in range(MR):
          vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, 0, 0, 0, 0), vxb0));

        if (k > 2 * sizeof(${XINT8_T})) {
          $if DATATYPE == "QU8":
            const v128_t vxb1 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
          $else:
            const v128_t vxb1 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const ${XINT8_T}*) w + 8);

          $for M in range(MR):
            vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
              wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, 1, 1, 1, 1), vxb1));

          if (k > 4 * sizeof(${XINT8_T})) {
            $if DATATYPE == "QU8":
              const v128_t vxb2 = wasm_i16x8_sub(wasm_u16x8_load8x8(w), vb_zero_point);
            $else:
              const v128_t vxb2 = wasm_i16x8_load8x8(w);
            w = (const void*) ((const ${XINT8_T}*) w + 8);

            $for M in range(MR):
              vacc${M}x0123 = wasm_i32x4_add(vacc${M}x0123,
                wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa${M}, vxa${M}, 2, 2, 2, 2), vxb2));
          }
        }
      }
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    // Requantization: convert to float and scale, then use the magic-bias trick
    // to round to the nearest integer and clamp at the lower output bound.
    $for M in range(MR):
      vacc${M}x0123 = wasm_f32x4_convert_i32x4(vacc${M}x0123);

    $if DATATYPE == "QC8":
      const v128_t vscale0123 = wasm_v128_load(w);
      w = (const void*) ((const float*) w + 4);
      $for M in range(MR):
        vacc${M}x0123 = wasm_f32x4_mul(vacc${M}x0123, vscale0123);
    $else:
      const v128_t vscale = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.scale);
      $for M in range(MR):
        vacc${M}x0123 = wasm_f32x4_mul(vacc${M}x0123, vscale);

    const v128_t vmagic_bias = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias);
    $for M in range(MR):
      vacc${M}x0123 = wasm_f32x4_add(vacc${M}x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_min);
    $for M in range(MR):
      vacc${M}x0123 = wasm_i32x4_max(vacc${M}x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.magic_bias_less_output_zero_point);
    $for M in range(MR):
      vacc${M}x0123 = wasm_i32x4_sub(vacc${M}x0123, vmagic_bias_less_output_zero_point);

    // Narrow to 8 bits with saturation and clamp at the upper output bound.
    $for M in range(0, MR, 2):
      v128_t vacc${M}${min(M+1, MR-1)}x0123 = wasm_i16x8_narrow_i32x4(vacc${M}x0123, vacc${min(M+1, MR-1)}x0123);

    $if MR > 2:
      v128_t vout = ${WASM_X8X16_NARROW_I16X8}(vacc0${min(1, MR-1)}x0123, vacc${min(2, MR-1)}${min(3, MR-1)}x0123);
    $else:
      v128_t vout = ${WASM_X8X16_NARROW_I16X8}(vacc0${min(1, MR-1)}x0123, vacc0${min(1, MR-1)}x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->${PARAMS_STRUCT}.output_max);
    vout = ${WASM_X8X16_MIN}(vout, voutput_max);

    if (nc >= 4) {
      // Write out a full 4-column tile: one 32-bit store per row.
      $for M in reversed(range(MR)):
        *((float*) c${M}) = (float) wasm_f32x4_extract_lane(vout, ${M});

      $for M in reversed(range(MR)):
        c${M} = (${XINT8_T}*) ((uintptr_t) c${M} + cn_stride);

      a = (const ${XINT8_T}**restrict) ((uintptr_t) a - ks);

      nc -= 4;
    } else {
      // Tail: store the remaining 1-3 columns in 2-byte and 1-byte pieces.
      $for M in reversed(range(MR)):
        uint32_t vout${M} = wasm_i32x4_extract_lane(vout, ${M});
      if (nc & 2) {
        $for M in reversed(range(MR)):
          *((uint16_t*) c${M}) = (uint16_t) vout${M};
          vout${M} >>= 16;
          c${M} += 2;
      }
      if (nc & 1) {
        $for M in reversed(range(MR)):
          *c${M} = (${XINT8_T}) vout${M};
      }

      nc = 0;
    }
  } while (nc != 0);
}
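
// A minimal scalar sketch of the FP32 (magic-bias) requantization used above,
// kept as a comment for reference. The scalar names `acc`, `scale`, `magic_bias`,
// `magic_min`, `magic_bias_less_output_zero_point`, and `fp32_to_bits` stand in
// for the vectorized values; this is an illustration, not part of the kernel:
//
//   float vf = (float) acc * scale + magic_bias;  // rounded result lands in the low mantissa bits
//   int32_t vi = (int32_t) fp32_to_bits(vf);      // reinterpret the bits; no conversion
//   vi = vi < magic_min ? magic_min : vi;         // clamp at the quantized output minimum
//   vi -= magic_bias_less_output_zero_point;      // strip the bias and add the output zero point
//   // The saturating i16/i8 narrows and the final min() then apply output_max.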