// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 4 == 0
$assert ACTIVATION != "MINMAX" or ARCH in ["ARM", "X86", "RELAXED"]
$assert not FMA or ARCH == "RELAXED"
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/igemm.h>


$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$if ACTIVATION == "MINMAX":
$  WASM_F32X4_MIN={"ARM": "wasm_f32x4_min", "X86": "wasm_f32x4_pmin", "RELAXED": "__builtin_wasm_relaxed_min_f32x4"}[ARCH]
$  WASM_F32X4_MAX={"ARM": "wasm_f32x4_max", "X86": "wasm_f32x4_pmax", "RELAXED": "__builtin_wasm_relaxed_max_f32x4"}[ARCH]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ISA = "wasmsimd" if not FMA and (ACTIVATION in ["LINEAR", "RELU"] or ARCH != "RELAXED") else "wasmrelaxedsimd"
$ARCH_SUFFIX = "" if not FMA and (ACTIVATION in ["LINEAR", "RELU"] or ARCH == "RELAXED") else "_" + ("fma" if FMA else ARCH.lower())
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_igemm${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}__${ISA}${ARCH_SUFFIX}_loadsplat(
    size_t mr,
    size_t nc,
    size_t kc,
    size_t ks,
    const float**restrict a,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    size_t a_offset,
    const float* zero,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(ks != 0);
  assert(ks % (${MR} * sizeof(void*)) == 0);
  assert(a_offset % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

  // Set up the output row pointers; rows beyond mr alias the previous row.
  float* c0 = c;
  $for M in range(1, MR):
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        c${M} = c${M-1};
      }

  $if ACTIVATION == "MINMAX":
    const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
    const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  // Process the output ${NR} columns at a time.
  do {
    // Load the initial accumulators (bias) from the packed weights.
    v128_t vacc0x${ABC[0:4]} = wasm_v128_load(w);
    $for N in range(4, NR, 4):
      v128_t vacc0x${ABC[N:N+4]} = wasm_v128_load(w + ${N});
    $for M in range(1, MR):
      $for N in range(0, NR, 4):
        v128_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
    w += ${NR};

    // Walk the indirection buffer, ${MR} input row pointers per step, ks bytes in total.
    size_t p = ks;
    do {
      $for M in range(MR):
        const float* restrict a${M} = a[${M}];
        assert(a${M} != NULL);
        if XNN_UNPREDICTABLE(a${M} != zero) {
          a${M} = (const float*) ((uintptr_t) a${M} + a_offset);
        }
      a += ${MR};

      // Multiply-accumulate along K, one input element per row per iteration.
      size_t k = kc;
      do {
        const v128_t vb${ABC[0:4]} = wasm_v128_load(w);
        $for N in range(4, NR, 4):
          const v128_t vb${ABC[N:N+4]} = wasm_v128_load(w + ${N});
        w += ${NR};

        $for M in range(MR):
          const v128_t va${M} = wasm_v128_load32_splat(a${M});
          a${M} += 1;

        $for M in range(MR):
          $for N in range(0, NR, 4):
            $if FMA:
              vacc${M}x${ABC[N:N+4]} = __builtin_wasm_fma_f32x4(vacc${M}x${ABC[N:N+4]}, va${M}, vb${ABC[N:N+4]});
            $else:
              vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}));
        k -= sizeof(float);
      } while (k != 0);
      p -= ${MR} * sizeof(void*);
    } while (p != 0);

    $if ACTIVATION == "MINMAX":
      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = ${WASM_F32X4_MAX}(vmin, vacc${M}x${ABC[N:N+4]});

      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = ${WASM_F32X4_MIN}(vmax, vacc${M}x${ABC[N:N+4]});
    $elif ACTIVATION == "RELU":
      const v128_t vzero = wasm_i32x4_const_splat(0);
      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = wasm_i32x4_max(vacc${M}x${ABC[N:N+4]}, vzero);

    // Store a full ${NR}-column tile, or handle the remainder columns below.
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(MR)):
        wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      a = (const float**restrict) ((uintptr_t) a - ks);
      nc -= ${NR};
    } else {
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for M in reversed(range(MR)):
                wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
                $for N in range(4, 1 << LOG2N, 4):
                  wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}x${ABC[0:4]}, 0);

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = wasm_v32x4_shuffle(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2, 3, 2, 3);

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                *c${M} = wasm_f32x4_extract_lane(vacc${M}x${ABC[0:4]}, 0);
          }

      nc = 0;
    }
  } while (nc != 0);
}