// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert NR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}s4__wasmsimd${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const float*restrict acc,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

  // Set up per-row A (input) and C (output) pointers. Rows beyond mr alias the
  // previous row, so their loads and stores stay in bounds.
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
    const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  // Compute one ${MR}x${NR} output tile per iteration of this loop.
  do {
    $if INC:
      $for M in range(MR):
        $for N in range(0, NR, 4):
          v128_t vacc${M}x${ABC[N:N+4]} = wasm_v128_load(acc + ${M*NR+N});
      acc += ${MR*NR};
    $else:
      $for N in range(0, NR, 4):
        v128_t vacc0x${ABC[N:N+4]} = wasm_v128_load(w + ${N});
      $for M in range(1, MR):
        $for N in range(0, NR, 4):
          v128_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
      w += ${NR};

    size_t k = kc;
    // Main loop: process 4 k values per row of A per iteration.
    while (k >= 4 * sizeof(float)) {
      $for M in range(MR):
        v128_t va${M} = wasm_v128_load(a${M});
        a${M} += 4;

      // The "s4" scheme: instead of splatting each A element, the 4 loaded
      // elements are rotated through the lanes so that over 4 steps every
      // element multiplies its matching packed B panel.
      $for L in range(4):

        $for N in range(0, NR, 4):
          const v128_t vb${ABC[N:N+4]}c${L} = wasm_v128_load(w + ${L * NR + N});

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}c${L}));

        $if L + 1 != 4:
          $for M in range(MR):
            va${M} = wasm_v32x4_shuffle(va${M}, va${M}, 1, 2, 3, 0);

      w += ${4 * NR};
      k -= 4 * sizeof(float);
    }
    // Remainder loop: handle the last 1-3 k values one at a time with splatted A elements.
    if XNN_UNLIKELY(k != 0) {
      do {
        $for M in range(MR):
          const v128_t va${M} = wasm_v32x4_load_splat(a${M});
          a${M} += 1;

        const v128_t vb${ABC[0:4]} = wasm_v128_load(w);
        $for N in range(4, NR, 4):
          const v128_t vb${ABC[N:N+4]} = wasm_v128_load(w + ${N});
        w += ${NR};

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}));

        k -= sizeof(float);
      } while (k != 0);
    }

    $if ACTIVATION == "MINMAX":
      $if X86:
        // Clamp with compare + bitselect, which lowers to cheaper code than
        // f32x4.min/max on x86.
        const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vmin, vacc${M}x${ABC[N:N+4]}, wasm_f32x4_lt(vacc${M}x${ABC[N:N+4]}, vmin));

        const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_v128_bitselect(vacc${M}x${ABC[N:N+4]}, vmax, wasm_f32x4_le(vacc${M}x${ABC[N:N+4]}, vmax));
      $else:
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_max(vacc${M}x${ABC[N:N+4]}, vmin);

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_min(vacc${M}x${ABC[N:N+4]}, vmax);
    $elif ACTIVATION == "RELU":
      // ReLU: signed integer max with zero works because negative floats have
      // the sign bit set and therefore compare below zero as signed integers.
      const v128_t vzero = wasm_f32x4_splat(0.0f);
      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = wasm_i32x4_max(vacc${M}x${ABC[N:N+4]}, vzero);

    if XNN_LIKELY(nc >= ${NR}) {
      // Full tile: store ${NR} output columns per row and rewind the A pointers
      // for the next tile.
      $for M in reversed(range(MR)):
        wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      $for M in reversed(range(MR)):
        a${M} = (const float*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
      // Partial tile: store the remaining nc (< ${NR}) columns in power-of-two chunks.
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for M in reversed(range(MR)):
                wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
                $for N in range(4, 1 << LOG2N, 4):
                  wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}x${ABC[0:4]}, 0);

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = wasm_v32x4_shuffle(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2, 3, 2, 3);

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                *c${M} = wasm_f32x4_extract_lane(vacc${M}x${ABC[0:4]}, 0);
          }

      nc = 0;
    }
  } while (nc != 0);
}
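// ----------------------------------------------------------------------------
// Usage note (a sketch, not part of the generated kernel): this file is a
// template expanded by XNNPACK's xngen generator, with MR, NR, INC, ACTIVATION
// and X86 supplied as defines. As one assumed example, instantiating it with
// MR=4, NR=8, INC=0, ACTIVATION=MINMAX, X86=0 would yield a kernel with the
// following signature, derived from the naming scheme above:
//
//   void xnn_f32_gemm_minmax_ukernel_4x8s4__wasmsimd_arm(
//       size_t mr, size_t nc, size_t kc,
//       const float* restrict a, size_t a_stride,
//       const float* restrict w,
//       float* restrict c, size_t cm_stride, size_t cn_stride,
//       const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]);
//
// The exact generator invocation and output paths are assumptions here; the
// repository's generation scripts are the authoritative list of instantiations.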