// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

$assert CHANNEL_TILE % 4 == 0
$assert CHANNEL_TILE >= 4
$assert ROW_TILE >= 1
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/math.h>
#include <xnnpack/vmulcaddc.h>


void xnn_f32_vmulcaddc_minmax_ukernel_c${CHANNEL_TILE}__wasmsimd_${"x86" if X86 else "arm"}_${ROW_TILE}x(
    size_t rows,
    size_t channels,
    const float*restrict input,
    size_t input_stride,
    const float*restrict weights,
    float*restrict output,
    size_t output_stride,
    const union xnn_f32_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_DISABLE_TSAN
{
  assert(rows != 0);
  assert(channels != 0);
  assert(channels % sizeof(float) == 0);

  // Set up ${ROW_TILE} row pointers; rows past the end of the batch alias the previous row.
  const float* i0 = input;
  float* o0 = output;
  $for M in range(1, ROW_TILE):
    const float* i${M} = (const float*) ((uintptr_t) i${M-1} + input_stride);
    float* o${M} = (float*) ((uintptr_t) o${M-1} + output_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(rows <= ${M}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(rows < ${M+1}) {
        i${M} = i${M-1};
        o${M} = o${M-1};
      }

  const size_t input_increment = input_stride * ${ROW_TILE} - channels;
  const size_t output_increment = output_stride * ${ROW_TILE} - channels;

  const v128_t vmin = wasm_v32x4_load_splat(&params->scalar.min);
  const v128_t vmax = wasm_v32x4_load_splat(&params->scalar.max);
  do {
    const float* w = weights;
    size_t c = channels;
    // Main loop: ${CHANNEL_TILE} channels per iteration. Weights are packed as
    // ${CHANNEL_TILE} scales followed by ${CHANNEL_TILE} biases (c counts bytes).
    for (; c >= ${CHANNEL_TILE} * sizeof(float); c -= ${CHANNEL_TILE} * sizeof(float)) {
      const v128_t vscale${ABC[0:4]} = wasm_v128_load(w);
      $for C in range(4, CHANNEL_TILE, 4):
        const v128_t vscale${ABC[C:C+4]} = wasm_v128_load(w + ${C});

      $for M in range(ROW_TILE):
        v128_t vacc${M}x${ABC[0:4]} = wasm_v128_load(i${M});
        $for C in range(4, CHANNEL_TILE, 4):
          v128_t vacc${M}x${ABC[C:C+4]} = wasm_v128_load(i${M} + ${C});
        i${M} += ${CHANNEL_TILE};

      $for C in range(0, CHANNEL_TILE, 4):
        const v128_t vbias${ABC[C:C+4]} = wasm_v128_load(w + ${C + CHANNEL_TILE});

      $for M in range(ROW_TILE):
        $for C in range(0, CHANNEL_TILE, 4):
          vacc${M}x${ABC[C:C+4]} = wasm_f32x4_add(vbias${ABC[C:C+4]}, wasm_f32x4_mul(vscale${ABC[C:C+4]}, vacc${M}x${ABC[C:C+4]}));

      $if X86:
        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 4):
            vacc${M}x${ABC[C:C+4]} = wasm_v128_bitselect(vmin, vacc${M}x${ABC[C:C+4]}, wasm_f32x4_lt(vacc${M}x${ABC[C:C+4]}, vmin));

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 4):
            vacc${M}x${ABC[C:C+4]} = wasm_v128_bitselect(vacc${M}x${ABC[C:C+4]}, vmax, wasm_f32x4_le(vacc${M}x${ABC[C:C+4]}, vmax));
      $else:
        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 4):
            vacc${M}x${ABC[C:C+4]} = wasm_f32x4_max(vacc${M}x${ABC[C:C+4]}, vmin);

        $for M in range(ROW_TILE):
          $for C in range(0, CHANNEL_TILE, 4):
            vacc${M}x${ABC[C:C+4]} = wasm_f32x4_min(vacc${M}x${ABC[C:C+4]}, vmax);

      $for M in range(ROW_TILE):
        wasm_v128_store(o${M}, vacc${M}x${ABC[0:4]});
        $for C in range(4, CHANNEL_TILE, 4):
          wasm_v128_store(o${M} + ${C}, vacc${M}x${ABC[C:C+4]});
        o${M} += ${CHANNEL_TILE};

      w += ${CHANNEL_TILE * 2};
    }
    $if CHANNEL_TILE > 4:
      // Remainder loop: 4 channels per iteration.
      for (; c >= 4 * sizeof(float); c -= 4 * sizeof(float)) {
        const v128_t vscale = wasm_v128_load(w);

        $for M in range(ROW_TILE):
          v128_t vacc${M} = wasm_v128_load(i${M});
          i${M} += 4;

        const v128_t vbias = wasm_v128_load(w + ${CHANNEL_TILE});

        $for M in range(ROW_TILE):
          vacc${M} = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc${M}));

        $if X86:
          $for M in range(ROW_TILE):
            vacc${M} = wasm_v128_bitselect(vmin, vacc${M}, wasm_f32x4_lt(vacc${M}, vmin));
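
          // The bitselect pair clamps: lanes below vmin were replaced above, and lanes above
          // vmax are replaced next. On x86 this is presumably cheaper than wasm_f32x4_min/max,
          // whose NaN-propagating semantics lower to longer SSE sequences.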
          $for M in range(ROW_TILE):
            vacc${M} = wasm_v128_bitselect(vacc${M}, vmax, wasm_f32x4_le(vacc${M}, vmax));
        $else:
          $for M in range(ROW_TILE):
            vacc${M} = wasm_f32x4_max(vacc${M}, vmin);

          $for M in range(ROW_TILE):
            vacc${M} = wasm_f32x4_min(vacc${M}, vmax);

        $for M in range(ROW_TILE):
          wasm_v128_store(o${M}, vacc${M});
          o${M} += 4;

        w += 4;
      }
    // Tail: 1-3 remaining channels.
    if XNN_UNLIKELY(c != 0) {
      const v128_t vscale = wasm_v128_load(w);

      $for M in range(ROW_TILE):
        v128_t vacc${M} = wasm_v128_load(i${M});
        i${M} = (const float*) ((uintptr_t) i${M} + c);

      const v128_t vbias = wasm_v128_load(w + ${CHANNEL_TILE});

      $for M in range(ROW_TILE):
        vacc${M} = wasm_f32x4_add(vbias, wasm_f32x4_mul(vscale, vacc${M}));

      $if X86:
        $for M in range(ROW_TILE):
          vacc${M} = wasm_v128_bitselect(vmin, vacc${M}, wasm_f32x4_lt(vacc${M}, vmin));

        $for M in range(ROW_TILE):
          vacc${M} = wasm_v128_bitselect(vacc${M}, vmax, wasm_f32x4_le(vacc${M}, vmax));
      $else:
        $for M in range(ROW_TILE):
          vacc${M} = wasm_f32x4_max(vacc${M}, vmin);

        $for M in range(ROW_TILE):
          vacc${M} = wasm_f32x4_min(vacc${M}, vmax);

      if (c & (2 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *((double*) o${M}) = wasm_f64x2_extract_lane(vacc${M}, 0);

        $for M in range(ROW_TILE):
          vacc${M} = wasm_v32x4_shuffle(vacc${M}, vacc${M}, 2, 3, 2, 3);

        $for M in range(ROW_TILE):
          o${M} += 2;
      }
      if (c & (1 * sizeof(float))) {
        $for M in range(ROW_TILE):
          *o${M}++ = wasm_f32x4_extract_lane(vacc${M}, 0);
      }
    }
    // Advance to the next group of ${ROW_TILE} rows.
    $for M in range(ROW_TILE):
      i${M} = (const float*) ((uintptr_t) i${M} + input_increment);
      o${M} = (float*) ((uintptr_t) o${M} + output_increment);
      $if M % 2 == 1:
        if XNN_UNPREDICTABLE(rows < ${ROW_TILE + M + 1}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }
      $elif M != 0:
        if XNN_UNPREDICTABLE(rows <= ${ROW_TILE + M}) {
          i${M} = i${M-1};
          o${M} = o${M-1};
        }

    rows = doz(rows, ${ROW_TILE});  // saturating decrement ("difference or zero")
  } while (rows != 0);
}