// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

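// Note: this is a source template: lines beginning with $ are directives for
// XNNPACK's xngen generator, and ${...} expressions are substituted when the
// concrete microkernels are generated. The assertion below exists because the
// accumulators are 128-bit v128_t vectors holding 4 floats, so the tile width
// NR must be a multiple of 4.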
$assert NR % 4 == 0
$ABC = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"
#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>


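// The generated function name encodes the configuration: an optional "inc"
// selects the variant that resumes from a caller-provided accumulator buffer,
// the activation suffix selects LINEAR/RELU/MINMAX post-processing,
// ${MR}x${NR} is the output tile computed per call, and "s4" marks the
// shuffled variant that rotates the A registers between the four sub-steps of
// each k-block instead of splatting one A lane at a time.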
$assert ACTIVATION in ["LINEAR", "RELU", "MINMAX"]
$ACTIVATION_SUFFIX = {"LINEAR": ""}.get(ACTIVATION, "_" + ACTIVATION.lower())
$ARCH_SUFFIX = "" if ACTIVATION in ["LINEAR", "RELU"] else "_x86" if X86 else "_arm"
$PARAMS = {"LINEAR": "xnn_f32_default_params", "RELU": "xnn_f32_relu_params", "MINMAX": "xnn_f32_minmax_params"}[ACTIVATION]
void xnn_f32_gemm${"inc" if INC else ""}${ACTIVATION_SUFFIX}_ukernel_${MR}x${NR}s4__wasmsimd${ARCH_SUFFIX}(
    size_t mr,
    size_t nc,
    size_t kc,
    const float*restrict a,
    size_t a_stride,
    const float*restrict w,
    float*restrict c,
    size_t cm_stride,
    size_t cn_stride,
    $if INC:
      const float*restrict acc,
    const union ${PARAMS} params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(mr != 0);
  assert(mr <= ${MR});
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(float) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);
  $if INC:
    assert(acc != NULL);

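  // Set up one A (input) row pointer and one C (output) row pointer per tile
  // row. When fewer than ${MR} rows remain (mr < ${MR}), the excess pointers
  // are clamped to the previous row, so their loads and stores stay in bounds
  // and are merely redundant.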
  const float* a0 = a;
  float* c0 = c;
  $for M in range(1, MR):
    const float* a${M} = (const float*) ((uintptr_t) a${M-1} + a_stride);
    float* c${M} = (float*) ((uintptr_t) c${M-1} + cm_stride);
    $if M % 2 == 0:
      if XNN_UNPREDICTABLE(mr <= ${M}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $elif M + 1 == MR:
      if XNN_UNPREDICTABLE(mr != ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }
    $else:
      if XNN_UNPREDICTABLE(mr < ${M+1}) {
        a${M} = a${M-1};
        c${M} = c${M-1};
      }

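  // For the ARM-tuned MINMAX variant the clamping bounds are hoisted out of
  // the main loop; the x86-tuned variant instead loads them right before use,
  // presumably a better trade-off for register pressure on that target.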
  $if ACTIVATION == "MINMAX" and not X86:
    const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
    const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
  do {
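    // Initialize the ${MR}x${NR} accumulator tile: either resume from the acc
    // buffer (the "inc" variant), or broadcast the bias, stored as the first
    // ${NR} elements of the packed weights w, into every tile row.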
    $if INC:
      $for M in range(MR):
        $for N in range(0, NR, 4):
          v128_t vacc${M}x${ABC[N:N+4]} = wasm_v128_load(acc + ${M*NR+N});
      acc += ${MR*NR};
    $else:
      $for N in range(0, NR, 4):
        v128_t vacc0x${ABC[N:N+4]} = wasm_v128_load(w + ${N});
      $for M in range(1, MR):
        $for N in range(0, NR, 4):
          v128_t vacc${M}x${ABC[N:N+4]} = vacc0x${ABC[N:N+4]};
      w += ${NR};

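    // Main loop: process 4 columns of A (4 k-steps) per iteration. In each of
    // the 4 sub-steps, multiply the A registers with an ${NR}-wide panel of
    // packed B and accumulate, then rotate each A register left by one lane
    // (shuffle 1, 2, 3, 0) so every A element visits every lane. The B panels
    // are laid out to match this rotation by the packing code, which avoids
    // the per-lane splats a non-s4 kernel would need.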
    size_t k = kc;
    while (k >= 4 * sizeof(float)) {
      $for M in range(MR):
        v128_t va${M} = wasm_v128_load(a${M});
        a${M} += 4;

      $for L in range(4):

        $for N in range(0, NR, 4):
          const v128_t vb${ABC[N:N+4]}c${L} = wasm_v128_load(w + ${L * NR + N});

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(va${M}, vb${ABC[N:N+4]}c${L}));

        $if L + 1 != 4:
          $for M in range(MR):
            va${M} = wasm_v32x4_shuffle(va${M}, va${M}, 1, 2, 3, 0);

      w += ${4 * NR};
      k -= 4 * sizeof(float);
    }
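    // Remainder loop for kc % 4 != 0: the A loads below may read up to 3
    // elements past the valid k range, so each product is masked instead.
    // Packed B is zero-padded in those positions; wasm_v128_andnot zeroes the
    // A lanes wherever the corresponding B lane compares equal to zero,
    // keeping the stale A values out of the accumulators.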
    if XNN_UNLIKELY(k != 0) {
      $for M in range(MR):
        v128_t va${M} = wasm_v128_load(a${M});
        a${M} = (const float*) ((uintptr_t) a${M} + k);

      const v128_t vzero = wasm_f32x4_const_splat(0.0f);
      $for L in range(4):

        $for N in range(0, NR, 4):
          const v128_t vb${ABC[N:N+4]}c${L} = wasm_v128_load(w + ${L * NR + N});

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_add(vacc${M}x${ABC[N:N+4]}, wasm_f32x4_mul(wasm_v128_andnot(va${M}, wasm_f32x4_eq(vb${ABC[N:N+4]}c${L}, vzero)), vb${ABC[N:N+4]}c${L}));

        $if L + 1 != 4:
          $for M in range(MR):
            va${M} = wasm_v32x4_shuffle(va${M}, va${M}, 1, 2, 3, 0);

      w += ${4 * NR};
    }

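    // Apply the activation. MINMAX clamps the tile to [min, max] (pmin/pmax
    // on the x86-tuned variant, min/max on the ARM-tuned one); RELU clamps at
    // zero with an integer max, which is valid because IEEE-754 floats that
    // are >= 0.0f order correctly when compared as signed integers.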
    $if ACTIVATION == "MINMAX":
      $if X86:
        const v128_t vmin = wasm_v128_load64_splat(params->wasmsimd.min);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_pmax(vmin, vacc${M}x${ABC[N:N+4]});

        const v128_t vmax = wasm_v128_load64_splat(params->wasmsimd.max);
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_pmin(vmax, vacc${M}x${ABC[N:N+4]});
      $else:
        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_max(vacc${M}x${ABC[N:N+4]}, vmin);

        $for N in range(0, NR, 4):
          $for M in range(MR):
            vacc${M}x${ABC[N:N+4]} = wasm_f32x4_min(vacc${M}x${ABC[N:N+4]}, vmax);
    $elif ACTIVATION == "RELU":
      const v128_t vzero = wasm_i32x4_const_splat(0);
      $for N in range(0, NR, 4):
        $for M in range(MR):
          vacc${M}x${ABC[N:N+4]} = wasm_i32x4_max(vacc${M}x${ABC[N:N+4]}, vzero);

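    // Store the tile. In the common case a full ${NR}-wide row is written for
    // each tile row, each C pointer advances by cn_stride, and each A pointer
    // rewinds by kc for the next column block.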
    if XNN_LIKELY(nc >= ${NR}) {
      $for M in reversed(range(MR)):
        wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
        $for N in range(4, NR, 4):
          wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});
        c${M} = (float*) ((uintptr_t) c${M} + cn_stride);

      $for M in reversed(range(MR)):
        a${M} = (const float*) ((uintptr_t) a${M} - kc);

      nc -= ${NR};
    } else {
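      // Column tail: write the final nc < ${NR} columns by decomposing nc
      // into powers of two. After each partial store, the surviving high
      // lanes are shifted down so the next, smaller store reads from the
      // start of the register again.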
      $for LOG2N in reversed(range(NR.bit_length())):
        $if NR != 1 << LOG2N:
          if (nc & ${1 << LOG2N}) {
            $if LOG2N >= 2:
              $for M in reversed(range(MR)):
                wasm_v128_store(c${M}, vacc${M}x${ABC[0:4]});
                $for N in range(4, 1 << LOG2N, 4):
                  wasm_v128_store(c${M} + ${N}, vacc${M}x${ABC[N:N+4]});

              $for M in reversed(range(MR)):
                $for N in range(0, 1 << (LOG2N - 1), 4):
                  vacc${M}x${ABC[N:N+4]} = vacc${M}x${ABC[N + (1 << LOG2N):N + (1 << LOG2N)+4]};

              $for M in reversed(range(MR)):
                c${M} += ${1 << LOG2N};
            $elif LOG2N == 1:
              $for M in reversed(range(MR)):
                *((double*) c${M}) = wasm_f64x2_extract_lane(vacc${M}x${ABC[0:4]}, 0);

              $for M in reversed(range(MR)):
                vacc${M}x${ABC[0:4]} = wasm_v32x4_shuffle(vacc${M}x${ABC[0:4]}, vacc${M}x${ABC[0:4]}, 2, 3, 2, 3);

              $for M in reversed(range(MR)):
                c${M} += 2;
            $elif LOG2N == 0:
              $for M in reversed(range(MR)):
                *c${M} = wasm_f32x4_extract_lane(vacc${M}x${ABC[0:4]}, 0);
          }

      nc = 0;
    }
  } while (nc != 0);
}