// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c2-wasmsimd-dot16x2.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


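// QC8 GEMM microkernel: 4 rows of A by 4 columns of B per tile (4x4), with K
// consumed in pairs of int8 elements (c2). Products are formed by
// sign-extending int8 to int16 and using i32x4.dot_i16x8 ("dot16x2"); weights
// are loaded 64 bits at a time ("ld64").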
void xnn_qc8_gemm_minmax_fp32_ukernel_4x4c2__wasmsimd_dot16x2_ld64(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 4);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

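  // Activations are consumed in pairs of int8 values, so round KC up to a
  // multiple of 2; the packed weights are padded to match.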
  kc = round_up_po2(kc, 2);
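  // Set up per-row input and output pointers. When MR < 4, unused rows alias
  // the previous row, so their redundant computation stays in bounds and their
  // stores land on already-written data.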
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }
  const int8_t* a3 = (const int8_t*) ((uintptr_t) a2 + a_stride);
  int8_t* c3 = (int8_t*) ((uintptr_t) c2 + cm_stride);
  if XNN_UNPREDICTABLE(mr != 4) {
    a3 = a2;
    c3 = c2;
  }

  do {
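    // Initialize all four row accumulators with the bias, which is stored at
    // the start of each packed-weights group.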
    v128_t vacc0x0123 = wasm_v128_load(w);
    v128_t vacc1x0123 = vacc0x0123;
    v128_t vacc2x0123 = vacc0x0123;
    v128_t vacc3x0123 = vacc0x0123;
    w = (const void*) ((const int32_t*) w + 4);

    size_t k = kc;
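    // Main loop: process 8 K values (4 int8 pairs) per iteration.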
    while (k >= 8 * sizeof(int8_t)) {
      const v128_t vxa0 = wasm_i16x8_load8x8((const v128_t*) a0);
      a0 += 8;
      const v128_t vxa1 = wasm_i16x8_load8x8((const v128_t*) a1);
      a1 += 8;
      const v128_t vxa2 = wasm_i16x8_load8x8((const v128_t*) a2);
      a2 += 8;
      const v128_t vxa3 = wasm_i16x8_load8x8((const v128_t*) a3);
      a3 += 8;

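      // For each pair of K values: broadcast the 32-bit activation pair to
      // every lane, then i32x4.dot_i16x8 multiplies it against the matching
      // weight pair for each of the 4 output columns, summing the two
      // products into each 32-bit lane.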
      const v128_t vxb0 = wasm_i16x8_load8x8(w);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));
      const v128_t vxb1 = wasm_i16x8_load8x8((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));
      const v128_t vxb2 = wasm_i16x8_load8x8((const int8_t*) w + 16);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
      const v128_t vxb3 = wasm_i16x8_load8x8((const int8_t*) w + 24);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 3, 3, 3, 3), vxb3));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 3, 3, 3, 3), vxb3));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 3, 3, 3, 3), vxb3));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 3, 3, 3, 3), vxb3));

      w = (const void*) ((const int8_t*) w + 32);
      k -= 8 * sizeof(int8_t);
    }
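    // Remainder: 2, 4, or 6 leftover K values (KC was rounded up to 2), i.e.
    // 1 to 3 int8 pairs.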
    if (k != 0) {
      const v128_t vxa0 = wasm_i16x8_load8x8(a0);
      a0 = (const int8_t*) ((uintptr_t) a0 + k);
      const v128_t vxa1 = wasm_i16x8_load8x8(a1);
      a1 = (const int8_t*) ((uintptr_t) a1 + k);
      const v128_t vxa2 = wasm_i16x8_load8x8(a2);
      a2 = (const int8_t*) ((uintptr_t) a2 + k);
      const v128_t vxa3 = wasm_i16x8_load8x8(a3);
      a3 = (const int8_t*) ((uintptr_t) a3 + k);

      const v128_t vxb0 = wasm_i16x8_load8x8(w);
      w = (const void*) ((const int8_t*) w + 8);

      vacc0x0123 = wasm_i32x4_add(vacc0x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 0, 0, 0, 0), vxb0));
      vacc1x0123 = wasm_i32x4_add(vacc1x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 0, 0, 0, 0), vxb0));
      vacc2x0123 = wasm_i32x4_add(vacc2x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 0, 0, 0, 0), vxb0));
      vacc3x0123 = wasm_i32x4_add(vacc3x0123,
        wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 0, 0, 0, 0), vxb0));

      if (k > 2 * sizeof(int8_t)) {
        const v128_t vxb1 = wasm_i16x8_load8x8(w);
        w = (const void*) ((const int8_t*) w + 8);

        vacc0x0123 = wasm_i32x4_add(vacc0x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 1, 1, 1, 1), vxb1));
        vacc1x0123 = wasm_i32x4_add(vacc1x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 1, 1, 1, 1), vxb1));
        vacc2x0123 = wasm_i32x4_add(vacc2x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 1, 1, 1, 1), vxb1));
        vacc3x0123 = wasm_i32x4_add(vacc3x0123,
          wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 1, 1, 1, 1), vxb1));

        if (k > 4 * sizeof(int8_t)) {
          const v128_t vxb2 = wasm_i16x8_load8x8(w);
          w = (const void*) ((const int8_t*) w + 8);

          vacc0x0123 = wasm_i32x4_add(vacc0x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa0, vxa0, 2, 2, 2, 2), vxb2));
          vacc1x0123 = wasm_i32x4_add(vacc1x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa1, vxa1, 2, 2, 2, 2), vxb2));
          vacc2x0123 = wasm_i32x4_add(vacc2x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa2, vxa2, 2, 2, 2, 2), vxb2));
          vacc3x0123 = wasm_i32x4_add(vacc3x0123,
            wasm_i32x4_dot_i16x8(wasm_v32x4_shuffle(vxa3, vxa3, 2, 2, 2, 2), vxb2));
        }
      }
    }

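    // Requantization: convert the int32 accumulators to fp32 and apply the
    // per-channel scales (QC8), which follow the weights in the packed buffer.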
    vacc0x0123 = wasm_f32x4_convert_i32x4(vacc0x0123);
    vacc1x0123 = wasm_f32x4_convert_i32x4(vacc1x0123);
    vacc2x0123 = wasm_f32x4_convert_i32x4(vacc2x0123);
    vacc3x0123 = wasm_f32x4_convert_i32x4(vacc3x0123);

    const v128_t vscale0123 = wasm_v128_load(w);
    w = (const void*) ((const float*) w + 4);
    vacc0x0123 = wasm_f32x4_mul(vacc0x0123, vscale0123);
    vacc1x0123 = wasm_f32x4_mul(vacc1x0123, vscale0123);
    vacc2x0123 = wasm_f32x4_mul(vacc2x0123, vscale0123);
    vacc3x0123 = wasm_f32x4_mul(vacc3x0123, vscale0123);

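    // Round to integer with the "magic bias" trick: adding the bias places
    // the rounded result in the low bits of the float representation. The
    // i32x4_max against magic_min then applies the output minimum (float and
    // integer orderings agree on these positive values), and subtracting
    // magic_bias_less_output_zero_point yields an int32 with the output zero
    // point already added.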
    const v128_t vmagic_bias = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias);
    vacc0x0123 = wasm_f32x4_add(vacc0x0123, vmagic_bias);
    vacc1x0123 = wasm_f32x4_add(vacc1x0123, vmagic_bias);
    vacc2x0123 = wasm_f32x4_add(vacc2x0123, vmagic_bias);
    vacc3x0123 = wasm_f32x4_add(vacc3x0123, vmagic_bias);

    const v128_t vmagic_min = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_min);
    vacc0x0123 = wasm_i32x4_max(vacc0x0123, vmagic_min);
    vacc1x0123 = wasm_i32x4_max(vacc1x0123, vmagic_min);
    vacc2x0123 = wasm_i32x4_max(vacc2x0123, vmagic_min);
    vacc3x0123 = wasm_i32x4_max(vacc3x0123, vmagic_min);

    const v128_t vmagic_bias_less_output_zero_point = wasm_v128_load64_splat(params->fp32_wasmsimd.magic_bias_less_output_zero_point);
    vacc0x0123 = wasm_i32x4_sub(vacc0x0123, vmagic_bias_less_output_zero_point);
    vacc1x0123 = wasm_i32x4_sub(vacc1x0123, vmagic_bias_less_output_zero_point);
    vacc2x0123 = wasm_i32x4_sub(vacc2x0123, vmagic_bias_less_output_zero_point);
    vacc3x0123 = wasm_i32x4_sub(vacc3x0123, vmagic_bias_less_output_zero_point);

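    // Saturating-narrow int32 -> int16 -> int8 and clamp to the output
    // maximum. After narrowing, 32-bit lane i of vout holds the 4 int8
    // outputs of row i.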
    v128_t vacc01x0123 = wasm_i16x8_narrow_i32x4(vacc0x0123, vacc1x0123);
    v128_t vacc23x0123 = wasm_i16x8_narrow_i32x4(vacc2x0123, vacc3x0123);

    v128_t vout = wasm_i8x16_narrow_i16x8(vacc01x0123, vacc23x0123);

    const v128_t voutput_max = wasm_v128_load64_splat(params->fp32_wasmsimd.output_max);
    vout = wasm_i8x16_min(vout, voutput_max);

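    // Full tile: store 4 bytes per row. The float lane extract/store below is
    // effectively a raw 32-bit move of the packed int8 quadruple, not a
    // numeric conversion.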
    if (nc >= 4) {
      *((float*) c0) = (float) wasm_f32x4_extract_lane(vout, 0);
      *((float*) c1) = (float) wasm_f32x4_extract_lane(vout, 1);
      *((float*) c2) = (float) wasm_f32x4_extract_lane(vout, 2);
      *((float*) c3) = (float) wasm_f32x4_extract_lane(vout, 3);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);
      c3 = (int8_t*) ((uintptr_t) c3 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);
      a3 = (const int8_t*) ((uintptr_t) a3 - kc);

      nc -= 4;
    } else {
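      // Partial tile (nc < 4): spill each row's packed bytes to a scalar and
      // store 2 and/or 1 bytes depending on the remaining column count.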
      uint32_t vout0 = wasm_i32x4_extract_lane(vout, 0);
      uint32_t vout1 = wasm_i32x4_extract_lane(vout, 1);
      uint32_t vout2 = wasm_i32x4_extract_lane(vout, 2);
      uint32_t vout3 = wasm_i32x4_extract_lane(vout, 3);
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) vout0;
        vout0 >>= 16;
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) vout1;
        vout1 >>= 16;
        c1 += 2;
        *((uint16_t*) c2) = (uint16_t) vout2;
        vout2 >>= 16;
        c2 += 2;
        *((uint16_t*) c3) = (uint16_t) vout3;
        vout3 >>= 16;
        c3 += 2;
      }
      if (nc & 1) {
        *c0 = (int8_t) vout0;
        *c1 = (int8_t) vout1;
        *c2 = (int8_t) vout2;
        *c3 = (int8_t) vout3;
      }

      nc = 0;
    }
  } while (nc != 0);
}