// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <wasm_simd128.h>

#include <xnnpack/zip.h>

void xnn_x32_zip_xm_ukernel__wasmsimd(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % sizeof(uint32_t) == 0);
  assert(m >= 4);

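  // The kernel copies 32-bit elements through float pointers; no arithmetic is done on the values.
  // group_increment steps the output forward by one element position across all m rows, while
  // output_increment rewinds it to the start of the next group of 4 rows.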
  const float* w = (const float*) input;
  float* o = (float*) output;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 4 * sizeof(uint32_t) - m * n;
  const float* last_input = (const float*) ((uintptr_t) input + n * (m - 1));
  float* last_output = (float*) ((uintptr_t) output + (m * 4 - 4 * sizeof(uint32_t)));

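  // Process the m rows in groups of 4. After each group the w pointer has advanced past its row,
  // so adding input_increment (3 * n) moves it to the last row of the next group; x, y, and z are
  // the three rows directly above it. When m is not a multiple of 4, w is clamped to the last row
  // and the final group overlaps the previous one.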
  for (size_t i = 0; i < m; i += 4) {
    w = (const float*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const float* z = (const float*) ((uintptr_t) w - n);
    const float* y = (const float*) ((uintptr_t) z - n);
    const float* x = (const float*) ((uintptr_t) y - n);

    size_t k = n;
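    // Main loop: zip 4 elements from each of the 4 rows per iteration via a 4x4 transpose.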
    while (k >= 4 * sizeof(uint32_t)) {
      const v128_t vx = wasm_v128_load((const v128_t*) x);
      x += 4;
      const v128_t vy = wasm_v128_load((const v128_t*) y);
      y += 4;
      const v128_t vz = wasm_v128_load((const v128_t*) z);
      z += 4;
      const v128_t vw = wasm_v128_load((const v128_t*) w);
      w += 4;

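      // Transpose the 4x4 block of 32-bit elements: interleave the x/y and z/w pairs,
      // then combine 64-bit halves so each vxyzwN holds one element from every row.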
      const v128_t vxy_lo = wasm_v32x4_shuffle(vx, vy, 0, 4, 1, 5);
      const v128_t vxy_hi = wasm_v32x4_shuffle(vx, vy, 2, 6, 3, 7);
      const v128_t vzw_lo = wasm_v32x4_shuffle(vz, vw, 0, 4, 1, 5);
      const v128_t vzw_hi = wasm_v32x4_shuffle(vz, vw, 2, 6, 3, 7);

      const v128_t vxyzw0 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 0, 1, 4, 5);
      const v128_t vxyzw1 = wasm_v32x4_shuffle(vxy_lo, vzw_lo, 2, 3, 6, 7);
      const v128_t vxyzw2 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 0, 1, 4, 5);
      const v128_t vxyzw3 = wasm_v32x4_shuffle(vxy_hi, vzw_hi, 2, 3, 6, 7);

      wasm_v128_store(o, vxyzw0);
      o = (float*) ((uintptr_t) o + group_increment);

      wasm_v128_store(o, vxyzw1);
      o = (float*) ((uintptr_t) o + group_increment);

      wasm_v128_store(o, vxyzw2);
      o = (float*) ((uintptr_t) o + group_increment);

      wasm_v128_store(o, vxyzw3);
      o = (float*) ((uintptr_t) o + group_increment);

      k -= 4 * sizeof(uint32_t);
    }
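    // Remainder: fewer than 4 elements per row remain; handle a block of 2, then a single element.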
    if XNN_UNLIKELY(k != 0) {
      if (k & (2 * sizeof(uint32_t))) {
        const double vx = *((const double*) x);
        x += 2;
        const double vy = *((const double*) y);
        y += 2;
        const double vz = *((const double*) z);
        z += 2;
        const double vw = *((const double*) w);
        w += 2;

        const v128_t vxy = wasm_f64x2_make(vx, vy);
        const v128_t vzw = wasm_f64x2_make(vz, vw);

        const v128_t vxyzw_lo = wasm_v32x4_shuffle(vxy, vzw, 0, 2, 4, 6);
        const v128_t vxyzw_hi = wasm_v32x4_shuffle(vxy, vzw, 1, 3, 5, 7);

        wasm_v128_store(o, vxyzw_lo);
        o = (float*) ((uintptr_t) o + group_increment);

        wasm_v128_store(o, vxyzw_hi);
        o = (float*) ((uintptr_t) o + group_increment);
      }
      if (k & (1 * sizeof(uint32_t))) {
        const float vx = *x;
        const float vy = *y;
        const float vz = *z;
        const float vw = *w++;

        o[0] = vx;
        o[1] = vy;
        o[2] = vz;
        o[3] = vw;
        o = (float*) ((uintptr_t) o + group_increment);
      }
    }
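    // Rewind the output pointer to the start of the next group of 4 rows; clamp it for the
    // overlapping final group when m is not a multiple of 4.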
    o = (float*) ((uintptr_t) o + output_increment);
    if (o > last_output) {
      o = last_output;
    }
  }
}