// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/zip.h>


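// Zips (interleaves) m input rows of n bytes of 32-bit elements each: the j-th
// output group holds element j from every input row, i.e. the kernel
// transposes an m x (n/4) matrix of uint32_t values.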
void xnn_x32_zip_xm_ukernel__sse2(
    size_t n,
    size_t m,
    const uint32_t* input,
    uint32_t* output)
{
  assert(n != 0);
  assert(n % 4 == 0);
  assert(m >= 4);

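  // group_increment is the byte size of one output group (m 32-bit elements).
  // input_increment advances w by 3 rows so that x, y, z, and w span 4
  // consecutive input rows; output_increment rewinds the output pointer to the
  // next block of 4 columns in the first output group. last_input and
  // last_output clamp the final (possibly overlapping) group of rows when m is
  // not a multiple of 4.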
  const uint32_t* w = input;
  const size_t group_increment = m * 4;
  const size_t input_increment = n * 3;
  const size_t output_increment = 16 - m * n;
  const uint32_t* last_input = (const uint32_t*) ((uintptr_t) input + n * (m - 1));
  uint32_t* last_output = (uint32_t*) ((uintptr_t) output + (m * 4 - 16));

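  // Process the input 4 rows at a time: x, y, z, and w point at 4 consecutive
  // rows, with w clamped to the last row so the final group overlaps the
  // previous one instead of reading out of bounds.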
  for (size_t i = 0; i < m; i += 4) {
    w = (const uint32_t*) ((uintptr_t) w + input_increment);
    if (w >= last_input) {
      w = last_input;
    }
    const uint32_t* z = (const uint32_t*) ((uintptr_t) w - n);
    const uint32_t* y = (const uint32_t*) ((uintptr_t) z - n);
    const uint32_t* x = (const uint32_t*) ((uintptr_t) y - n);

    size_t k = n;
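    // Main loop: transpose a 4x4 block of 32-bit elements (16 bytes per row)
    // per iteration.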
    while (k >= 16) {
      const __m128i vx = _mm_loadu_si128((const __m128i*) x);
      x += 4;
      const __m128i vy = _mm_loadu_si128((const __m128i*) y);
      y += 4;
      const __m128i vz = _mm_loadu_si128((const __m128i*) z);
      z += 4;
      const __m128i vw = _mm_loadu_si128((const __m128i*) w);
      w += 4;

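      // 4x4 transpose: interleave 32-bit lanes, then 64-bit lanes, so that
      // vxyzwJ = { x[j], y[j], z[j], w[j] }.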
      const __m128i vxy_lo = _mm_unpacklo_epi32(vx, vy);
      const __m128i vxy_hi = _mm_unpackhi_epi32(vx, vy);
      const __m128i vzw_lo = _mm_unpacklo_epi32(vz, vw);
      const __m128i vzw_hi = _mm_unpackhi_epi32(vz, vw);

      const __m128i vxyzw0 = _mm_unpacklo_epi64(vxy_lo, vzw_lo);
      const __m128i vxyzw1 = _mm_unpackhi_epi64(vxy_lo, vzw_lo);
      const __m128i vxyzw2 = _mm_unpacklo_epi64(vxy_hi, vzw_hi);
      const __m128i vxyzw3 = _mm_unpackhi_epi64(vxy_hi, vzw_hi);

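      // Store each transposed row into the current column position of 4
      // consecutive output groups.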
      _mm_storeu_si128((__m128i*) output, vxyzw0);
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      _mm_storeu_si128((__m128i*) output, vxyzw1);
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      _mm_storeu_si128((__m128i*) output, vxyzw2);
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      _mm_storeu_si128((__m128i*) output, vxyzw3);
      output = (uint32_t*) ((uintptr_t) output + group_increment);

      k -= 16;
    }
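    // Remainder: handle 2 leftover elements (8 bytes) and/or 1 leftover
    // element (4 bytes) per row.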
    if XNN_UNLIKELY(k != 0) {
      if (k & 8) {
        const __m128i vx = _mm_loadl_epi64((const __m128i*) x);
        x += 2;
        const __m128i vy = _mm_loadl_epi64((const __m128i*) y);
        y += 2;
        const __m128i vz = _mm_loadl_epi64((const __m128i*) z);
        z += 2;
        const __m128i vw = _mm_loadl_epi64((const __m128i*) w);
        w += 2;

        const __m128i vxy = _mm_unpacklo_epi32(vx, vy);
        const __m128i vzw = _mm_unpacklo_epi32(vz, vw);

        const __m128i vxyzw_lo = _mm_unpacklo_epi64(vxy, vzw);
        const __m128i vxyzw_hi = _mm_unpackhi_epi64(vxy, vzw);

        _mm_storeu_si128((__m128i*) output, vxyzw_lo);
        output = (uint32_t*) ((uintptr_t) output + group_increment);

        _mm_storeu_si128((__m128i*) output, vxyzw_hi);
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
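      // Copy the last element of each row with scalar loads; only w needs to
      // be advanced, because x, y, and z are recomputed from w at the top of
      // the next group.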
      if (k & 4) {
        const uint32_t vx = *x;
        const uint32_t vy = *y;
        const uint32_t vz = *z;
        const uint32_t vw = *w++;

        output[0] = vx;
        output[1] = vy;
        output[2] = vz;
        output[3] = vw;
        output = (uint32_t*) ((uintptr_t) output + group_increment);
      }
    }
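    // Rewind output to the first output group, advanced by 4 columns; clamp to
    // last_output so the final overlapping row group writes into the last 4
    // columns.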
    output = (uint32_t*) ((uintptr_t) output + output_increment);
    if (output > last_output) {
      output = last_output;
    }
  }
}