// Auto-generated file. Do not edit!
//   Template: src/s8-ibilinear/sse.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <emmintrin.h>

#include <xnnpack/common.h>
#include <xnnpack/ibilinear.h>

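// Bilinear interpolation micro-kernel for signed 8-bit samples, processing up
// to 16 channels per inner iteration. For each output pixel, `input` supplies
// four pointers to the corner samples (top-left, top-right, bottom-left,
// bottom-right; see the vtl/vtr/vbl/vbr names below) and `weights` supplies
// two int16 interpolation weights. Judging from the arithmetic below, the
// weights use 11 fractional bits (2048 == 1.0).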
void xnn_s8_ibilinear_ukernel__sse2_c16(
    size_t output_pixels,
    size_t channels,
    const int8_t** restrict input,
    size_t input_offset,
    const int16_t* restrict weights,
    int8_t* restrict output,
    size_t output_increment) XNN_OOB_READS
{
  assert(output_pixels != 0);
  assert(channels != 0);

  do {
    const int8_t* i0 = (const int8_t*) ((uintptr_t) input[0] + input_offset);
    const int8_t* i1 = (const int8_t*) ((uintptr_t) input[1] + input_offset);
    const int8_t* i2 = (const int8_t*) ((uintptr_t) input[2] + input_offset);
    const int8_t* i3 = (const int8_t*) ((uintptr_t) input[3] + input_offset);
    input += 4;

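    // Load both interpolation weights for this pixel as a single 32-bit value,
    // then broadcast the low int16 (horizontal weight) into all lanes of
    // valphah and the high int16 (vertical weight) into all lanes of valphav.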
    const __m128i valpha = _mm_cvtsi32_si128(*((const int*) weights));
    weights += 2;
    __m128i valphah = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(0, 0, 0, 0));
    valphah = _mm_unpacklo_epi64(valphah, valphah);
    __m128i valphav = _mm_shufflelo_epi16(valpha, _MM_SHUFFLE(1, 1, 1, 1));
    valphav = _mm_unpacklo_epi64(valphav, valphav);

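    // Turn each 32-bit lane of valphah into the pair {alpha_h, 2048 - alpha_h}:
    // XORing with 0xFFFF0000 complements the odd (high) int16 lanes, and adding
    // 0x0801 to those lanes maps ~a to 2048 - a (because ~a + 1 == -a). This
    // lets _mm_madd_epi16 blend the left and right samples in one instruction.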
    valphah = _mm_xor_si128(valphah, _mm_set1_epi32(0xFFFF0000));
    valphah = _mm_add_epi16(valphah, _mm_set1_epi32(0x08010000));

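    // 2^21 is half of the 2^22 fixed-point scale of the accumulators below, so
    // adding it before the final arithmetic shift right by 22 rounds to nearest.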
    const __m128i vrounding = _mm_set1_epi32(0x00200000);

    size_t c = channels;
    for (; c >= 16 * sizeof(int8_t); c -= 16 * sizeof(int8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      __m128i vtl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i0 + 8));
      __m128i vtr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i1 + 8));
      __m128i vbl89ABCDEF = _mm_loadl_epi64((const __m128i*) (i2 + 8));
      __m128i vbr89ABCDEF = _mm_loadl_epi64((const __m128i*) (i3 + 8));
      i0 += 16;
      i1 += 16;
      i2 += 16;
      i3 += 16;

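      // Sign-extend int8 to int16 the SSE2 way: duplicate every byte into both
      // halves of a 16-bit lane, then arithmetic-shift right by 8
      // (_mm_cvtepi8_epi16 is only available from SSE4.1 on).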
      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);
      vtl89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vtl89ABCDEF, vtl89ABCDEF), 8);
      vtr89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vtr89ABCDEF, vtr89ABCDEF), 8);
      vbl89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vbl89ABCDEF, vbl89ABCDEF), 8);
      vbr89ABCDEF = _mm_srai_epi16(_mm_unpacklo_epi8(vbr89ABCDEF, vbr89ABCDEF), 8);

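      // Horizontal pass: interleaving right and left samples and multiply-
      // adding with {alpha_h, 2048 - alpha_h} gives
      // right * alpha_h + left * (2048 - alpha_h) per 32-bit lane, i.e. the
      // top edge interpolated in Q11. The vd* vectors are the bottom-minus-top
      // deltas, interpolated horizontally the same way.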
      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdr89ABCDEF = _mm_sub_epi16(vbr89ABCDEF, vtr89ABCDEF);
      const __m128i vt89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);
      const __m128i vdl89ABCDEF = _mm_sub_epi16(vbl89ABCDEF, vtl89ABCDEF);
      const __m128i vtCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vtr89ABCDEF, vtl89ABCDEF), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd89AB = _mm_madd_epi16(_mm_unpacklo_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);
      const __m128i vdCDEF = _mm_madd_epi16(_mm_unpackhi_epi16(vdr89ABCDEF, vdl89ABCDEF), valphah);

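      // Vertical pass: vacc = vd * alpha_v in Q22. SSE2 has no 32-bit x 16-bit
      // multiply, so the low 32 bits of the product are pieced together from
      // 16-bit partial products: _mm_mulhi_epu16 provides the high halves
      // (shifted up by 16) and _mm_mullo_epi16 the low halves.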
      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);
      __m128i vacc89AB = _mm_slli_epi32(_mm_mulhi_epu16(vd89AB, valphav), 16);
      __m128i vaccCDEF = _mm_slli_epi32(_mm_mulhi_epu16(vdCDEF, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);
      vacc89AB = _mm_add_epi16(_mm_mullo_epi16(vd89AB, valphav), vacc89AB);
      vaccCDEF = _mm_add_epi16(_mm_mullo_epi16(vdCDEF, valphav), vaccCDEF);

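      // Add the horizontally interpolated top edge, rescaled from Q11 to Q22,
      // so the accumulator holds top + (bottom - top) * alpha_v in Q22.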
      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);
      vacc89AB = _mm_add_epi32(_mm_slli_epi32(vt89AB, 11), vacc89AB);
      vaccCDEF = _mm_add_epi32(_mm_slli_epi32(vtCDEF, 11), vaccCDEF);

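      // Round to nearest by adding 2^21, then drop the 22 fractional bits.
      // _mm_add_epi16 is safe here: the low 16 bits of vrounding are zero, so
      // no carry ever needs to cross a 16-bit lane boundary.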
      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);
      vacc89AB = _mm_srai_epi32(_mm_add_epi16(vacc89AB, vrounding), 22);
      vaccCDEF = _mm_srai_epi32(_mm_add_epi16(vaccCDEF, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);
      const __m128i vacc89ABCDEF = _mm_packs_epi32(vacc89AB, vaccCDEF);

      const __m128i vo0123456789ABCDEF = _mm_packs_epi16(vacc01234567, vacc89ABCDEF);

      _mm_storeu_si128((__m128i*) output, vo0123456789ABCDEF);
      output += 16;
    }
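    // Same interpolation for one remaining block of 8 channels.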
    for (; c >= 8 * sizeof(int8_t); c -= 8 * sizeof(int8_t)) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      i0 += 8;
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      i1 += 8;
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      i2 += 8;
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);
      i3 += 8;

      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);

      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);

      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);

      const __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

      _mm_storel_epi64((__m128i*) output, vo01234567);
      output += 8;
    }
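    // Tail of 1-7 channels: compute a full 8-lane result (the XNN_OOB_READS
    // annotation marks that this kernel may read up to 8 bytes from each input
    // pointer even when fewer channels remain) and store only the valid bytes.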
    if XNN_UNLIKELY(c != 0) {
      __m128i vtl01234567 = _mm_loadl_epi64((const __m128i*) i0);
      __m128i vtr01234567 = _mm_loadl_epi64((const __m128i*) i1);
      __m128i vbl01234567 = _mm_loadl_epi64((const __m128i*) i2);
      __m128i vbr01234567 = _mm_loadl_epi64((const __m128i*) i3);

      vtl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtl01234567, vtl01234567), 8);
      vtr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vtr01234567, vtr01234567), 8);
      vbl01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbl01234567, vbl01234567), 8);
      vbr01234567 = _mm_srai_epi16(_mm_unpacklo_epi8(vbr01234567, vbr01234567), 8);

      const __m128i vdr01234567 = _mm_sub_epi16(vbr01234567, vtr01234567);
      const __m128i vt0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vtr01234567, vtl01234567), valphah);
      const __m128i vdl01234567 = _mm_sub_epi16(vbl01234567, vtl01234567);
      const __m128i vt4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vtr01234567, vtl01234567), valphah);

      const __m128i vd0123 = _mm_madd_epi16(_mm_unpacklo_epi16(vdr01234567, vdl01234567), valphah);
      const __m128i vd4567 = _mm_madd_epi16(_mm_unpackhi_epi16(vdr01234567, vdl01234567), valphah);

      __m128i vacc0123 = _mm_slli_epi32(_mm_mulhi_epu16(vd0123, valphav), 16);
      __m128i vacc4567 = _mm_slli_epi32(_mm_mulhi_epu16(vd4567, valphav), 16);

      vacc0123 = _mm_add_epi16(_mm_mullo_epi16(vd0123, valphav), vacc0123);
      vacc4567 = _mm_add_epi16(_mm_mullo_epi16(vd4567, valphav), vacc4567);

      vacc0123 = _mm_add_epi32(_mm_slli_epi32(vt0123, 11), vacc0123);
      vacc4567 = _mm_add_epi32(_mm_slli_epi32(vt4567, 11), vacc4567);

      vacc0123 = _mm_srai_epi32(_mm_add_epi16(vacc0123, vrounding), 22);
      vacc4567 = _mm_srai_epi32(_mm_add_epi16(vacc4567, vrounding), 22);

      const __m128i vacc01234567 = _mm_packs_epi32(vacc0123, vacc4567);

      __m128i vo01234567 = _mm_packs_epi16(vacc01234567, vacc01234567);

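      // Store the remaining 4, 2, and/or 1 bytes according to the bits of the
      // channel count, shifting consumed lanes out of the register as we go.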
      if (c & (4 * sizeof(int8_t))) {
        *((uint32_t*) output) = (uint32_t) _mm_cvtsi128_si32(vo01234567);
        output += 4;
        vo01234567 = _mm_srli_epi64(vo01234567, 32);
      }
      uint32_t vo0123 = (uint32_t) _mm_cvtsi128_si32(vo01234567);
      if (c & (2 * sizeof(int8_t))) {
        *((uint16_t*) output) = (uint16_t) vo0123;
        output += 2;
        vo0123 >>= 16;
      }
      if (c & (1 * sizeof(int8_t))) {
        *output++ = (uint8_t) vo0123;
      }
    }

    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_pixels != 0);
}