// Copyright (c) Facebook, Inc. and its affiliates.
// All rights reserved.
//
// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <immintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>


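// Reduce four vectors of 4 int32 lanes each into one vector of their
// horizontal sums: lane 0 = sum of x, lane 1 = sum of y, lane 2 = sum of z,
// lane 3 = sum of w.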
static inline __m128i sse_reduce4_i32(__m128i x, __m128i y, __m128i z, __m128i w) {
#if defined(__SSSE3__) && !defined(__ANDROID__)
  // xxyy = ( y2 + y3, y0 + y1, x2 + x3, x0 + x1 )
  const __m128i xxyy = _mm_hadd_epi32(x, y);
  // zzww = ( w2 + w3, w0 + w1, z2 + z3, z0 + z1 )
  const __m128i zzww = _mm_hadd_epi32(z, w);
  // xyzw = ( w0 + w1 + w2 + w3, z0 + z1 + z2 + z3, y0 + y1 + y2 + y3, x0 + x1 + x2 + x3 )
  return _mm_hadd_epi32(xxyy, zzww);
#else
  // xzxz = ( z1 + z3, x1 + x3, z0 + z2, x0 + x2 )
  const __m128i xzxz = _mm_add_epi32(_mm_unpacklo_epi32(x, z), _mm_unpackhi_epi32(x, z));
  // ywyw = ( w1 + w3, y1 + y3, w0 + w2, y0 + y2 )
  const __m128i ywyw = _mm_add_epi32(_mm_unpacklo_epi32(y, w), _mm_unpackhi_epi32(y, w));
  // xyzw = ( w0 + w2 + w1 + w3, z0 + z2 + z1 + z3, y0 + y2 + y1 + y3, x0 + x2 + x1 + x3 )
  return _mm_add_epi32(_mm_unpacklo_epi32(xzxz, ywyw), _mm_unpackhi_epi32(xzxz, ywyw));
#endif
}

void xnn_q8_gemm_ukernel_2x4c8__sse2(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_q8_gemm_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
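  // If only one row is valid (mr == 1), alias row 1 onto row 0: the row-1
  // computation then duplicates row 0 and never touches out-of-bounds memory.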
  if (mr != 2) {
    a1 = a0;
    c1 = c0;
  }

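  // The K loop below consumes A in groups of 8 bytes, advancing the A
  // pointers by round_up_po2(kc, 8) == kc_stride bytes in total; they are
  // rewound by the same amount before the next group of output channels.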
  const size_t kc_stride = round_up_po2(kc, 8);
  const __m128i vb_zero_point = _mm_load_si128((const __m128i*) params->sse2.kernel_zero_point);

  do {
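    // The packed weights begin with 4 int32 bias values for this group of 4
    // output channels; initialize all 2x4 per-(row, channel) accumulators
    // from the biases, then advance w to the 8-bit weight data.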
    __m128i vacc00 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[0]);
    __m128i vacc01 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[1]);
    __m128i vacc02 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[2]);
    __m128i vacc03 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[3]);
    __m128i vacc10 = vacc00;
    __m128i vacc11 = vacc01;
    __m128i vacc12 = vacc02;
    __m128i vacc13 = vacc03;
    w = (const void*) ((uintptr_t) w + 16);

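    // Main loop over K: each iteration consumes 8 bytes of each A row and
    // 8 bytes of each of the 4 weight columns. Bytes are zero-extended to
    // 16 bits (with the kernel zero point subtracted from the weights) so
    // that _mm_madd_epi16 accumulates 8 products per (row, channel) pair.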
    const __m128i vzero = _mm_setzero_si128();
    for (size_t k = 0; k < kc; k += 8 * sizeof(uint8_t)) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_unpacklo_epi8(va0, vzero);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_unpacklo_epi8(va1, vzero);
      a1 += 8;

      const __m128i vb0 = _mm_loadl_epi64((const __m128i*) w);
      const __m128i vxb0 = _mm_sub_epi16(_mm_unpacklo_epi8(vb0, vzero), vb_zero_point);
      const __m128i vb1 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 8));
      const __m128i vxb1 = _mm_sub_epi16(_mm_unpacklo_epi8(vb1, vzero), vb_zero_point);
      const __m128i vb2 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 16));
      const __m128i vxb2 = _mm_sub_epi16(_mm_unpacklo_epi8(vb2, vzero), vb_zero_point);
      const __m128i vb3 = _mm_loadl_epi64((const __m128i*) ((uintptr_t) w + 24));
      const __m128i vxb3 = _mm_sub_epi16(_mm_unpacklo_epi8(vb3, vzero), vb_zero_point);
      w = (const void*) ((uintptr_t) w + 32);

      vacc00 = _mm_add_epi32(vacc00, _mm_madd_epi16(vxa0, vxb0));
      vacc01 = _mm_add_epi32(vacc01, _mm_madd_epi16(vxa0, vxb1));
      vacc02 = _mm_add_epi32(vacc02, _mm_madd_epi16(vxa0, vxb2));
      vacc03 = _mm_add_epi32(vacc03, _mm_madd_epi16(vxa0, vxb3));
      vacc10 = _mm_add_epi32(vacc10, _mm_madd_epi16(vxa1, vxb0));
      vacc11 = _mm_add_epi32(vacc11, _mm_madd_epi16(vxa1, vxb1));
      vacc12 = _mm_add_epi32(vacc12, _mm_madd_epi16(vxa1, vxb2));
      vacc13 = _mm_add_epi32(vacc13, _mm_madd_epi16(vxa1, vxb3));
    }

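    // Each vaccRC holds 4 partial sums for (row R, channel C); reduce each
    // group of four vectors so that lane c of vaccRx0123 is the complete
    // int32 accumulator for row R, channel c.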
    __m128i vacc0x0123 = sse_reduce4_i32(vacc00, vacc01, vacc02, vacc03);
    __m128i vacc1x0123 = sse_reduce4_i32(vacc10, vacc11, vacc12, vacc13);

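    // Requantization: multiply by a Q31 fixed-point multiplier with rounding.
    // SSE2 has no signed 32x32->64-bit multiply, so the products are formed
    // on absolute values with _mm_mul_epu32 (even lanes 0/2, then odd lanes
    // 1/3 after a shuffle) and the sign is restored with an xor/subtract.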
    const __m128i vmultiplier = _mm_load_si128((const __m128i*) params->sse2.multiplier);
    const __m128i vrounding = _mm_load_si128((const __m128i*) params->sse2.rounding);

    const __m128i vnmask0x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc0x0123);
    const __m128i vnmask1x0123 = _mm_cmpgt_epi32(_mm_setzero_si128(), vacc1x0123);

    const __m128i vabsacc0x0123 = _mm_sub_epi32(_mm_xor_si128(vacc0x0123, vnmask0x0123), vnmask0x0123);
    const __m128i vabsacc1x0123 = _mm_sub_epi32(_mm_xor_si128(vacc1x0123, vnmask1x0123), vnmask1x0123);

    const __m128i vabsacc0x1032 = _mm_shuffle_epi32(vabsacc0x0123, _MM_SHUFFLE(2, 3, 0, 1));
    const __m128i vabsacc1x1032 = _mm_shuffle_epi32(vabsacc1x0123, _MM_SHUFFLE(2, 3, 0, 1));

    const __m128i vabsprod0x02 = _mm_mul_epu32(vabsacc0x0123, vmultiplier);
    const __m128i vabsprod1x02 = _mm_mul_epu32(vabsacc1x0123, vmultiplier);

    const __m128i vnmask0x02 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(2, 2, 0, 0));
    const __m128i vnmask1x02 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(2, 2, 0, 0));

    const __m128i vprod0x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x02, vnmask0x02), vnmask0x02);
    const __m128i vprod1x02 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x02, vnmask1x02), vnmask1x02);

    const __m128i vq31prod0x02 = _mm_srli_epi64(_mm_add_epi64(vprod0x02, vrounding), 31);
    const __m128i vq31prod1x02 = _mm_srli_epi64(_mm_add_epi64(vprod1x02, vrounding), 31);

    const __m128i vabsprod0x13 = _mm_mul_epu32(vabsacc0x1032, vmultiplier);
    const __m128i vabsprod1x13 = _mm_mul_epu32(vabsacc1x1032, vmultiplier);

    const __m128i vnmask0x13 = _mm_shuffle_epi32(vnmask0x0123, _MM_SHUFFLE(3, 3, 1, 1));
    const __m128i vnmask1x13 = _mm_shuffle_epi32(vnmask1x0123, _MM_SHUFFLE(3, 3, 1, 1));

    const __m128i vprod0x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod0x13, vnmask0x13), vnmask0x13);
    const __m128i vprod1x13 = _mm_sub_epi64(_mm_xor_si128(vabsprod1x13, vnmask1x13), vnmask1x13);

    const __m128i vq31prod0x13 = _mm_srli_epi64(_mm_add_epi64(vprod0x13, vrounding), 31);
    const __m128i vq31prod1x13 = _mm_srli_epi64(_mm_add_epi64(vprod1x13, vrounding), 31);

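    // Interleave the even-lane (0, 2) and odd-lane (1, 3) results back into
    // a single vector in channel order 0, 1, 2, 3.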
    const __m128i vq31prod0x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod0x02), _mm_castsi128_ps(vq31prod0x13), _MM_SHUFFLE(2, 0, 2, 0)));
    const __m128i vq31prod1x0213 = _mm_castps_si128(_mm_shuffle_ps(
        _mm_castsi128_ps(vq31prod1x02), _mm_castsi128_ps(vq31prod1x13), _MM_SHUFFLE(2, 0, 2, 0)));

    const __m128i vq31prod0x0123 = _mm_shuffle_epi32(vq31prod0x0213, _MM_SHUFFLE(3, 1, 2, 0));
    const __m128i vq31prod1x0123 = _mm_shuffle_epi32(vq31prod1x0213, _MM_SHUFFLE(3, 1, 2, 0));

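    // Divide by 2^shift with round-to-nearest: mask off the bits shifted
    // out, adjust the remainder for negative inputs, and increment the
    // shifted result wherever the remainder exceeds the rounding threshold.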
    const __m128i vremainder_mask = _mm_load_si128((const __m128i*) params->sse2.remainder_mask);

    const __m128i vrem0x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod0x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod0x0123));
    const __m128i vrem1x0123 =
      _mm_add_epi32(_mm_and_si128(vq31prod1x0123, vremainder_mask), _mm_cmpgt_epi32(_mm_setzero_si128(), vq31prod1x0123));

    const __m128i vremainder_threshold = _mm_load_si128((const __m128i*) params->sse2.remainder_threshold);
    const __m128i vshift = _mm_load_si128((const __m128i*) params->sse2.shift);

    vacc0x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod0x0123, vshift), _mm_cmpgt_epi32(vrem0x0123, vremainder_threshold));
    vacc1x0123 =
      _mm_sub_epi32(_mm_sra_epi32(vq31prod1x0123, vshift), _mm_cmpgt_epi32(vrem1x0123, vremainder_threshold));

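    // Convert to 8-bit output: add the output zero point, pack both rows
    // down to unsigned bytes with saturation, and clamp to the
    // [output_min, output_max] range.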
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->sse2.output_zero_point);
    const __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vout = _mm_packus_epi16(vacc01x0123, vacc01x0123);
    vout = _mm_min_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_max));
    vout = _mm_max_epu8(vout, _mm_load_si128((const __m128i*) params->sse2.output_min));

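    // Store: bytes 0-3 of vout hold row 0, bytes 4-7 hold row 1. With at
    // least 4 columns remaining, store 4 bytes per row, rewind the A
    // pointers, and advance the C pointers to the next group of channels;
    // otherwise write out the 1-3 remaining bytes per row.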
    if (nc >= 4) {
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      *((uint32_t*) c1) = (uint32_t) _mm_cvtsi128_si32(_mm_srli_epi64(vout, 32));

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc_stride);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc_stride);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout, 0);
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) _mm_extract_epi16(vout, 2);
        c1 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *((uint8_t*) c0) = (uint8_t) _mm_cvtsi128_si32(vout);
        *((uint8_t*) c1) = (uint8_t) _mm_extract_epi16(vout, 2);
      }

      nc = 0;
    }
  } while (nc != 0);
}