// Auto-generated file. Do not edit!
//   Template: src/qs8-gemm/MRx4c8-sse.c.in
//   Generator: tools/xngen
//
// Copyright 2020 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <smmintrin.h>

#include <xnnpack/gemm.h>
#include <xnnpack/math.h>

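// QS8 GEMM microkernel producing a 3x4 output tile ("3x4"), accumulating
// 8 elements of K per step ("c8"), with float requantization ("fp32") and
// fused min/max output clamping ("minmax"). "xw" marks the extended-weights
// variant: packed B is stored pre-sign-extended to int16. The "__avx"
// suffix indicates the SSE4.1 intrinsics below are compiled for AVX targets.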
void xnn_qs8_gemm_xw_minmax_fp32_ukernel_3x4c8__avx(
    size_t mr,
    size_t nc,
    size_t kc,
    const int8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    int8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_qs8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)]) XNN_OOB_READS
{
  assert(mr != 0);
  assert(mr <= 3);
  assert(nc != 0);
  assert(kc != 0);
  assert(kc % sizeof(int8_t) == 0);
  assert(a != NULL);
  assert(w != NULL);
  assert(c != NULL);

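  // The K loop below consumes A in blocks of 8 int8 elements, so kc is
  // rounded up; the final block may read past the end of each A row, which
  // the XNN_OOB_READS annotation declares as intentional. Row pointers are
  // then set up for the 3-row tile: when mr < 3, trailing rows alias the
  // previous row, so their loads stay valid and their stores rewrite the
  // same values.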
  kc = round_up_po2(kc, 8);
  const int8_t* a0 = a;
  int8_t* c0 = c;
  const int8_t* a1 = (const int8_t*) ((uintptr_t) a0 + a_stride);
  int8_t* c1 = (int8_t*) ((uintptr_t) c0 + cm_stride);
  if XNN_UNPREDICTABLE(mr < 2) {
    a1 = a0;
    c1 = c0;
  }
  const int8_t* a2 = (const int8_t*) ((uintptr_t) a1 + a_stride);
  int8_t* c2 = (int8_t*) ((uintptr_t) c1 + cm_stride);
  if XNN_UNPREDICTABLE(mr <= 2) {
    a2 = a1;
    c2 = c1;
  }

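  // Process N in blocks of up to 4 output columns. Each block of packed
  // weights begins with 4 int32 biases; each bias seeds the accumulator for
  // its column in every row (vaccMxN = accumulator for row M, column N).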
  do {
    __m128i vacc0x0 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[0]);
    __m128i vacc0x1 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[1]);
    __m128i vacc0x2 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[2]);
    __m128i vacc0x3 = _mm_cvtsi32_si128((int) ((const int32_t*) w)[3]);
    __m128i vacc1x0 = vacc0x0;
    __m128i vacc1x1 = vacc0x1;
    __m128i vacc1x2 = vacc0x2;
    __m128i vacc1x3 = vacc0x3;
    __m128i vacc2x0 = vacc0x0;
    __m128i vacc2x1 = vacc0x1;
    __m128i vacc2x2 = vacc0x2;
    __m128i vacc2x3 = vacc0x3;
    w = (const void*) ((const int32_t*) w + 4);

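    // K loop: each iteration loads 8 int8 values per A row and sign-extends
    // them to int16. In this "xw" variant the weights are already stored as
    // int16, so each vxbN loads 8 of them directly. _mm_madd_epi16 multiplies
    // int16 pairs and sums adjacent products into int32 lanes, leaving 4
    // partial sums per (row, column) accumulator.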
    size_t k = 0;
    while (k < kc) {
      const __m128i va0 = _mm_loadl_epi64((const __m128i*) a0);
      const __m128i vxa0 = _mm_cvtepi8_epi16(va0);
      a0 += 8;
      const __m128i va1 = _mm_loadl_epi64((const __m128i*) a1);
      const __m128i vxa1 = _mm_cvtepi8_epi16(va1);
      a1 += 8;
      const __m128i va2 = _mm_loadl_epi64((const __m128i*) a2);
      const __m128i vxa2 = _mm_cvtepi8_epi16(va2);
      a2 += 8;

      const __m128i vxb0 = _mm_load_si128((const __m128i*) w);

      vacc0x0 = _mm_add_epi32(vacc0x0, _mm_madd_epi16(vxa0, vxb0));
      vacc1x0 = _mm_add_epi32(vacc1x0, _mm_madd_epi16(vxa1, vxb0));
      vacc2x0 = _mm_add_epi32(vacc2x0, _mm_madd_epi16(vxa2, vxb0));
      const __m128i vxb1 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 8));

      vacc0x1 = _mm_add_epi32(vacc0x1, _mm_madd_epi16(vxa0, vxb1));
      vacc1x1 = _mm_add_epi32(vacc1x1, _mm_madd_epi16(vxa1, vxb1));
      vacc2x1 = _mm_add_epi32(vacc2x1, _mm_madd_epi16(vxa2, vxb1));
      const __m128i vxb2 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 16));

      vacc0x2 = _mm_add_epi32(vacc0x2, _mm_madd_epi16(vxa0, vxb2));
      vacc1x2 = _mm_add_epi32(vacc1x2, _mm_madd_epi16(vxa1, vxb2));
      vacc2x2 = _mm_add_epi32(vacc2x2, _mm_madd_epi16(vxa2, vxb2));
      const __m128i vxb3 = _mm_load_si128((const __m128i*) ((const int16_t*) w + 24));

      vacc0x3 = _mm_add_epi32(vacc0x3, _mm_madd_epi16(vxa0, vxb3));
      vacc1x3 = _mm_add_epi32(vacc1x3, _mm_madd_epi16(vxa1, vxb3));
      vacc2x3 = _mm_add_epi32(vacc2x3, _mm_madd_epi16(vxa2, vxb3));

      w = (const void*) ((const int16_t*) w + 32);
      k += 8 * sizeof(int8_t);
    }

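    // Reduce the 4 partial sums in each accumulator: two rounds of
    // _mm_hadd_epi32 fold the per-column vectors into one vector per row,
    // with lane n holding the complete int32 dot product for column n.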
    const __m128i vacc0x01 = _mm_hadd_epi32(vacc0x0, vacc0x1);
    const __m128i vacc0x23 = _mm_hadd_epi32(vacc0x2, vacc0x3);
    const __m128i vacc1x01 = _mm_hadd_epi32(vacc1x0, vacc1x1);
    const __m128i vacc1x23 = _mm_hadd_epi32(vacc1x2, vacc1x3);
    const __m128i vacc2x01 = _mm_hadd_epi32(vacc2x0, vacc2x1);
    const __m128i vacc2x23 = _mm_hadd_epi32(vacc2x2, vacc2x3);

    __m128i vacc0x0123 = _mm_hadd_epi32(vacc0x01, vacc0x23);
    __m128i vacc1x0123 = _mm_hadd_epi32(vacc1x01, vacc1x23);
    __m128i vacc2x0123 = _mm_hadd_epi32(vacc2x01, vacc2x23);

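    // FP32 requantization: convert the int32 accumulators to float, multiply
    // by the quantization scale, and clamp the upper bound
    // (output_max - output_zero_point) while still in float; _mm_cvtps_epi32
    // then converts back to int32 under the default rounding mode
    // (round-to-nearest-even).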
    __m128 vscaled0x0123 = _mm_cvtepi32_ps(vacc0x0123);
    __m128 vscaled1x0123 = _mm_cvtepi32_ps(vacc1x0123);
    __m128 vscaled2x0123 = _mm_cvtepi32_ps(vacc2x0123);

    const __m128 vscale = _mm_load_ps(params->fp32_sse4.scale);
    vscaled0x0123 = _mm_mul_ps(vscaled0x0123, vscale);
    vscaled1x0123 = _mm_mul_ps(vscaled1x0123, vscale);
    vscaled2x0123 = _mm_mul_ps(vscaled2x0123, vscale);

    const __m128 voutput_max_less_zero_point = _mm_load_ps(params->fp32_sse4.output_max_less_zero_point);
    vscaled0x0123 = _mm_min_ps(vscaled0x0123, voutput_max_less_zero_point);
    vscaled1x0123 = _mm_min_ps(vscaled1x0123, voutput_max_less_zero_point);
    vscaled2x0123 = _mm_min_ps(vscaled2x0123, voutput_max_less_zero_point);

    vacc0x0123 = _mm_cvtps_epi32(vscaled0x0123);
    vacc1x0123 = _mm_cvtps_epi32(vscaled1x0123);
    vacc2x0123 = _mm_cvtps_epi32(vscaled2x0123);

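    // Pack down to int8: _mm_packs_epi32 narrows rows 0/1 (and row 2,
    // duplicated into both halves) to saturated int16, the output zero point
    // is added with saturating int16 adds, and _mm_packs_epi16 narrows to
    // int8. The lower bound is then applied as an int8 max against
    // output_min.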
    const __m128i voutput_zero_point = _mm_load_si128((const __m128i*) params->fp32_sse4.output_zero_point);
    __m128i vacc01x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc0x0123, vacc1x0123), voutput_zero_point);
    __m128i vacc22x0123 = _mm_adds_epi16(_mm_packs_epi32(vacc2x0123, vacc2x0123), voutput_zero_point);

    __m128i vout = _mm_packs_epi16(vacc01x0123, vacc22x0123);

    vout = _mm_max_epi8(vout, _mm_load_si128((const __m128i*) params->fp32_sse4.output_min));

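    // Store the tile: row M's 4 output bytes occupy 32-bit lane M of vout.
    // With nc >= 4, store whole lanes, advance the c pointers to the next
    // column block, and rewind the a pointers by kc for the next pass.
    // Otherwise write the 2-byte and/or 1-byte remainder and finish.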
    if (nc >= 4) {
      *((uint32_t*) c0) = (uint32_t) _mm_cvtsi128_si32(vout);
      *((uint32_t*) c1) = (uint32_t) _mm_extract_epi32(vout, 1);
      *((uint32_t*) c2) = (uint32_t) _mm_extract_epi32(vout, 2);

      c0 = (int8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (int8_t*) ((uintptr_t) c1 + cn_stride);
      c2 = (int8_t*) ((uintptr_t) c2 + cn_stride);

      a0 = (const int8_t*) ((uintptr_t) a0 - kc);
      a1 = (const int8_t*) ((uintptr_t) a1 - kc);
      a2 = (const int8_t*) ((uintptr_t) a2 - kc);

      nc -= 4;
    } else {
      if (nc & 2) {
        *((uint16_t*) c0) = (uint16_t) _mm_extract_epi16(vout, 0);
        c0 += 2;
        *((uint16_t*) c1) = (uint16_t) _mm_extract_epi16(vout, 2);
        c1 += 2;
        *((uint16_t*) c2) = (uint16_t) _mm_extract_epi16(vout, 4);
        c2 += 2;
        vout = _mm_srli_epi32(vout, 16);
      }
      if (nc & 1) {
        *c0 = (int8_t) _mm_extract_epi8(vout, 0);
        *c1 = (int8_t) _mm_extract_epi8(vout, 4);
        *c2 = (int8_t) _mm_extract_epi8(vout, 8);
      }

      nc = 0;
    }
  } while (nc != 0);
}