// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/scalar-utils.h>
#include <xnnpack/gemm.h>

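// Q8 GEMM microkernel with a 2x2 output tile (MR = 2, NR = 2), written in
// portable scalar C. It accumulates uint8 inputs against packed uint8 weights
// in int32, then requantizes the results to uint8 using the fixed-point
// parameters in xnn_q8_gemm_params.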
void xnn_q8_gemm_ukernel_2x2__scalar(
    size_t mr,
    size_t nc,
    size_t kc,
    const uint8_t* restrict a,
    size_t a_stride,
    const void* restrict w,
    uint8_t* restrict c,
    size_t cm_stride,
    size_t cn_stride,
    const union xnn_q8_gemm_params params[restrict static 1])
{
  assert(mr != 0);
  assert(mr <= 2);
  assert(nc != 0);
  assert(kc != 0);

  const uint8_t* a0 = a;
  uint8_t* c0 = c;
  const uint8_t* a1 = (const uint8_t*) ((uintptr_t) a0 + a_stride);
  uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
  if (mr != 2) {
    a1 = a0;
    c1 = c0;
  }

  const int32_t vb_zero_point = params->scalar.kernel_zero_point;

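  // Process the output in blocks of up to 2 columns. Each block of accumulators
  // is seeded with the int32 values (presumably the bias) stored at the head of
  // the packed weights w.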
  do {
    int32_t vacc0x0 = ((const int32_t*) w)[0];
    int32_t vacc0x1 = ((const int32_t*) w)[1];
    int32_t vacc1x0 = vacc0x0;
    int32_t vacc1x1 = vacc0x1;
    w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));

    size_t k = kc;
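    // Multiply-accumulate over the K dimension, one uint8 element per operand
    // at a time. The kernel (weight) zero point is subtracted from each weight
    // before accumulation.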
    do {
      const int32_t va0 = (int32_t) (uint32_t) *a0++;
      const int32_t va1 = (int32_t) (uint32_t) *a1++;

      const uint32_t vb0 = ((const uint8_t*) w)[0];
      const uint32_t vb1 = ((const uint8_t*) w)[1];
      w = (const void*) ((uintptr_t) w + 2 * sizeof(uint8_t));

      const int32_t vxb0 = (int32_t) vb0 - vb_zero_point;
      const int32_t vxb1 = (int32_t) vb1 - vb_zero_point;

      vacc0x0 += va0 * vxb0;
      vacc0x1 += va0 * vxb1;
      vacc1x0 += va1 * vxb0;
      vacc1x1 += va1 * vxb1;

      k -= sizeof(uint8_t);
    } while (k != 0);

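    // Requantize: scale each int32 accumulator by the Q31 fixed-point multiplier
    // (64-bit product, +2^30 rounding, arithmetic shift right by 31), then divide
    // by 2^shift with the remainder-based rounding below.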
    const int32_t vmultiplier = params->scalar.multiplier;
    const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
    const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
    const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
    const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;

    const int64_t vq31rounding = INT64_C(0x40000000);
    const int32_t vq31product0x0 = (int32_t) (uint32_t) ((uint64_t) (vproduct0x0 + vq31rounding) >> 31);
    const int32_t vq31product0x1 = (int32_t) (uint32_t) ((uint64_t) (vproduct0x1 + vq31rounding) >> 31);
    const int32_t vq31product1x0 = (int32_t) (uint32_t) ((uint64_t) (vproduct1x0 + vq31rounding) >> 31);
    const int32_t vq31product1x1 = (int32_t) (uint32_t) ((uint64_t) (vproduct1x1 + vq31rounding) >> 31);

    const int32_t vremainder_mask = params->scalar.remainder_mask;
    const int32_t vremainder0x0 = (vq31product0x0 & vremainder_mask) - (int32_t) (vq31product0x0 < 0);
    const int32_t vremainder0x1 = (vq31product0x1 & vremainder_mask) - (int32_t) (vq31product0x1 < 0);
    const int32_t vremainder1x0 = (vq31product1x0 & vremainder_mask) - (int32_t) (vq31product1x0 < 0);
    const int32_t vremainder1x1 = (vq31product1x1 & vremainder_mask) - (int32_t) (vq31product1x1 < 0);

    const uint32_t vshift = params->scalar.shift;
    const int32_t vremainder_threshold = params->scalar.remainder_threshold;
    int32_t vout0x0 = asr_s32(vq31product0x0, vshift) + (int32_t) (vremainder0x0 > vremainder_threshold);
    int32_t vout0x1 = asr_s32(vq31product0x1, vshift) + (int32_t) (vremainder0x1 > vremainder_threshold);
    int32_t vout1x0 = asr_s32(vq31product1x0, vshift) + (int32_t) (vremainder1x0 > vremainder_threshold);
    int32_t vout1x1 = asr_s32(vq31product1x1, vshift) + (int32_t) (vremainder1x1 > vremainder_threshold);

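    // Clamp to the output range (stored relative to the output zero point),
    // then shift into the uint8 range by adding the output zero point.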
    const int32_t vout_min = params->scalar.output_min_less_zero_point;
    vout0x0 = vout0x0 < vout_min ? vout_min : vout0x0;
    vout0x1 = vout0x1 < vout_min ? vout_min : vout0x1;
    vout1x0 = vout1x0 < vout_min ? vout_min : vout1x0;
    vout1x1 = vout1x1 < vout_min ? vout_min : vout1x1;

    const int32_t vout_max = params->scalar.output_max_less_zero_point;
    vout0x0 = vout0x0 > vout_max ? vout_max : vout0x0;
    vout0x1 = vout0x1 > vout_max ? vout_max : vout0x1;
    vout1x0 = vout1x0 > vout_max ? vout_max : vout1x0;
    vout1x1 = vout1x1 > vout_max ? vout_max : vout1x1;

    const int32_t voutput_zero_point = params->scalar.output_zero_point;
    vout0x0 += voutput_zero_point;
    vout0x1 += voutput_zero_point;
    vout1x0 += voutput_zero_point;
    vout1x1 += voutput_zero_point;

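    // Store the 2x2 output tile and advance to the next column block; if only
    // one column remains, store just the first column of each row and finish.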
    if XNN_LIKELY(nc >= 2) {
      c0[0] = (uint8_t) vout0x0;
      c0[1] = (uint8_t) vout0x1;
      c1[0] = (uint8_t) vout1x0;
      c1[1] = (uint8_t) vout1x1;

      a0 = (const uint8_t*) ((uintptr_t) a0 - kc);
      a1 = (const uint8_t*) ((uintptr_t) a1 - kc);

      c0 = (uint8_t*) ((uintptr_t) c0 + cn_stride);
      c1 = (uint8_t*) ((uintptr_t) c1 + cn_stride);

      nc -= 2;
    } else {
      c0[0] = (uint8_t) vout0x0;
      c1[0] = (uint8_t) vout1x0;

      nc = 0;
    }
  } while (nc != 0);
}