// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/scalar-utils.h>
#include <xnnpack/igemm.h>
11
xnn_q8_igemm_ukernel_2x2__scalar(size_t mr,size_t nc,size_t kc,size_t ks,const uint8_t ** restrict a,const void * restrict w,uint8_t * restrict c,size_t cm_stride,size_t cn_stride,size_t a_offset,const uint8_t * zero,const union xnn_q8_gemm_params params[restrict static1])12 void xnn_q8_igemm_ukernel_2x2__scalar(
13 size_t mr,
14 size_t nc,
15 size_t kc,
16 size_t ks,
17 const uint8_t** restrict a,
18 const void* restrict w,
19 uint8_t* restrict c,
20 size_t cm_stride,
21 size_t cn_stride,
22 size_t a_offset,
23 const uint8_t* zero,
24 const union xnn_q8_gemm_params params[restrict static 1])
25 {
26 assert(mr != 0);
27 assert(mr <= 2);
28 assert(nc != 0);
29 assert(kc != 0);
30 assert(ks != 0);
31 assert(ks % (2 * sizeof(void*)) == 0);
32
33 uint8_t* c0 = c;
34 uint8_t* c1 = (uint8_t*) ((uintptr_t) c0 + cm_stride);
35 if (mr != 2) {
36 c1 = c0;
37 }
38
39 const int32_t vb_zero_point = params->scalar.kernel_zero_point;
40
41 do {
42 int32_t vacc0x0 = ((const int32_t*) w)[0];
43 int32_t vacc0x1 = ((const int32_t*) w)[1];
44 int32_t vacc1x0 = vacc0x0;
45 int32_t vacc1x1 = vacc0x1;
46 w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t));
47
48 size_t p = ks;
49 do {
50 const uint8_t* restrict a0 = a[0];
51 if XNN_UNPREDICTABLE(a0 != zero) {
52 a0 = (const uint8_t*) ((uintptr_t) a0 + a_offset);
53 }
54 const uint8_t* restrict a1 = a[1];
55 if XNN_UNPREDICTABLE(a1 != zero) {
56 a1 = (const uint8_t*) ((uintptr_t) a1 + a_offset);
57 }
58 a += 2;
59
60 size_t k = kc;
61 do {
62 const int32_t va0 = (int32_t) (uint32_t) *a0++;
63 const int32_t va1 = (int32_t) (uint32_t) *a1++;
64
65 const uint32_t vb0 = ((const uint8_t*) w)[0];
66 const uint32_t vb1 = ((const uint8_t*) w)[1];
67 w = (const void*) ((uintptr_t) w + 2 * sizeof(uint8_t));
68
69 const int32_t vxb0 = (int32_t) vb0 - vb_zero_point;
70 const int32_t vxb1 = (int32_t) vb1 - vb_zero_point;
71
72 vacc0x0 += va0 * vxb0;
73 vacc0x1 += va0 * vxb1;
74 vacc1x0 += va1 * vxb0;
75 vacc1x1 += va1 * vxb1;
76
77 } while (--k != 0);
78 p -= 2 * sizeof(void*);
79 } while (p != 0);
80
81 const int32_t vmultiplier = params->scalar.multiplier;
82 const int64_t vproduct0x0 = (int64_t) vacc0x0 * (int64_t) vmultiplier;
83 const int64_t vproduct0x1 = (int64_t) vacc0x1 * (int64_t) vmultiplier;
84 const int64_t vproduct1x0 = (int64_t) vacc1x0 * (int64_t) vmultiplier;
85 const int64_t vproduct1x1 = (int64_t) vacc1x1 * (int64_t) vmultiplier;
86
87 const int64_t vq31rounding = INT64_C(0x40000000);
88 const int32_t vq31product0x0 = (int32_t) (uint32_t) ((uint64_t) (vproduct0x0 + vq31rounding) >> 31);
89 const int32_t vq31product0x1 = (int32_t) (uint32_t) ((uint64_t) (vproduct0x1 + vq31rounding) >> 31);
90 const int32_t vq31product1x0 = (int32_t) (uint32_t) ((uint64_t) (vproduct1x0 + vq31rounding) >> 31);
91 const int32_t vq31product1x1 = (int32_t) (uint32_t) ((uint64_t) (vproduct1x1 + vq31rounding) >> 31);
92
93 const int32_t vremainder_mask = params->scalar.remainder_mask;
94 const int32_t vremainder0x0 = (vq31product0x0 & vremainder_mask) - (int32_t) (vq31product0x0 < 0);
95 const int32_t vremainder0x1 = (vq31product0x1 & vremainder_mask) - (int32_t) (vq31product0x1 < 0);
96 const int32_t vremainder1x0 = (vq31product1x0 & vremainder_mask) - (int32_t) (vq31product1x0 < 0);
97 const int32_t vremainder1x1 = (vq31product1x1 & vremainder_mask) - (int32_t) (vq31product1x1 < 0);
98
99 const uint32_t vshift = params->scalar.shift;
100 const int32_t vremainder_threshold = params->scalar.remainder_threshold;
101 int32_t vout0x0 = asr_s32(vq31product0x0, vshift) + (int32_t) (vremainder0x0 > vremainder_threshold);
102 int32_t vout0x1 = asr_s32(vq31product0x1, vshift) + (int32_t) (vremainder0x1 > vremainder_threshold);
103 int32_t vout1x0 = asr_s32(vq31product1x0, vshift) + (int32_t) (vremainder1x0 > vremainder_threshold);
104 int32_t vout1x1 = asr_s32(vq31product1x1, vshift) + (int32_t) (vremainder1x1 > vremainder_threshold);
105
106 const int32_t vout_min = params->scalar.output_min_less_zero_point;
107 vout0x0 = vout0x0 < vout_min ? vout_min : vout0x0;
108 vout0x1 = vout0x1 < vout_min ? vout_min : vout0x1;
109 vout1x0 = vout1x0 < vout_min ? vout_min : vout1x0;
110 vout1x1 = vout1x1 < vout_min ? vout_min : vout1x1;
111
112 const int32_t vout_max = params->scalar.output_max_less_zero_point;
113 vout0x0 = vout0x0 > vout_max ? vout_max : vout0x0;
114 vout0x1 = vout0x1 > vout_max ? vout_max : vout0x1;
115 vout1x0 = vout1x0 > vout_max ? vout_max : vout1x0;
116 vout1x1 = vout1x1 > vout_max ? vout_max : vout1x1;
117
118 const int32_t voutput_zero_point = params->scalar.output_zero_point;
119 vout0x0 += voutput_zero_point;
120 vout0x1 += voutput_zero_point;
121 vout1x0 += voutput_zero_point;
122 vout1x1 += voutput_zero_point;
123
124 if XNN_LIKELY(nc >= 2) {
125 c1[0] = (uint8_t) vout1x0;
126 c1[1] = (uint8_t) vout1x1;
127 c0[0] = (uint8_t) vout0x0;
128 c0[1] = (uint8_t) vout0x1;
129
130 c1 += cn_stride;
131 c0 += cn_stride;
132
133 a = (const uint8_t**restrict) ((uintptr_t) a - ks);
134
135 nc -= 2;
136 } else {
137 c1[0] = (uint8_t) vout1x0;
138 c0[0] = (uint8_t) vout0x0;
139
140 nc = 0;
141 }
142 } while (nc != 0);
143 }
144