// Auto-generated file. Do not edit!
//   Template: src/qs8-dwconv/unipass-scalar.c.in
//   Generator: tools/xngen
//
// Copyright 2021 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>

#include <xnnpack/dwconv.h>
#include <xnnpack/math.h>
#include <xnnpack/unaligned.h>


void xnn_qc8_dwconv_minmax_fp32_ukernel_up2x3__scalar_imagic(
    size_t channels,
    size_t output_width,
    const int8_t** input,
    const void* weights,
    int8_t* output,
    size_t input_stride,
    size_t output_increment,
    size_t input_offset,
    const int8_t* zero,
    const union xnn_qc8_conv_minmax_params params[restrict XNN_MIN_ELEMENTS(1)])
{
  assert(channels != 0);
  assert(output_width != 0);

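  // Requantization constants for the fp32 "imagic" scheme: the scaled float accumulator is
  // biased by magic_bias, its bits are reinterpreted as an integer, clamped to
  // [magic_min, magic_max], and magic_bias_less_zero_point is subtracted to yield the int8 value.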
  const float vmagic_bias = params->fp32_scalar_imagic.magic_bias;
  const int32_t vmagic_min = params->fp32_scalar_imagic.magic_min;
  const int32_t vmagic_max = params->fp32_scalar_imagic.magic_max;
  const int32_t vmagic_bias_less_zero_point = params->fp32_scalar_imagic.magic_bias_less_zero_point;
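  // Outer loop: one iteration per output pixel along the row.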
  do {
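    // Set up the three input row pointers for this pixel. Rows that point at the
    // zero buffer are padding and are not shifted by input_offset.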
    const int8_t* i0 = input[0];
    assert(i0 != NULL);
    if XNN_UNPREDICTABLE(i0 != zero) {
      i0 = (const int8_t*) ((uintptr_t) i0 + input_offset);
    }
    const int8_t* i1 = input[1];
    assert(i1 != NULL);
    if XNN_UNPREDICTABLE(i1 != zero) {
      i1 = (const int8_t*) ((uintptr_t) i1 + input_offset);
    }
    const int8_t* i2 = input[2];
    assert(i2 != NULL);
    if XNN_UNPREDICTABLE(i2 != zero) {
      i2 = (const int8_t*) ((uintptr_t) i2 + input_offset);
    }
    input = (const int8_t**) ((uintptr_t) input + input_stride);

    size_t c = channels;
    const void* w = weights;
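    // Main loop: process channels in pairs. Each group of packed weights holds
    // 2 int32 biases, then 3x2 int8 kernel taps, then 2 float per-channel scales.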
    for (; c >= 2; c -= 2) {
      int32_t vacc0 = unaligned_indexed_load_s32(w, 0);
      int32_t vacc1 = unaligned_indexed_load_s32(w, 1);

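      // Accumulate 3 kernel rows x 2 channels of int8 inputs and weights in int32.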
      const int32_t vi0x0 = (int32_t) i0[0];
      const int32_t vi0x1 = (int32_t) i0[1];
      i0 += 2;

      const int32_t vk0x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      const int32_t vk0x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[1];

      vacc0 += vi0x0 * vk0x0;
      vacc1 += vi0x1 * vk0x1;

      const int32_t vi1x0 = (int32_t) i1[0];
      const int32_t vi1x1 = (int32_t) i1[1];
      i1 += 2;

      const int32_t vk1x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      const int32_t vk1x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[3];

      vacc0 += vi1x0 * vk1x0;
      vacc1 += vi1x1 * vk1x1;

      const int32_t vi2x0 = (int32_t) i2[0];
      const int32_t vi2x1 = (int32_t) i2[1];
      i2 += 2;

      const int32_t vk2x0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      const int32_t vk2x1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[5];

      vacc0 += vi2x0 * vk2x0;
      vacc1 += vi2x1 * vk2x1;

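      // Advance w past the 2 biases and 6 kernel taps; the per-channel scales follow.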
      w = (const void*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t));

      float vfpacc0 = (float) vacc0;
      float vfpacc1 = (float) vacc1;

      const float vscale0 = unaligned_indexed_load_f32(w, 0);
      const float vscale1 = unaligned_indexed_load_f32(w, 1);
      w = (const void*) ((const float*) w + 2);

      vfpacc0 *= vscale0;
      vfpacc1 *= vscale1;

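      // Convert to int8 via the magic-bias trick: the biased float's low bits hold the
      // rounded integer, so reinterpret, clamp, and remove the bias (less the zero point).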
      vfpacc0 += vmagic_bias;
      vfpacc1 += vmagic_bias;

      int32_t vout0 = (int32_t) float_as_uint32(vfpacc0);
      int32_t vout1 = (int32_t) float_as_uint32(vfpacc1);

      vout0 = math_max_s32(vout0, vmagic_min);
      vout1 = math_max_s32(vout1, vmagic_min);

      vout0 = math_min_s32(vout0, vmagic_max);
      vout1 = math_min_s32(vout1, vmagic_max);

      vout0 -= vmagic_bias_less_zero_point;
      vout1 -= vmagic_bias_less_zero_point;

      output[0] = (int8_t) vout0;
      output[1] = (int8_t) vout1;
      output += 2;
    }
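    // Remainder: at most one channel is left when the channel count is odd.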
    if XNN_UNLIKELY(c != 0) {
      int32_t vacc = unaligned_load_s32(w);

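      // Kernel taps are still packed for 2 channels, so rows 1 and 2 sit at indices 2 and 4.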
      const int32_t vi0 = (int32_t) *i0;
      const int32_t vk0 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[0];
      vacc += vi0 * vk0;
      const int32_t vi1 = (int32_t) *i1;
      const int32_t vk1 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[2];
      vacc += vi1 * vk1;
      const int32_t vi2 = (int32_t) *i2;
      const int32_t vk2 = (int32_t) ((const int8_t*) ((uintptr_t) w + 2 * sizeof(int32_t)))[4];
      vacc += vi2 * vk2;

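      // The per-channel scale follows the 2 biases and 6 taps and may be unaligned.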
      typedef XNN_UNALIGNED float unaligned_float;
      const float vscale = *((const unaligned_float*) ((uintptr_t) w + 2 * sizeof(int32_t) + 6 * sizeof(int8_t)));
      float vfpacc = (float) vacc * vscale;

      vfpacc += vmagic_bias;
      int32_t vout = (int32_t) float_as_uint32(vfpacc);
      vout = math_max_s32(vout, vmagic_min);
      vout = math_min_s32(vout, vmagic_max);
      vout -= vmagic_bias_less_zero_point;

      *output++ = (int8_t) vout;
    }

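    // Advance the output pointer by the caller-provided increment to reach the next pixel.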
    output = (int8_t*) ((uintptr_t) output + output_increment);
  } while (--output_width != 0);
}