// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/compute.h>
#include <xnnpack/indirection.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/operator.h>
#include <xnnpack/pack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>


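// Computes one spatial output dimension of the convolution:
//   effective_kernel = (kernel - 1) * dilation + 1
//   output = max(padded_input - effective_kernel, 0) / subsampling + 1
// doz() is "difference or zero", i.e. saturating subtraction.
// Worked example: a 3x3 kernel with dilation 1 and stride (subsampling) 2 on an input of
// height 112 padded by 1 on top and bottom (padded height 114) yields (114 - 3) / 2 + 1 = 56 rows.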
static inline size_t compute_output_dimension(
    size_t padded_input_dimension,
    size_t kernel_dimension,
    size_t dilation_dimension,
    size_t subsampling_dimension)
{
  const size_t effective_kernel_dimension = (kernel_dimension - 1) * dilation_dimension + 1;
  return doz(padded_input_dimension, effective_kernel_dimension) / subsampling_dimension + 1;
}

enum xnn_status xnn_create_convolution2d_nchw_f32(
    uint32_t input_padding_top,
    uint32_t input_padding_right,
    uint32_t input_padding_bottom,
    uint32_t input_padding_left,
    uint32_t kernel_height,
    uint32_t kernel_width,
    uint32_t subsampling_height,
    uint32_t subsampling_width,
    uint32_t dilation_height,
    uint32_t dilation_width,
    uint32_t groups,
    size_t group_input_channels,
    size_t group_output_channels,
    size_t input_channel_stride,
    size_t output_channel_stride,
    const float* kernel,
    const float* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_operator_t* convolution_op_out)
{
  xnn_operator_t convolution_op = NULL;
  enum xnn_status status = xnn_status_uninitialized;

  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to create %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
    goto error;
  }

  status = xnn_status_invalid_parameter;

  if (kernel_width == 0 || kernel_height == 0) {
    xnn_log_error(
      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), kernel_width, kernel_height);
    goto error;
  }

  if (subsampling_width == 0 || subsampling_height == 0) {
    xnn_log_error(
      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " subsampling: subsampling dimensions must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), subsampling_width, subsampling_height);
    goto error;
  }

  if (dilation_width == 0 || dilation_height == 0) {
    xnn_log_error(
      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " dilation: dilation dimensions must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), dilation_width, dilation_height);
    goto error;
  }

  if (groups == 0) {
    xnn_log_error(
      "failed to create %s operator with %" PRIu32 " groups: number of groups must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), groups);
    goto error;
  }

  if (group_input_channels == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu input channels per group: number of channels must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_input_channels);
    goto error;
  }

  if (group_output_channels == 0) {
    xnn_log_error(
      "failed to create %s operator with %zu output channels per group: number of channels must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_output_channels);
    goto error;
  }

  const size_t input_channels = groups * group_input_channels;
  if (input_channel_stride < input_channels) {
    xnn_log_error(
      "failed to create %s operator with input channel stride of %zu: "
      "stride must be at least as large as the number of input channels (%" PRIu32 "x%zu)",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
      input_channel_stride, groups, group_input_channels);
    goto error;
  }

  const size_t output_channels = groups * group_output_channels;
  if (output_channel_stride < output_channels) {
    xnn_log_error(
      "failed to create %s operator with output channel stride of %zu: "
      "stride must be at least as large as the number of output channels (%" PRIu32 "x%zu)",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
      output_channel_stride, groups, group_output_channels);
    goto error;
  }

  if (isnan(output_min)) {
    xnn_log_error(
      "failed to create %s operator with NaN output lower bound: lower bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
    goto error;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to create %s operator with NaN output upper bound: upper bound must be non-NaN",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
    goto error;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create %s operator with [%.7g, %.7g] output range: lower bound must be below upper bound",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), output_min, output_max);
    goto error;
  }

  if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
    xnn_log_error(
      "failed to create depthwise %s operator with %zu input channels per group: "
      "depthwise convolution must have exactly 1 input channel per group",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), group_input_channels);
    goto error;
  }

  status = xnn_status_unsupported_parameter;

  enum xnn_ukernel_type ukernel_type;
  struct dwconv2d_chw_parameters* dwconv2d_parameters = NULL;
  // Supported cases:
  // + 1x1 convolution (no padding, no groups)
  // + 3x3 stride-2 convolution with 3 input channels and NHWC input layout
  // + 3x3 stride-2 depthwise convolution with padding 1 on all sides (top padding may also be 0)
  // + 3x3 stride-1 depthwise convolution with padding 1 on all sides
  // + 5x5 stride-2 depthwise convolution with padding 2 on all sides (top padding may also be 1)
  // + 5x5 stride-1 depthwise convolution with padding 2 on all sides
  const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
  const bool is_1x1 = kernel_width == 1 && kernel_height == 1 && subsampling_height == 1 && subsampling_width == 1;
  const bool is_3x3 = kernel_width == 3 && kernel_height == 3 && dilation_height == 1 && dilation_width == 1;
  const bool is_5x5 = kernel_width == 5 && kernel_height == 5 && dilation_height == 1 && dilation_width == 1;
  const bool nhwc_input = (flags & XNN_FLAG_INPUT_NHWC) != 0;
  if (is_1x1 && !any_padding && !nhwc_input && groups == 1) {
    ukernel_type = xnn_ukernel_type_spmm;
  } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
    input_padding_top == 1 && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
    nhwc_input && groups == 1)
  {
    ukernel_type = xnn_ukernel_type_conv2d_hwc2chw;
  } else if (is_3x3 && subsampling_height == 1 && subsampling_width == 1 &&
    input_padding_top == 1 && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_3x3;
  } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
    (input_padding_top == 0 || input_padding_top == 1) && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_3x3s2;
  } else if (is_5x5 && subsampling_height == 1 && subsampling_width == 1 &&
    input_padding_top == 2 && input_padding_left == 2 && input_padding_bottom == 2 && input_padding_right == 2 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_5x5;
  } else if (is_5x5 && subsampling_height == 2 && subsampling_width == 2 &&
    (input_padding_top == 1 || input_padding_top == 2) && input_padding_left == 2 && input_padding_bottom == 2 && input_padding_right == 2 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv2d_parameters = &xnn_params.f32.dwconv2d_chw_5x5s2;
  } else {
    xnn_log_error(
      "failed to create %s operator with %" PRIu32 "x%" PRIu32 " kernel, %" PRIu32 "x%" PRIu32 " subsampling, %" PRIu32 "x%" PRIu32 " dilation"
      ", %" PRIu32 "+%" PRIu32 "x%" PRIu32 "+%" PRIu32 " padding, %" PRIu32 "x%zu input channels, and %" PRIu32 "x%zu output channels: "
      "only selected convolution parameters are supported",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
      kernel_width, kernel_height, subsampling_width, subsampling_height, dilation_width, dilation_height,
      input_padding_top, input_padding_left, input_padding_bottom, input_padding_right,
      groups, group_input_channels, groups, group_output_channels);
    goto error;
  }

  status = xnn_status_out_of_memory;

  convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
  if (convolution_op == NULL) {
    xnn_log_error(
      "failed to allocate %zu bytes for %s operator descriptor",
      sizeof(struct xnn_operator), xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
    goto error;
  }

  switch (ukernel_type) {
    case xnn_ukernel_type_spmm:
    {
      assert(kernel_height == 1);
      assert(kernel_width == 1);
      assert(groups == 1);

      size_t num_nonzeroes = 0;
      size_t num_nonzero_blocks2 = 0;
      size_t num_nonzero_blocks4 = 0;
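      // Count non-zero kernel elements three ways: individually, in blocks of 2 consecutive
      // output channels, and in blocks of 4, to decide whether the 1-, 2-, or 4-channel blocked
      // SpMM microkernel yields the densest packing.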
      for (size_t oc = 0; oc < round_down_po2(group_output_channels, 4); oc += 4) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const size_t row0_nonzero = (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
          const size_t row1_nonzero = (size_t) (kernel[(oc + 1) * group_input_channels + ic] != 0.0f);
          const size_t row2_nonzero = (size_t) (kernel[(oc + 2) * group_input_channels + ic] != 0.0f);
          const size_t row3_nonzero = (size_t) (kernel[(oc + 3) * group_input_channels + ic] != 0.0f);
          num_nonzeroes += row0_nonzero + row1_nonzero + row2_nonzero + row3_nonzero;
          num_nonzero_blocks2 += (row0_nonzero | row1_nonzero) + (row2_nonzero | row3_nonzero);
          num_nonzero_blocks4 += (row0_nonzero | row1_nonzero | row2_nonzero | row3_nonzero);
        }
      }
      const size_t num_block4_nonzeroes = num_nonzeroes;
      for (size_t oc = round_down_po2(group_output_channels, 4); oc < round_down_po2(group_output_channels, 2); oc += 2) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const size_t row0_nonzero = (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
          const size_t row1_nonzero = (size_t) (kernel[(oc + 1) * group_input_channels + ic] != 0.0f);
          num_nonzeroes += row0_nonzero + row1_nonzero;
          num_nonzero_blocks2 += (row0_nonzero | row1_nonzero);
        }
      }
      const size_t num_block2_nonzeroes = num_nonzeroes;
      for (size_t oc = round_down_po2(group_output_channels, 2); oc < group_output_channels; oc++) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          num_nonzeroes += (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
        }
      }
      size_t output_channels_block_size = 1;
      size_t num_output_channel_blocks = group_output_channels;
      size_t num_nonzero_values = num_nonzeroes;
      size_t num_nonzero_blocks = num_nonzeroes;
      const struct spmm_parameters* spmm_parameters = &xnn_params.f32.spmm;
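      // Prefer a blocked microkernel only when its non-zero blocks are ~90% dense on average:
      // num_block4_nonzeroes * 5 >= num_nonzero_blocks4 * 18 requires at least 18/5 = 3.6 of the
      // 4 channels in an average non-zero block to be non-zero; the 2-channel test below likewise
      // requires at least 9/5 = 1.8 of 2 channels.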
      if (num_block4_nonzeroes * 5 >= num_nonzero_blocks4 * 18 && xnn_params.f32.spmm4.ukernel != NULL) {
        // 4-channel blocks have 90%+ non-zeroes

        output_channels_block_size = 4;
        num_output_channel_blocks = num_output_channel_blocks / 4 + num_output_channel_blocks % 4;
        spmm_parameters = &xnn_params.f32.spmm4;
        // Non-zeroes which don't fit into whole 4-channel blocks, processed one-by-one
        const size_t num_remaining_nonzeroes = num_nonzeroes - num_block4_nonzeroes;
        num_nonzero_values = num_nonzero_blocks4 * 4 + num_remaining_nonzeroes;
        num_nonzero_blocks = num_nonzero_blocks4 + num_remaining_nonzeroes;
      } else if (num_block2_nonzeroes * 5 >= num_nonzero_blocks2 * 9 && xnn_params.f32.spmm2.ukernel != NULL) {
        // 2-channel blocks have 90%+ non-zeroes

        output_channels_block_size = 2;
        num_output_channel_blocks = num_output_channel_blocks / 2 + num_output_channel_blocks % 2;
        spmm_parameters = &xnn_params.f32.spmm2;
        // Non-zeroes which don't fit into whole 2-channel blocks, processed one-by-one
        const size_t num_remaining_nonzeroes = num_nonzeroes - num_block2_nonzeroes;
        num_nonzero_values = num_nonzero_blocks2 * 2 + num_remaining_nonzeroes;
        num_nonzero_blocks = num_nonzero_blocks2 + num_remaining_nonzeroes;
      }

      // Sparse representation of weights consists of four components:
      // 1. An array of float values storing non-zero kernel elements, and all (group_output_channels) bias elements.
      //    All elements within a non-zero block are assumed to be non-zero.
      // 2. An array of int32_t values storing the increment of the input pointer after each processed tile. This
      //    array is derived from the scaled differences in array 4 by the setup function.
      // 3. An array of uint32_t values storing the number of non-zero kernel elements per each output channel.
      // 4. An array of int32_t values storing the scaled [by sizeof(input element)] difference between input channels
      //    corresponding to successive non-zero blocks.
      const size_t packed_weights_size = num_output_channel_blocks * sizeof(uint32_t) +
        (num_nonzero_blocks * 2) * sizeof(int32_t) + (num_nonzero_values + group_output_channels) * sizeof(float);
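      // Packed weights layout (matching the pointer arithmetic below):
      //   [ biases + non-zero values : (num_nonzero_values + group_output_channels) floats ]
      //   [ input_increments         : num_nonzero_blocks int32_t values                   ]
      //   [ output_channel_nonzeros  : num_output_channel_blocks uint32_t values           ]
      //   [ input_channel_diffs      : num_nonzero_blocks int32_t values                   ]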

      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error(
          "failed to allocate %zu bytes for %s operator packed weights",
          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
        goto error;
      }
      convolution_op->num_nonzero_values = num_nonzero_values;
      convolution_op->num_nonzero_blocks = num_nonzero_blocks;
      convolution_op->num_output_channel_blocks = num_output_channel_blocks;

      float* nonzero_values = convolution_op->packed_weights;
      int32_t* input_increments = (int32_t*) (nonzero_values + num_nonzero_values + group_output_channels);
      uint32_t* output_channel_nonzeros = (uint32_t*) (input_increments + num_nonzero_blocks);
      int32_t* input_channel_diffs = (int32_t*) (output_channel_nonzeros + num_output_channel_blocks);
      memset(output_channel_nonzeros, 0, num_output_channel_blocks * sizeof(uint32_t));

      status = xnn_status_unsupported_parameter;

      size_t first_ic = 0, last_ic = 0;
      bool first_nonzero = true;
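      // First pass: pack whole blocks of output_channels_block_size output channels. Each block
      // starts with its bias values, followed by the non-zero weight blocks; input_channel_diffs
      // records the (scaled) jump between the input channels of successive non-zero blocks.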
      for (size_t ocb = 0; ocb < round_down_po2(group_output_channels, output_channels_block_size); ocb += output_channels_block_size) {
        if XNN_LIKELY(bias != NULL) {
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            *nonzero_values++ = bias[ocb + oco];
          }
        } else {
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            *nonzero_values++ = 0.0f;
          }
        }
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          bool is_nonzero_block = false;
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            is_nonzero_block |= (kernel[(ocb + oco) * group_input_channels + ic] != 0.0f);
          }
          if (is_nonzero_block) {
            for (size_t oco = 0; oco < output_channels_block_size; oco++) {
              *nonzero_values++ = kernel[(ocb + oco) * group_input_channels + ic];
            }
            if (first_nonzero) {
              first_ic = ic;
            } else {
              const int64_t diff = (int64_t) ((uint64_t) ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
              if (diff != (int64_t) (int32_t) diff) {
                xnn_log_error("failed to convert kernel to sparse representation: "
                  "scaled difference in input channels exceeds int32_t range");
                goto error;
              }
              *input_channel_diffs++ = (int32_t) diff;
            }
            first_nonzero = false;
            last_ic = ic;
            *output_channel_nonzeros += 1;
          }
        }
        output_channel_nonzeros += 1;
      }
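      // Second pass: pack the remaining output channels one at a time (block size 1).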
      for (size_t oc = round_down_po2(group_output_channels, output_channels_block_size); oc < group_output_channels; oc++) {
        if XNN_LIKELY(bias != NULL) {
          *nonzero_values++ = bias[oc];
        } else {
          *nonzero_values++ = 0.0f;
        }
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const float weight = kernel[oc * group_input_channels + ic];
          if (weight != 0.0f) {
            *nonzero_values++ = weight;
            if (first_nonzero) {
              first_ic = ic;
            } else {
              const int64_t diff = (int64_t) ((uint64_t) ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
              if (diff != (int64_t) (int32_t) diff) {
                xnn_log_error("failed to convert kernel to sparse representation: "
                  "scaled difference in input channels exceeds int32_t range");
                goto error;
              }
              *input_channel_diffs++ = (int32_t) diff;
            }
            first_nonzero = false;
            last_ic = ic;
            *output_channel_nonzeros += 1;
          }
        }
        output_channel_nonzeros += 1;
      }
      // If there are any non-zero elements, we have to return to the initial input channel.
      if (!first_nonzero) {
        const int64_t diff = (int64_t) ((uint64_t) first_ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
        if (diff != (int64_t) (int32_t) diff) {
          xnn_log_error("failed to convert kernel to sparse representation: "
            "scaled difference in input channels exceeds int32_t range");
          goto error;
        }
        *input_channel_diffs++ = (int32_t) diff;
      }
      convolution_op->first_input_channel = first_ic;

      convolution_op->ukernel.spmm = (struct xnn_ukernel_spmm) {
        .function = spmm_parameters->ukernel,
        .mr = spmm_parameters->mr,
      };

      break;
    }
    case xnn_ukernel_type_conv2d_hwc2chw:
    {
      assert(groups == 1);

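      // This path handles the dense 3x3 stride-2 convolution that reads NHWC input (typically
      // the 3-channel network input image) and writes NCHW output. Weights are packed with the
      // output channels rounded up to the microkernel's channel tile.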
      const size_t packed_group_output_channels =
        round_up(group_output_channels, xnn_params.f32.conv_hwc2chw_3x3c3s2.output_channel_tile);
      const size_t packed_weights_size = groups * packed_group_output_channels *
        (group_input_channels * kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error(
          "failed to allocate %zu bytes for %s operator packed weights",
          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
        goto error;
      }

      xnn_pack_f32_dconv_oki_w(
        group_output_channels,
        group_input_channels,
        xnn_params.f32.conv_hwc2chw_3x3c3s2.output_channel_tile,
        kernel_height, kernel_width,
        kernel, bias, convolution_op->packed_weights, NULL);

      convolution_op->ukernel.conv2d = (struct xnn_ukernel_conv2d) {
        .hwc2chw_function = xnn_params.f32.conv_hwc2chw_3x3c3s2.ukernel_with_symm_padding,
        .output_height_tile = xnn_params.f32.conv_hwc2chw_3x3c3s2.output_height_tile,
        .output_channel_tile = xnn_params.f32.conv_hwc2chw_3x3c3s2.output_channel_tile,
      };

      break;
    }
    case xnn_ukernel_type_dwconv:
    {
      assert(dwconv2d_parameters != NULL);
      assert(group_input_channels == 1);
      assert(group_output_channels == 1);

      const size_t packed_weights_size = groups * (kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error(
          "failed to allocate %zu bytes for %s operator packed weights",
          packed_weights_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
        goto error;
      }

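      // XNN_FLAG_DEPTHWISE_CONVOLUTION indicates that the caller supplies the filter in the
      // depthwise [H][W][groups] (HWG) layout; otherwise a [groups][H][W] (GHW) layout is
      // assumed. Both are repacked into the same per-channel [bias, kernel] format that the
      // CHW microkernel consumes.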
      if (flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) {
        xnn_pack_f32_chw_dwconv_hwg_w(
          kernel_height * kernel_width, groups,
          kernel, bias, convolution_op->packed_weights, NULL);
      } else {
        xnn_pack_f32_chw_dwconv_ghw_w(
          kernel_height * kernel_width, groups,
          kernel, bias, convolution_op->packed_weights, NULL);
      }

      convolution_op->ukernel.dwconv2d = (struct xnn_ukernel_dwconv2d) {
        .chw_function = dwconv2d_parameters->ukernel,
        .output_width_tile = dwconv2d_parameters->output_width_tile,
      };

      break;
    }
    default:
      XNN_UNREACHABLE;
  }

  convolution_op->padding_top = input_padding_top;
  convolution_op->padding_right = input_padding_right;
  convolution_op->padding_bottom = input_padding_bottom;
  convolution_op->padding_left = input_padding_left;

  convolution_op->kernel_height = kernel_height;
  convolution_op->kernel_width = kernel_width;
  convolution_op->stride_height = subsampling_height;
  convolution_op->stride_width = subsampling_width;
  convolution_op->dilation_height = dilation_height;
  convolution_op->dilation_width = dilation_width;
  convolution_op->groups = groups;
  convolution_op->group_input_channels = group_input_channels;
  convolution_op->group_output_channels = group_output_channels;
  convolution_op->input_pixel_stride = input_channel_stride;
  convolution_op->output_pixel_stride = output_channel_stride;

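  // Depthwise CHW microkernels consume f32_chw params (output min/max plus width-dependent
  // fields that setup_convolution2d_nchw fills in via xnn_update_f32_chw_params once the input
  // width is known); the SpMM and HWC2CHW paths only need plain min/max params.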
  if (ukernel_type == xnn_ukernel_type_dwconv) {
    convolution_op->params.f32_chw = xnn_init_f32_chw_params(0, output_min, output_max);
  } else {
    convolution_op->params.f32_minmax = xnn_init_f32_minmax_params(output_min, output_max);
  }

  convolution_op->type = xnn_operator_type_convolution_nchw_f32;
  convolution_op->ukernel.type = ukernel_type;

  convolution_op->state = xnn_run_state_invalid;

  *convolution_op_out = convolution_op;
  return xnn_status_success;

error:
  xnn_delete_operator(convolution_op);
  return status;
}

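// Shared setup helper for the NCHW convolution paths. Input/filter/output element sizes are
// passed as log2 values so that strides can be computed with shifts, and the bias element size
// is passed in bytes; the f32 entry point at the bottom of this file supplies
// log2(sizeof(float)) = 2 and sizeof(float) respectively.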
static enum xnn_status setup_convolution2d_nchw(
  xnn_operator_t convolution_op,
  size_t batch_size,
  size_t input_height,
  size_t input_width,
  const void* input,
  void* output,
  uint32_t log2_input_element_size,
  uint32_t log2_filter_element_size,
  uint32_t bias_element_size,
  uint32_t log2_output_element_size,
  const void* params,
  const void* chw_params,
  size_t num_threads)
{
  convolution_op->state = xnn_run_state_invalid;

  if ((xnn_params.init_flags & XNN_INIT_FLAG_XNNPACK) == 0) {
    xnn_log_error("failed to setup %s operator: XNNPACK is not initialized",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
    return xnn_status_uninitialized;
  }

  if (input_width == 0 || input_height == 0) {
    xnn_log_error(
      "failed to setup %s operator with %zux%zu input: input dimensions must be non-zero",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32), input_width, input_height);
    return xnn_status_invalid_parameter;
  }

  if (batch_size == 0) {
    convolution_op->state = xnn_run_state_skip;
    return xnn_status_success;
  }

  convolution_op->batch_size = batch_size;
  convolution_op->input_height = input_height;
  convolution_op->input_width = input_width;
  convolution_op->input = input;
  convolution_op->output = output;

  const size_t output_height = compute_output_dimension(
      convolution_op->padding_top + input_height + convolution_op->padding_bottom,
      convolution_op->kernel_height,
      convolution_op->dilation_height,
      convolution_op->stride_height);
  const size_t output_width = compute_output_dimension(
      convolution_op->padding_left + input_width + convolution_op->padding_right,
      convolution_op->kernel_width,
      convolution_op->dilation_width,
      convolution_op->stride_width);

  const size_t input_batch_stride = (input_height * input_width * convolution_op->input_pixel_stride) << log2_input_element_size;
  const size_t output_batch_stride = (output_height * output_width * convolution_op->output_pixel_stride) << log2_output_element_size;
  switch (convolution_op->ukernel.type) {
    case xnn_ukernel_type_spmm:
    {
      const size_t num_nonzero_values = convolution_op->num_nonzero_values;
      const size_t num_nonzero_blocks = convolution_op->num_nonzero_blocks;
      const size_t num_output_channel_blocks = convolution_op->num_output_channel_blocks;

      convolution_op->num_nonzero_values = num_nonzero_values;
      convolution_op->num_nonzero_blocks = num_nonzero_blocks;
      convolution_op->num_output_channel_blocks = num_output_channel_blocks;

      float* nonzero_values = convolution_op->packed_weights;
      int32_t* input_increments = (int32_t*) (nonzero_values + num_nonzero_values + convolution_op->group_output_channels);
      uint32_t* output_channel_nonzeros = (uint32_t*) (input_increments + num_nonzero_blocks);
      int32_t* input_channel_diffs = (int32_t*) (output_channel_nonzeros + num_output_channel_blocks);

      const size_t input_size = input_height * input_width;
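      // Convert the per-channel differences (already scaled by sizeof(float)) into byte
      // increments across whole HxW planes of the NCHW input, checking that the result still
      // fits into int32_t.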
      for (size_t i = 0; i < num_nonzero_blocks; i++) {
        const int32_t diff = input_channel_diffs[i];
        const int64_t increment = (int64_t) diff * input_size;
        if ((int64_t) (int32_t) increment != increment) {
          xnn_log_error(
            "failed to setup %s operator with sparse kernel representation: input increment exceeds int32_t range",
            xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
          return xnn_status_unsupported_parameter;
        }
        input_increments[i] = (int32_t) increment;
      }

      convolution_op->context.spmm = (struct spmm_context) {
          .n = convolution_op->group_output_channels,
          .scaled_m = input_size * sizeof(float),
          .input = (const void*) ((uintptr_t) input + (convolution_op->first_input_channel * input_size * sizeof(float))),
          .nonzero_weights = nonzero_values,
          .input_increments = input_increments,
          .output_channel_nonzeros = output_channel_nonzeros,
          .output = output,
          .batched_input_stride = input_batch_stride,
          .batched_output_stride = output_batch_stride,
          .ukernel = convolution_op->ukernel.spmm.function,
      };
      memcpy(&convolution_op->context.spmm.params, params, sizeof(convolution_op->context.spmm.params));

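      // Tile the spatial dimension (the M dimension of the sparse matrix-dense matrix product)
      // so that each thread gets roughly 5 tiles, rounding the tile size to a multiple of the
      // microkernel's row tile mr.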
      const size_t mr = convolution_op->ukernel.spmm.mr;
      size_t mc = input_size;
      if (num_threads > 1) {
        const size_t target_tiles_per_thread = 5;
        const size_t max_mc = divide_round_up(input_size, num_threads * target_tiles_per_thread);
        if (max_mc < mc) {
          mc = min(mc, divide_round_up(mc, max_mc * mr) * mr);
        }
      }
      convolution_op->compute.type = xnn_parallelization_type_2d_tile_1d;
      convolution_op->compute.task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_spmm;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = input_size * sizeof(float);
      convolution_op->compute.tile[0] = mc * sizeof(float);
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    case xnn_ukernel_type_conv2d_hwc2chw:
    {
      const size_t zero_size = (input_width * convolution_op->group_input_channels << log2_input_element_size) + XNN_EXTRA_BYTES;
      void* zero_buffer = xnn_reallocate_memory(convolution_op->zero_buffer, zero_size);
      if (zero_buffer == NULL) {
        xnn_log_error(
          "failed to allocate %zu bytes for %s operator zero padding",
          zero_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
        return xnn_status_out_of_memory;
      }
      memset(zero_buffer, 0, zero_size);
      convolution_op->zero_buffer = zero_buffer;

      convolution_op->context.conv2d = (struct conv2d_context) {
        .input_height = input_height,
        .input_width = input_width,
        .input = input,
        .input_batch_stride = input_batch_stride,
        .zero = zero_buffer,
        .packed_weights = convolution_op->packed_weights,
        .output = output,
        .output_batch_stride = output_batch_stride,
        .input_padding_top = convolution_op->padding_top,
        .output_channels = convolution_op->group_output_channels,
        .output_height_stride = output_width << log2_output_element_size,
        .output_channel_stride = output_height * output_width << log2_output_element_size,
        .hwc2chw_ukernel = convolution_op->ukernel.conv2d.hwc2chw_function,
      };
      memcpy(&convolution_op->context.conv2d.params, params, sizeof(convolution_op->context.conv2d.params));

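      // Parallelize over output rows: each task computes a slice of output rows from the NHWC
      // input into the NCHW output, again targeting roughly 5 tiles per thread and rounding the
      // slice to the microkernel's output height tile.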
      size_t output_height_slice = output_height;
      const size_t output_height_tile = convolution_op->ukernel.conv2d.output_height_tile;
      if (num_threads > 1) {
        const size_t target_tiles_per_thread = 5;
        const size_t max_output_height_slice = divide_round_up(output_height, num_threads * target_tiles_per_thread);
        if (max_output_height_slice < output_height_slice) {
          output_height_slice = min(output_height_slice,
            divide_round_up(output_height_slice, max_output_height_slice * output_height_tile) * output_height_tile);
        }
      }
      convolution_op->compute.type = xnn_parallelization_type_2d_tile_1d;
      convolution_op->compute.task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_conv2d_hwc2chw;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = output_height;
      convolution_op->compute.tile[0] = output_height_slice;
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    case xnn_ukernel_type_dwconv:
    {
      const size_t zero_size = (input_width << log2_input_element_size) + 2 * XNN_EXTRA_BYTES;
      void* zero_buffer = xnn_reallocate_memory(convolution_op->zero_buffer, zero_size);
      if (zero_buffer == NULL) {
        xnn_log_error(
          "failed to allocate %zu bytes for %s operator zero padding",
          zero_size, xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32));
        return xnn_status_out_of_memory;
      }
      memset(zero_buffer, 0, zero_size);
      convolution_op->zero_buffer = zero_buffer;

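      // Refresh the width-dependent fields of the CHW params for this input width before they
      // are copied into the microkernel context below.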
      xnn_update_f32_chw_params((union xnn_f32_chw_params*) chw_params, (uint32_t) input_width);
      convolution_op->context.dwconv2d = (struct dwconv2d_context) {
        .input_height = input_height,
        .input_width = input_width << log2_input_element_size,
        .input = input,
        .zero = zero_buffer,
        .input_padding_top = convolution_op->padding_top,
        .input_channel_stride = input_height * input_width << log2_input_element_size,
        .input_batch_stride = input_batch_stride,
        .packed_weights = convolution_op->packed_weights,
        .weights_channel_stride = bias_element_size +
          (convolution_op->kernel_height * convolution_op->kernel_width << log2_filter_element_size),
        .output = output,
        .output_channel_stride = output_height * output_width << log2_output_element_size,
        .output_batch_stride = output_batch_stride,
        .chw_ukernel = convolution_op->ukernel.dwconv2d.chw_function,
      };
      memcpy(&convolution_op->context.dwconv2d.params, chw_params, sizeof(convolution_op->context.dwconv2d.params));

      convolution_op->compute.type = xnn_parallelization_type_2d;
      convolution_op->compute.task_2d = (pthreadpool_task_2d_t) xnn_compute_dwconv2d_chw;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = convolution_op->groups;
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    default:
      XNN_UNREACHABLE;
  }
}

enum xnn_status xnn_setup_convolution2d_nchw_f32(
    xnn_operator_t convolution_op,
    size_t batch_size,
    size_t input_height,
    size_t input_width,
    const float* input,
    float* output,
    pthreadpool_t threadpool)
{
  if (convolution_op->type != xnn_operator_type_convolution_nchw_f32) {
    xnn_log_error("failed to setup operator: operator type mismatch (expected %s, got %s)",
      xnn_operator_type_to_string(xnn_operator_type_convolution_nchw_f32),
      xnn_operator_type_to_string(convolution_op->type));
    return xnn_status_invalid_parameter;
  }

  return setup_convolution2d_nchw(
    convolution_op,
    batch_size, input_height, input_width,
    input, output,
    2 /* log2(sizeof(input element)) = log2(sizeof(float)) */,
    2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
    sizeof(float) /* sizeof(bias element) */,
    2 /* log2(sizeof(output element)) = log2(sizeof(float)) */,
    &convolution_op->params.f32_minmax,
    &convolution_op->params.f32_chw,
    pthreadpool_get_threads_count(threadpool));
}

750