// Copyright 2019 Google LLC
//
// This source code is licensed under the BSD-style license found in the
// LICENSE file in the root directory of this source tree.

#include <assert.h>
#include <math.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdlib.h>
#include <string.h>

#include <xnnpack.h>
#include <xnnpack/allocator.h>
#include <xnnpack/common.h>
#include <xnnpack/compute.h>
#include <xnnpack/indirection.h>
#include <xnnpack/log.h>
#include <xnnpack/math.h>
#include <xnnpack/operator.h>
#include <xnnpack/pack.h>
#include <xnnpack/params-init.h>
#include <xnnpack/params.h>

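// Computes the output spatial dimension of the convolution:
//   effective_kernel = (kernel - 1) * dilation + 1
//   output = max(padded_input - effective_kernel, 0) / subsampling + 1
// where doz() is "difference or zero". For example, a 224-pixel input padded
// by 1 on each side, with a 3x3 kernel, dilation 1, and stride 2, gives
// (226 - 3) / 2 + 1 = 112.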
static inline size_t compute_output_dimension(
    size_t padded_input_dimension,
    size_t kernel_dimension,
    size_t dilation_dimension,
    size_t subsampling_dimension)
{
  const size_t effective_kernel_dimension = (kernel_dimension - 1) * dilation_dimension + 1;
  return doz(padded_input_dimension, effective_kernel_dimension) / subsampling_dimension + 1;
}

enum xnn_status xnn_create_convolution2d_nchw_f32(
    uint32_t input_padding_top,
    uint32_t input_padding_right,
    uint32_t input_padding_bottom,
    uint32_t input_padding_left,
    uint32_t kernel_height,
    uint32_t kernel_width,
    uint32_t subsampling_height,
    uint32_t subsampling_width,
    uint32_t dilation_height,
    uint32_t dilation_width,
    uint32_t groups,
    size_t group_input_channels,
    size_t group_output_channels,
    const float* kernel,
    const float* bias,
    float output_min,
    float output_max,
    uint32_t flags,
    xnn_operator_t* convolution_op_out)
{
  xnn_operator_t convolution_op = NULL;
  enum xnn_status status = xnn_status_uninitialized;

  if (!xnn_params.initialized) {
    xnn_log_error("failed to create Convolution operator: XNNPACK is not initialized");
    goto error;
  }

  status = xnn_status_invalid_parameter;

  if (kernel_width == 0 || kernel_height == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " kernel: kernel dimensions must be non-zero",
      kernel_width, kernel_height);
    goto error;
  }

  if (subsampling_width == 0 || subsampling_height == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " subsampling: "
      "subsampling dimensions must be non-zero",
      subsampling_width, subsampling_height);
    goto error;
  }

  if (dilation_width == 0 || dilation_height == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %" PRIu32 "x%" PRIu32 " dilation: "
      "dilation dimensions must be non-zero",
      dilation_width, dilation_height);
    goto error;
  }

  if (groups == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %" PRIu32 " groups: number of groups must be non-zero", groups);
    goto error;
  }

  if (group_input_channels == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %zu input channels per group: "
      "number of channels must be non-zero",
      group_input_channels);
    goto error;
  }

  if (group_output_channels == 0) {
    xnn_log_error(
      "failed to create Convolution operator with %zu output channels per group: "
      "number of channels must be non-zero",
      group_output_channels);
    goto error;
  }

  if (isnan(output_min)) {
    xnn_log_error(
      "failed to create Convolution operator with NaN output lower bound: lower bound must be non-NaN");
    goto error;
  }

  if (isnan(output_max)) {
    xnn_log_error(
      "failed to create Convolution operator with NaN output upper bound: upper bound must be non-NaN");
    goto error;
  }

  if (output_min >= output_max) {
    xnn_log_error(
      "failed to create Convolution operator with [%.7g, %.7g] output range: "
      "lower bound must be below upper bound",
      output_min, output_max);
    goto error;
  }

  if ((flags & XNN_FLAG_DEPTHWISE_CONVOLUTION) != 0 && group_input_channels != 1) {
    xnn_log_error(
      "failed to create Depthwise Convolution operator with %zu input channels per group: "
      "Depthwise Convolution must have exactly 1 input channel per group",
      group_input_channels);
    goto error;
  }

  status = xnn_status_unsupported_parameter;

  enum xnn_ukernel_type ukernel_type;
  struct spchw_dwconv_parameters* dwconv_parameters = NULL;
  // Supported cases:
  // + 1x1 convolution (no groups)
  // + 3x3 stride-2 with 3 input channels and NHWC input layout
  // + 3x3 stride-2 depthwise convolution with horizontal padding 1 & no vertical padding
  // + 3x3 stride-1 depthwise convolution with horizontal padding 1 & no vertical padding
  // + 5x5 stride-2 depthwise convolution with horizontal padding 2 & no vertical padding
  // + 5x5 stride-1 depthwise convolution with horizontal padding 2 & no vertical padding
  const bool any_padding = (input_padding_left | input_padding_top | input_padding_right | input_padding_bottom) != 0;
  const bool is_1x1 = kernel_width == 1 && kernel_height == 1 && subsampling_height == 1 && subsampling_width == 1;
  const bool is_3x3 = kernel_width == 3 && kernel_height == 3 && dilation_height == 1 && dilation_width == 1;
  const bool is_5x5 = kernel_width == 5 && kernel_height == 5 && dilation_height == 1 && dilation_width == 1;
  const bool nhwc_input = (flags & XNN_FLAG_INPUT_NHWC) != 0;
  if (is_1x1 && !any_padding && !nhwc_input && groups == 1 && xnn_params.f32.spmm.ukernel != NULL) {
    ukernel_type = xnn_ukernel_type_spmm;
  } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
    input_padding_top == 1 && input_padding_left == 1 && input_padding_bottom == 1 && input_padding_right == 1 &&
    nhwc_input && groups == 1 && xnn_params.f32.hwc2spchw_dconv3x3c3s2.ukernel_with_symm_padding != NULL)
  {
    ukernel_type = xnn_ukernel_type_dconv2d_hwc2spchw;
  } else if (is_3x3 && subsampling_height == 1 && subsampling_width == 1 &&
    input_padding_top == 0 && input_padding_left == 1 && input_padding_bottom == 0 && input_padding_right == 1 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwconv3x3.ukernel != NULL)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv_parameters = &xnn_params.f32.spchw_dwconv3x3;
  } else if (is_3x3 && subsampling_height == 2 && subsampling_width == 2 &&
    input_padding_top == 0 && input_padding_left == 1 && input_padding_bottom == 0 && input_padding_right == 1 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwconv3x3s2.ukernel != NULL)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv_parameters = &xnn_params.f32.spchw_dwconv3x3s2;
  } else if (is_5x5 && subsampling_height == 1 && subsampling_width == 1 &&
    input_padding_top == 0 && input_padding_left == 2 && input_padding_bottom == 0 && input_padding_right == 2 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwconv5x5.ukernel != NULL)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv_parameters = &xnn_params.f32.spchw_dwconv5x5;
  } else if (is_5x5 && subsampling_height == 2 && subsampling_width == 2 &&
    input_padding_top == 0 && input_padding_left == 2 && input_padding_bottom == 0 && input_padding_right == 2 &&
    !nhwc_input && group_input_channels == 1 && group_output_channels == 1 && xnn_params.f32.spchw_dwconv5x5s2.ukernel != NULL)
  {
    ukernel_type = xnn_ukernel_type_dwconv;
    dwconv_parameters = &xnn_params.f32.spchw_dwconv5x5s2;
  } else {
    xnn_log_error(
      "failed to create Convolution operator: only selected Convolution parameters are supported");
    goto error;
  }

  status = xnn_status_out_of_memory;

  convolution_op = xnn_allocate_zero_simd_memory(sizeof(struct xnn_operator));
  if (convolution_op == NULL) {
    xnn_log_error("failed to allocate %zu bytes for Convolution operator descriptor", sizeof(struct xnn_operator));
    goto error;
  }

  switch (ukernel_type) {
    case xnn_ukernel_type_spmm:
    {
      assert(kernel_height == 1);
      assert(kernel_width == 1);
      assert(groups == 1);

      size_t num_nonzeroes = 0;
      size_t num_nonzero_blocks2 = 0;
      size_t num_nonzero_blocks4 = 0;
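      // Count non-zero kernel elements individually, in blocks of 2 consecutive
      // output channels, and in blocks of 4 (a block counts as non-zero if any of
      // its rows is non-zero). These counts drive the block-size choice below.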
      for (size_t oc = 0; oc < round_down_po2(group_output_channels, 4); oc += 4) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const size_t row0_nonzero = (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
          const size_t row1_nonzero = (size_t) (kernel[(oc + 1) * group_input_channels + ic] != 0.0f);
          const size_t row2_nonzero = (size_t) (kernel[(oc + 2) * group_input_channels + ic] != 0.0f);
          const size_t row3_nonzero = (size_t) (kernel[(oc + 3) * group_input_channels + ic] != 0.0f);
          num_nonzeroes += row0_nonzero + row1_nonzero + row2_nonzero + row3_nonzero;
          num_nonzero_blocks2 += (row0_nonzero | row1_nonzero) + (row2_nonzero | row3_nonzero);
          num_nonzero_blocks4 += (row0_nonzero | row1_nonzero | row2_nonzero | row3_nonzero);
        }
      }
      const size_t num_block4_nonzeroes = num_nonzeroes;
      for (size_t oc = round_down_po2(group_output_channels, 4); oc < round_down_po2(group_output_channels, 2); oc += 2) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const size_t row0_nonzero = (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
          const size_t row1_nonzero = (size_t) (kernel[(oc + 1) * group_input_channels + ic] != 0.0f);
          num_nonzeroes += row0_nonzero + row1_nonzero;
          num_nonzero_blocks2 += (row0_nonzero | row1_nonzero);
        }
      }
      const size_t num_block2_nonzeroes = num_nonzeroes;
      for (size_t oc = round_down_po2(group_output_channels, 2); oc < group_output_channels; oc++) {
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          num_nonzeroes += (size_t) (kernel[oc * group_input_channels + ic] != 0.0f);
        }
      }
      size_t output_channels_block_size = 1;
      size_t num_output_channel_blocks = group_output_channels;
      size_t num_nonzero_values = num_nonzeroes;
      size_t num_nonzero_blocks = num_nonzeroes;
      const struct spmm_parameters* spmm_parameters = &xnn_params.f32.spmm;
      if (num_block4_nonzeroes * 5 >= num_nonzero_blocks4 * 18 && xnn_params.f32.spmm4.ukernel != NULL) {
        // 4-channel blocks have 90%+ non-zeroes:
        // num_block4_nonzeroes >= 3.6 * num_nonzero_blocks4, i.e. at least 90% of
        // the 4 * num_nonzero_blocks4 stored elements are non-zero.

        output_channels_block_size = 4;
        num_output_channel_blocks = num_output_channel_blocks / 4 + num_output_channel_blocks % 4;
        spmm_parameters = &xnn_params.f32.spmm4;
        // Non-zeroes which don't fit into whole 4-channel blocks, processed one-by-one
        const size_t num_remaining_nonzeroes = num_nonzeroes - num_block4_nonzeroes;
        num_nonzero_values = num_nonzero_blocks4 * 4 + num_remaining_nonzeroes;
        num_nonzero_blocks = num_nonzero_blocks4 + num_remaining_nonzeroes;
      } else if (num_block2_nonzeroes * 5 >= num_nonzero_blocks2 * 9 && xnn_params.f32.spmm2.ukernel != NULL) {
        // 2-channel blocks have 90%+ non-zeroes:
        // num_block2_nonzeroes >= 1.8 * num_nonzero_blocks2, i.e. at least 90% of
        // the 2 * num_nonzero_blocks2 stored elements are non-zero.

        output_channels_block_size = 2;
        num_output_channel_blocks = num_output_channel_blocks / 2 + num_output_channel_blocks % 2;
        spmm_parameters = &xnn_params.f32.spmm2;
        // Non-zeroes which don't fit into whole 2-channel blocks, processed one-by-one
        const size_t num_remaining_nonzeroes = num_nonzeroes - num_block2_nonzeroes;
        num_nonzero_values = num_nonzero_blocks2 * 2 + num_remaining_nonzeroes;
        num_nonzero_blocks = num_nonzero_blocks2 + num_remaining_nonzeroes;
      }

      // Sparse representation of weights consists of four components:
      // 1. An array of float values storing non-zero kernel elements, and all (group_output_channels) bias elements.
      //    All elements within a non-zero block are assumed to be non-zero.
      // 2. An array of int32_t values storing the increment of the input pointer after each processed tile. This
      //    array is derived from the scaled differences in component 4 by the setup function.
      // 3. An array of uint32_t values storing the number of non-zero kernel elements for each output channel.
      // 4. An array of int32_t values storing the scaled [by sizeof(input element)] difference between input channels
      //    corresponding to successive non-zero blocks.
      const size_t packed_weights_size = num_output_channel_blocks * sizeof(uint32_t) +
        (num_nonzero_blocks * 2) * sizeof(int32_t) + (num_nonzero_values + group_output_channels) * sizeof(float);

      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
        goto error;
      }
      convolution_op->num_nonzero_values = num_nonzero_values;
      convolution_op->num_nonzero_blocks = num_nonzero_blocks;
      convolution_op->num_output_channel_blocks = num_output_channel_blocks;

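      // Layout of packed_weights, matching the pointer arithmetic below:
      // [biases interleaved with non-zero values (float) | input_increments (int32_t) |
      //  output_channel_nonzeros (uint32_t) | input_channel_diffs (int32_t)]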
      float* nonzero_values = convolution_op->packed_weights;
      int32_t* input_increments = (int32_t*) (nonzero_values + num_nonzero_values + group_output_channels);
      uint32_t* output_channel_nonzeros = (uint32_t*) (input_increments + num_nonzero_blocks);
      int32_t* input_channel_diffs = (int32_t*) (output_channel_nonzeros + num_output_channel_blocks);
      memset(output_channel_nonzeros, 0, num_output_channel_blocks * sizeof(uint32_t));

      status = xnn_status_unsupported_parameter;

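      // Packing pass: for each block of output channels, emit the bias values,
      // then the weights of every non-zero block, counting non-zeroes per
      // output-channel block and recording the sizeof(float)-scaled distance
      // between successive non-zero input channels in input_channel_diffs.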
      size_t first_ic = 0, last_ic = 0;
      bool first_nonzero = true;
      for (size_t ocb = 0; ocb < round_down_po2(group_output_channels, output_channels_block_size); ocb += output_channels_block_size) {
        if XNN_LIKELY(bias != NULL) {
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            *nonzero_values++ = bias[ocb + oco];
          }
        } else {
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            *nonzero_values++ = 0.0f;
          }
        }
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          bool is_nonzero_block = false;
          for (size_t oco = 0; oco < output_channels_block_size; oco++) {
            is_nonzero_block |= (kernel[(ocb + oco) * group_input_channels + ic] != 0.0f);
          }
          if (is_nonzero_block) {
            for (size_t oco = 0; oco < output_channels_block_size; oco++) {
              *nonzero_values++ = kernel[(ocb + oco) * group_input_channels + ic];
            }
            if (first_nonzero) {
              first_ic = ic;
            } else {
              const int64_t diff = (int64_t) ((uint64_t) ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
              if (diff != (int64_t) (int32_t) diff) {
                xnn_log_error("failed to convert kernel to sparse representation: "
                  "scaled difference in input channels exceeds int32_t range");
                goto error;
              }
              *input_channel_diffs++ = (int32_t) diff;
            }
            first_nonzero = false;
            last_ic = ic;
            *output_channel_nonzeros += 1;
          }
        }
        output_channel_nonzeros += 1;
      }
      for (size_t oc = round_down_po2(group_output_channels, output_channels_block_size); oc < group_output_channels; oc++) {
        if XNN_LIKELY(bias != NULL) {
          *nonzero_values++ = bias[oc];
        } else {
          *nonzero_values++ = 0.0f;
        }
        for (size_t ic = 0; ic < group_input_channels; ic++) {
          const float weight = kernel[oc * group_input_channels + ic];
          if (weight != 0.0f) {
            *nonzero_values++ = weight;
            if (first_nonzero) {
              first_ic = ic;
            } else {
              const int64_t diff = (int64_t) ((uint64_t) ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
              if (diff != (int64_t) (int32_t) diff) {
                xnn_log_error("failed to convert kernel to sparse representation: "
                  "scaled difference in input channels exceeds int32_t range");
                goto error;
              }
              *input_channel_diffs++ = (int32_t) diff;
            }
            first_nonzero = false;
            last_ic = ic;
            *output_channel_nonzeros += 1;
          }
        }
        output_channel_nonzeros += 1;
      }
      // If there are any non-zero elements, we have to return to the initial input channel.
      if (!first_nonzero) {
        const int64_t diff = (int64_t) ((uint64_t) first_ic - (uint64_t) last_ic) * (int64_t) sizeof(float);
        if (diff != (int64_t) (int32_t) diff) {
          xnn_log_error("failed to convert kernel to sparse representation: "
            "scaled difference in input channels exceeds int32_t range");
          goto error;
        }
        *input_channel_diffs++ = (int32_t) diff;
      }
      convolution_op->first_input_channel = first_ic;

      convolution_op->ukernel.spmm = (struct xnn_ukernel_spmm) {
        .function = spmm_parameters->ukernel,
        .mr = spmm_parameters->mr,
      };

      break;
    }
    case xnn_ukernel_type_dconv2d_hwc2spchw:
    {
      assert(groups == 1);

      const size_t packed_group_output_channels =
        round_up(group_output_channels, xnn_params.f32.hwc2spchw_dconv3x3c3s2.output_channel_tile);
      const size_t packed_weights_size = groups * packed_group_output_channels *
        (group_input_channels * kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
        goto error;
      }

      xnn_pack_f32_dconv_oki_w(
        group_output_channels,
        group_input_channels,
        xnn_params.f32.hwc2spchw_dconv3x3c3s2.output_channel_tile,
        kernel_height, kernel_width,
        kernel, bias, convolution_op->packed_weights);

      convolution_op->ukernel.dconv2d = (struct xnn_ukernel_dconv2d) {
        .hwc2spchw_function = xnn_params.f32.hwc2spchw_dconv3x3c3s2.ukernel_with_symm_padding,
        .output_height_tile = xnn_params.f32.hwc2spchw_dconv3x3c3s2.output_height_tile,
        .output_channel_tile = xnn_params.f32.hwc2spchw_dconv3x3c3s2.output_channel_tile,
      };

      break;
    }
    case xnn_ukernel_type_dwconv:
    {
      assert(dwconv_parameters != NULL);
      assert(group_input_channels == 1);
      assert(group_output_channels == 1);

      const size_t packed_weights_size = groups * (kernel_height * kernel_width + 1 /* bias */) * sizeof(float);
      convolution_op->packed_weights = xnn_allocate_simd_memory(packed_weights_size);
      if (convolution_op->packed_weights == NULL) {
        xnn_log_error("failed to allocate %zu bytes for packed weights", packed_weights_size);
        goto error;
      }

      xnn_pack_f32_spchw_dwconv_ghw_w(
        kernel_height * kernel_width, groups,
        kernel, bias, convolution_op->packed_weights);

      convolution_op->ukernel.dwconv2d = (struct xnn_ukernel_dwconv2d) {
        .spchw_function = dwconv_parameters->ukernel,
        .input_width_tile = dwconv_parameters->input_width_tile,
        .output_width_tile = dwconv_parameters->output_width_tile,
      };

      break;
    }
    default:
      XNN_UNREACHABLE;
  }

  convolution_op->padding_top = input_padding_top;
  convolution_op->padding_right = input_padding_right;
  convolution_op->padding_bottom = input_padding_bottom;
  convolution_op->padding_left = input_padding_left;

  convolution_op->kernel_height = kernel_height;
  convolution_op->kernel_width = kernel_width;
  convolution_op->stride_height = subsampling_height;
  convolution_op->stride_width = subsampling_width;
  convolution_op->dilation_height = dilation_height;
  convolution_op->dilation_width = dilation_width;
  convolution_op->groups = groups;
  convolution_op->group_input_channels = group_input_channels;
  convolution_op->group_output_channels = group_output_channels;

  if (ukernel_type == xnn_ukernel_type_dwconv) {
    convolution_op->f32_spchw_params = xnn_init_f32_spchw_params(0, output_min, output_max);
  } else {
    convolution_op->f32_output_params = xnn_init_f32_output_params(output_min, output_max);
  }

  convolution_op->type = xnn_operator_type_convolution_nchw_f32;
  convolution_op->ukernel.type = ukernel_type;

  convolution_op->state = xnn_run_state_invalid;

  *convolution_op_out = convolution_op;
  return xnn_status_success;

error:
  xnn_delete_operator(convolution_op);
  return status;
}

static enum xnn_status setup_convolution2d_nchw(
  xnn_operator_t convolution_op,
  size_t batch_size,
  size_t input_batch_stride,
  size_t output_batch_stride,
  size_t input_height,
  size_t input_width,
  const void* input,
  void* output,
  uint32_t log2_input_element_size,
  uint32_t log2_filter_element_size,
  uint32_t bias_element_size,
  uint32_t log2_output_element_size,
  const void* params,
  size_t num_threads)
{
  convolution_op->state = xnn_run_state_invalid;

  if (!xnn_params.initialized) {
    xnn_log_error("failed to setup Convolution operator: XNNPACK is not initialized");
    return xnn_status_uninitialized;
  }

  if (input_width == 0 || input_height == 0) {
    xnn_log_error(
      "failed to setup Convolution operator with %zux%zu input: input dimensions must be non-zero",
      input_width, input_height);
    return xnn_status_invalid_parameter;
  }

  const uint32_t groups = convolution_op->groups;
  const size_t group_input_channels = convolution_op->group_input_channels;
  const size_t input_neurons = groups * group_input_channels * input_height * input_width;
  if (input_batch_stride < input_neurons) {
    xnn_log_error(
      "failed to setup Convolution operator with input batch stride of %zu: "
      "stride must be at least as large as the number of input neurons (%" PRIu32 "x%zux%zux%zu)",
      input_batch_stride, groups, group_input_channels, input_height, input_width);
    return xnn_status_invalid_parameter;
  }

  const size_t output_height = compute_output_dimension(
      convolution_op->padding_top + input_height + convolution_op->padding_bottom,
      convolution_op->kernel_height,
      convolution_op->dilation_height,
      convolution_op->stride_height);
  const size_t output_width = compute_output_dimension(
      convolution_op->padding_left + input_width + convolution_op->padding_right,
      convolution_op->kernel_width,
      convolution_op->dilation_width,
      convolution_op->stride_width);

  const size_t group_output_channels = convolution_op->group_output_channels;
  const size_t output_neurons = groups * group_output_channels * output_height * output_width;
  if (output_batch_stride < output_neurons) {
    xnn_log_error(
      "failed to setup Convolution operator with output batch stride of %zu: "
      "stride must be at least as large as the number of output neurons (%" PRIu32 "x%zux%zux%zu)",
      output_batch_stride, groups, group_output_channels, output_height, output_width);
    return xnn_status_invalid_parameter;
  }

  if (batch_size == 0) {
    convolution_op->state = xnn_run_state_skip;
    return xnn_status_success;
  }

  convolution_op->batch_size = batch_size;
  convolution_op->input_height = input_height;
  convolution_op->input_width = input_width;
  convolution_op->input = input;
  convolution_op->output = output;

  switch (convolution_op->ukernel.type) {
    case xnn_ukernel_type_spmm:
    {
      const size_t num_nonzero_values = convolution_op->num_nonzero_values;
      const size_t num_nonzero_blocks = convolution_op->num_nonzero_blocks;
      const size_t num_output_channel_blocks = convolution_op->num_output_channel_blocks;
      float* nonzero_values = convolution_op->packed_weights;
      int32_t* input_increments = (int32_t*) (nonzero_values + num_nonzero_values + convolution_op->group_output_channels);
      uint32_t* output_channel_nonzeros = (uint32_t*) (input_increments + num_nonzero_blocks);
      int32_t* input_channel_diffs = (int32_t*) (output_channel_nonzeros + num_output_channel_blocks);

      // const uint32_t* output_channel_nonzeros = convolution_op->packed_weights;
      // const int32_t* input_channel_diffs = (const int32_t*) (output_channel_nonzeros + num_output_channel_blocks);
      // int32_t* input_increments = (int32_t*) (input_channel_diffs + num_nonzero_blocks);
      // const void* packed_weights = (const void*) (input_increments + num_nonzero_blocks);

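      // In NCHW layout, stepping by one input channel moves the input pointer by
      // input_height * input_width elements, so the packed channel differences
      // (already scaled by sizeof(float)) are multiplied by the spatial size here
      // to produce the per-tile input increments (component 2 of the sparse
      // representation).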
      const size_t input_size = input_height * input_width;
      for (size_t i = 0; i < num_nonzero_blocks; i++) {
        const int32_t diff = input_channel_diffs[i];
        const int64_t increment = (int64_t) diff * input_size;
        if ((int64_t) (int32_t) increment != increment) {
          xnn_log_error("failed to setup Convolution operator with sparse kernel representation: "
            "input increment exceeds int32_t range");
          return xnn_status_unsupported_parameter;
        }
        input_increments[i] = (int32_t) increment;
      }

      convolution_op->context.spmm = (struct spmm_context) {
          .n = group_output_channels,
          .a = input + (convolution_op->first_input_channel * input_size * sizeof(float)),
          .packed_weights = nonzero_values,
          .input_increments = input_increments,
          .output_channel_nonzeros = output_channel_nonzeros,
          .c = output,
          .batched_a_stride = input_batch_stride << log2_input_element_size,
          .batched_c_stride = output_batch_stride << log2_output_element_size,
          .ukernel = convolution_op->ukernel.spmm.function,
      };
      memcpy(&convolution_op->context.spmm.params, params, sizeof(convolution_op->context.spmm.params));

      const size_t mr = convolution_op->ukernel.spmm.mr;
      size_t mc = input_size;
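      // Tiling heuristic: split the spatial dimension so each thread gets about
      // target_tiles_per_thread tiles, rounding the tile size to a multiple of
      // the ukernel's mr.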
      if (num_threads > 1) {
        const size_t target_tiles_per_thread = 5;
        const size_t max_mc = divide_round_up(input_size, num_threads * target_tiles_per_thread);
        if (max_mc < mc) {
          mc = min(mc, divide_round_up(mc, max_mc * mr) * mr);
        }
      }
      convolution_op->compute.type = xnn_parallelization_type_2d_tile_1d;
      convolution_op->compute.task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_spmm;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = input_size;
      convolution_op->compute.tile[0] = mc;
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    case xnn_ukernel_type_dconv2d_hwc2spchw:
    {
      const size_t zero_size = (input_width * convolution_op->group_input_channels << log2_input_element_size) + XNN_EXTRA_BYTES;
      void* zero_buffer = xnn_reallocate_memory(convolution_op->zero_buffer, zero_size);
      if (zero_buffer == NULL) {
        xnn_log_error("failed to allocate %zu bytes for zero padding", zero_size);
        return xnn_status_out_of_memory;
      }
      memset(zero_buffer, 0, zero_size);
      convolution_op->zero_buffer = zero_buffer;

      convolution_op->context.dconv2d = (struct dconv2d_context) {
        .input_height = input_height,
        .input_width = input_width,
        .input = input,
        .input_batch_stride = input_batch_stride << log2_input_element_size,
        .zero = zero_buffer,
        .packed_weights = convolution_op->packed_weights,
        .output = output,
        .output_batch_stride = output_batch_stride << log2_output_element_size,
        .input_padding_top = convolution_op->padding_top,
        .output_channels = convolution_op->group_output_channels,
        .output_height_stride = output_width << log2_output_element_size,
        .output_channel_stride = output_height * output_width << log2_output_element_size,
        .hwc2spchw_ukernel = convolution_op->ukernel.dconv2d.hwc2spchw_function,
      };
      memcpy(&convolution_op->context.dconv2d.params, params, sizeof(convolution_op->context.dconv2d.params));

      size_t output_height_slice = output_height;
      const size_t output_height_tile = convolution_op->ukernel.dconv2d.output_height_tile;
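      // Same tiling heuristic as the SpMM path: about 5 tiles per thread, in
      // multiples of the ukernel's output height tile.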
      if (num_threads > 1) {
        const size_t target_tiles_per_thread = 5;
        const size_t max_output_height_slice = divide_round_up(output_height, num_threads * target_tiles_per_thread);
        if (max_output_height_slice < output_height_slice) {
          output_height_slice = min(output_height_slice,
            divide_round_up(output_height_slice, max_output_height_slice * output_height_tile) * output_height_tile);
        }
      }
      convolution_op->compute.type = xnn_parallelization_type_2d_tile_1d;
      convolution_op->compute.task_2d_tile_1d = (pthreadpool_task_2d_tile_1d_t) xnn_compute_dconv2d_hwc2spchw;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = output_height;
      convolution_op->compute.tile[0] = output_height_slice;
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    case xnn_ukernel_type_dwconv:
    {
      xnn_update_f32_spchw_params((union xnn_f32_spchw_params*) params, input_width);
      convolution_op->context.dwconv2d = (struct dwconv2d_context) {
        .output_height = output_height,
        .input_width = input_width,
        .input = input,
        .input_channel_stride = input_height * input_width << log2_input_element_size,
        .input_batch_stride = input_batch_stride << log2_input_element_size,
        .packed_weights = convolution_op->packed_weights,
        .weights_channel_stride = bias_element_size +
          (convolution_op->kernel_height * convolution_op->kernel_width << log2_filter_element_size),
        .output = output,
        .output_channel_stride = output_height * output_width << log2_output_element_size,
        .output_batch_stride = output_batch_stride << log2_output_element_size,
        .input_tuple_stride = convolution_op->ukernel.dwconv2d.input_width_tile << log2_input_element_size,
        .output_tuple_stride = convolution_op->ukernel.dwconv2d.output_width_tile << log2_output_element_size,
        .input_pixel_stride = input_width << log2_input_element_size,
        .output_pixel_stride = output_width << log2_output_element_size,
        .spchw_ukernel = convolution_op->ukernel.dwconv2d.spchw_function,
      };
      memcpy(&convolution_op->context.dwconv2d.params, params, sizeof(convolution_op->context.dwconv2d.params));

      convolution_op->compute.type = xnn_parallelization_type_2d;
      convolution_op->compute.task_2d = (pthreadpool_task_2d_t) xnn_compute_dwconv2d_spchw;
      convolution_op->compute.range[0] = batch_size;
      convolution_op->compute.range[1] = groups;
      convolution_op->state = xnn_run_state_ready;

      return xnn_status_success;
    }
    default:
      XNN_UNREACHABLE;
  }
}

enum xnn_status xnn_setup_convolution2d_nchw_f32(
    xnn_operator_t convolution_op,
    size_t batch_size,
    size_t input_batch_stride,
    size_t output_batch_stride,
    size_t input_height,
    size_t input_width,
    const float* input,
    float* output,
    pthreadpool_t threadpool)
{
  if (convolution_op->type != xnn_operator_type_convolution_nchw_f32) {
    xnn_log_error("failed to setup Convolution (NCHW, F32) operator: operator type mismatch");
    return xnn_status_invalid_parameter;
  }

  return setup_convolution2d_nchw(
    convolution_op,
    batch_size, input_batch_stride, output_batch_stride,
    input_height, input_width,
    input, output,
    2 /* log2(sizeof(input element)) = log2(sizeof(float)) */,
    2 /* log2(sizeof(filter element)) = log2(sizeof(float)) */,
    sizeof(float) /* sizeof(bias element) */,
    2 /* log2(sizeof(output element)) = log2(sizeof(float)) */,
    &convolution_op->f32_output_params,
    pthreadpool_get_threads_count(threadpool));
}
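
// A minimal usage sketch of this operator (illustrative only, not part of the
// library; error handling elided, and xnn_initialize() is assumed to take no
// arguments as in the 2019-era API). A 1x1 convolution with no padding and
// groups == 1 takes the SpMM path above:
//
//   xnn_initialize();
//   xnn_operator_t op = NULL;
//   xnn_create_convolution2d_nchw_f32(
//     /*padding top/right/bottom/left=*/0, 0, 0, 0,
//     /*kernel=*/1, 1, /*subsampling=*/1, 1, /*dilation=*/1, 1,
//     /*groups=*/1, /*group_input_channels=*/256, /*group_output_channels=*/256,
//     kernel, bias, /*output_min=*/-INFINITY, /*output_max=*/INFINITY,
//     /*flags=*/0, &op);
//   xnn_setup_convolution2d_nchw_f32(
//     op, /*batch_size=*/1,
//     /*input_batch_stride=*/256 * 56 * 56, /*output_batch_stride=*/256 * 56 * 56,
//     /*input_height=*/56, /*input_width=*/56, input, output, threadpool);
//   xnn_run_operator(op, threadpool);
//   xnn_delete_operator(op);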