/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "nnacl/kernel/group_convolution.h"
#include "nnacl/kernel/convolution_delegate.h"
#include "nnacl/base/conv_common_base.h"
#include "nnacl/tensor_c_utils.h"

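// Prepare every per-group sub-convolution kernel, failing fast on the first error.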
int GroupConvBasePrepare(GroupConvolutionStruct *group_conv) {
  for (int i = 0; i < group_conv->group_; ++i) {
    KernelBase *sub_conv = group_conv->group_convs_[i];
    NNACL_CHECK_NULL_RETURN_ERR(sub_conv);
    int ret = sub_conv->Prepare(sub_conv);
    if (ret != NNACL_OK) {
      return ret;
    }
  }
  return NNACL_OK;
}

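// Allocate a variable NHWC input tensor for one sub-convolution; the real shape is
// filled in later by GroupConvUpdateShape and the data buffer at compute time.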
int GroupConvCreatorNewInputTensor(GroupConvolutionStruct *group_conv, KernelBase *new_conv) {
  TensorC *in_tensor = (TensorC *)malloc(sizeof(TensorC));
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(in_tensor);
  in_tensor->format_ = Format_NHWC;
  in_tensor->category_ = VarTensor;
  in_tensor->data_type_ = group_conv->data_type_;
  in_tensor->shape_size_ = DIMENSION_4D;
  in_tensor->shape_[Index0] = INVALID_SHAPE;
  new_conv->in_[FIRST_INPUT] = in_tensor;
  return NNACL_OK;
}

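// Allocate a variable NHWC output tensor for one sub-convolution, mirroring the input case above.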
int GroupConvCreatorNewOutputTensor(GroupConvolutionStruct *group_conv, KernelBase *new_conv) {
  TensorC *out_tensor = (TensorC *)malloc(sizeof(TensorC));
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(out_tensor);
  out_tensor->format_ = Format_NHWC;
  out_tensor->category_ = VarTensor;
  out_tensor->data_type_ = group_conv->data_type_;
  out_tensor->shape_size_ = DIMENSION_4D;
  out_tensor->shape_[Index0] = INVALID_SHAPE;
  new_conv->out_[OUTPUT_INDEX] = out_tensor;
  return NNACL_OK;
}

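// Slice one group's constant data (weight or bias) out of the original tensor: the new
// tensor takes the given shape and copies the index-th chunk of that size from the source.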
TensorC *CreateConstTensor(const TensorC *tensor, const int *shape, const int shape_size, const int index) {
  NNACL_CHECK_NULL_RETURN_NULL(tensor->data_);

  TensorC *new_tensor = (TensorC *)malloc(sizeof(TensorC));
  NNACL_MALLOC_CHECK_NULL_RETURN_NULL(new_tensor);
  new_tensor->data_type_ = tensor->data_type_;
  new_tensor->format_ = Format_NHWC;
  new_tensor->category_ = ConstTensor;
  new_tensor->shape_size_ = shape_size;
  memcpy(new_tensor->shape_, shape, shape_size * sizeof(int));

  int size = GetSize(new_tensor);
  if (size <= 0) {
    free(new_tensor);
    return NULL;
  }

  void *data = malloc(size);
  if (data == NULL) {
    free(new_tensor);
    return NULL;
  }
  new_tensor->data_ = data;

  uint8_t *new_tensor_data = (uint8_t *)tensor->data_ + index * size;
  memcpy(new_tensor->data_, new_tensor_data, size);
  return new_tensor;
}

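// Build the per-group weight tensor (and bias tensor, when a third input exists) for one sub-convolution.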
int GroupConvCreatorNewConstTensor(GroupConvolutionStruct *group_conv, KernelBase *new_conv, int group_id) {
  TensorC *origin_weight = group_conv->conv_base_.base_.in_[SECOND_INPUT];
  int shape[] = {group_conv->sub_out_c_, GetHeight(origin_weight), GetWidth(origin_weight), group_conv->sub_in_c_};
  TensorC *weight_tensor = CreateConstTensor(origin_weight, shape, DIMENSION_4D, group_id);
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(weight_tensor);
  new_conv->in_[SECOND_INPUT] = weight_tensor;

  if (group_conv->conv_base_.base_.in_size_ == THREE_TENSOR) {
    TensorC *bias_weight = group_conv->conv_base_.base_.in_[THIRD_INPUT];
    TensorC *bias_tensor = CreateConstTensor(bias_weight, &group_conv->sub_out_c_, DIMENSION_1D, group_id);
    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(bias_tensor);
    new_conv->in_[THIRD_INPUT] = bias_tensor;
  }
  return NNACL_OK;
}

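// Validate the original convolution parameters against the weight tensor and derive the
// per-group input/output channel counts shared by all sub-convolutions.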
int GroupConvCreatorSetShapeOfTensors(GroupConvolutionStruct *group_conv) {
  ConvParameter *origin_conv_param = (ConvParameter *)group_conv->conv_base_.base_.param_;
  NNACL_CHECK_NULL_RETURN_ERR(origin_conv_param);
  ConvParameter *new_conv_param = &group_conv->new_conv_param_;
  NNACL_CHECK_NULL_RETURN_ERR(new_conv_param);
  memcpy(new_conv_param, origin_conv_param, sizeof(ConvParameter));

  TensorC *weight_tensor = group_conv->conv_base_.base_.in_[SECOND_INPUT];
  NNACL_CHECK_NULL_RETURN_ERR(weight_tensor);
  NNACL_CHECK_FALSE(origin_conv_param->group_ == 0, NNACL_GROUP_CONVOLUTION_GROUP_INVALID);
  NNACL_CHECK_FALSE(weight_tensor->shape_size_ != DIMENSION_4D, NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID);
  NNACL_CHECK_FALSE(origin_conv_param->kernel_h_ != GetHeight(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID);
  NNACL_CHECK_FALSE(origin_conv_param->kernel_w_ != GetWidth(weight_tensor), NNACL_CONVOLUTION_WEIGHT_SHAPE_INVALID);

  ConvComputeParam *compute = &group_conv->conv_base_.compute_;
  group_conv->ori_in_c_ = compute->in_c_;
  group_conv->ori_out_c_ = compute->out_c_;
  group_conv->sub_in_c_ = compute->in_c_ / group_conv->group_;
  group_conv->sub_out_c_ = compute->out_c_ / group_conv->group_;

  new_conv_param->input_channel_ = group_conv->sub_in_c_;
  new_conv_param->output_channel_ = group_conv->sub_out_c_;
  new_conv_param->group_ = origin_conv_param->group_;

  return NNACL_OK;
}

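// Copy the group kernel's context (env, threading, weight-sharing hooks) into one
// sub-convolution and build its input/output tensor lists for group group_id.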
int GroupConvSetSubConvInfo(GroupConvolutionStruct *group_conv, KernelBase *new_conv, int group_id) {
  NNACL_CHECK_NULL_RETURN_ERR(group_conv);
  NNACL_CHECK_NULL_RETURN_ERR(new_conv);

  ConvolutionBaseStruct *sub_conv = (ConvolutionBaseStruct *)new_conv;
  (void)ConvBaseUpdateParamInfo(&sub_conv->compute_, &group_conv->new_conv_param_);

  sub_conv->infershape_done_ = group_conv->conv_base_.infershape_done_;
  sub_conv->shaing_manager_ = group_conv->conv_base_.shaing_manager_;
  sub_conv->get_sharing_weight_ = group_conv->conv_base_.get_sharing_weight_;
  sub_conv->free_sharing_weight_ = group_conv->conv_base_.free_sharing_weight_;
  sub_conv->is_sharing_pack_ = group_conv->conv_base_.is_sharing_pack_;

  new_conv->env_ = group_conv->conv_base_.base_.env_;
  new_conv->param_ = &group_conv->new_conv_param_.op_parameter_;
  new_conv->thread_nr_ = group_conv->conv_base_.base_.thread_nr_;
  new_conv->train_session_ = group_conv->conv_base_.base_.train_session_;
  new_conv->UpdateThread = group_conv->conv_base_.base_.UpdateThread;
  new_conv->in_size_ = group_conv->conv_base_.base_.in_size_;
  new_conv->out_size_ = group_conv->conv_base_.base_.out_size_;

  new_conv->in_ = (TensorC **)malloc(new_conv->in_size_ * sizeof(TensorC *));
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(new_conv->in_);
  memset(new_conv->in_, 0, new_conv->in_size_ * sizeof(TensorC *));
  new_conv->out_ = (TensorC **)malloc(new_conv->out_size_ * sizeof(TensorC *));
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(new_conv->out_);
  memset(new_conv->out_, 0, new_conv->out_size_ * sizeof(TensorC *));

  // create new input for each group
  int ret = GroupConvCreatorNewInputTensor(group_conv, new_conv);
  if (ret != NNACL_OK) {
    group_conv->conv_base_.base_.Release((KernelBase *)group_conv);
    return ret;
  }

  // slice out this group's const tensors (weight and optional bias)
  ret = GroupConvCreatorNewConstTensor(group_conv, new_conv, group_id);
  if (ret != NNACL_OK) {
    group_conv->conv_base_.base_.Release((KernelBase *)group_conv);
    return ret;
  }

  // create new output tensor
  ret = GroupConvCreatorNewOutputTensor(group_conv, new_conv);
  if (ret != NNACL_OK) {
    group_conv->conv_base_.base_.Release((KernelBase *)group_conv);
    return ret;
  }
  return NNACL_OK;
}

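// Parallel task: copy this task's slice of a sub-convolution's NHWC output into the
// matching channel offset of the full group output.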
int GroupConvConcatOutputRun(void *cdata, int task_id, float l, float r) {
  NNACL_CHECK_NULL_RETURN_ERR(cdata);
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)cdata;

  int plane_step = UP_DIV(group_conv->conv_base_.compute_.out_hw_, group_conv->conv_base_.base_.thread_nr_);
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, NNACL_ERR);
  int begin_plane = plane_step * task_id;
  int end_plane = NNACL_MIN(group_conv->conv_base_.compute_.out_hw_, plane_step * (task_id + 1));
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, group_conv->sub_out_c_, NNACL_ERR);
  float *src_ptr = group_conv->sub_out_src_ + begin_plane * group_conv->sub_out_c_;
  float *dst_ptr = group_conv->sub_out_dst_ + begin_plane * group_conv->ori_out_c_;
  for (int i = begin_plane; i < end_plane; ++i) {
    (void)memcpy(dst_ptr, src_ptr, group_conv->sub_out_c_ * sizeof(float));
    src_ptr += group_conv->sub_out_c_;
    dst_ptr += group_conv->ori_out_c_;
  }
  return NNACL_OK;
}

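// Scatter the group_id-th sub-convolution output back into the final output tensor along the channel axis.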
int GroupConvPostConcat(GroupConvolutionStruct *group_conv, int group_id) {
  group_conv->sub_out_src_ = (float *)group_conv->group_convs_[group_id]->out_[OUTPUT_INDEX]->data_;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->sub_out_src_);

  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(group_id, group_conv->sub_out_c_, NNACL_ERR);
  group_conv->sub_out_dst_ = (float *)(group_conv->origin_output_data_) + group_id * group_conv->sub_out_c_;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->sub_out_dst_);

  return group_conv->conv_base_.base_.env_->ParallelLaunch(group_conv->conv_base_.base_.env_->thread_pool_,
                                                           GroupConvConcatOutputRun, group_conv,
                                                           group_conv->conv_base_.base_.thread_nr_);
}

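// Parallel task: gather this task's slice of the group input channels into a
// sub-convolution's contiguous input buffer.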
int GroupConvSeparateInputRun(void *cdata, int task_id, float l, float r) {
  NNACL_CHECK_NULL_RETURN_ERR(cdata);
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)cdata;

  int plane_step = UP_DIV(group_conv->conv_base_.compute_.in_hw_, group_conv->conv_base_.base_.thread_nr_);
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(plane_step, task_id, NNACL_ERR);
  int begin_plane = plane_step * task_id;
  int end_plane = NNACL_MIN(group_conv->conv_base_.compute_.in_hw_, plane_step * (task_id + 1));
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, group_conv->ori_in_c_, NNACL_ERR);
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(begin_plane, group_conv->sub_in_c_, NNACL_ERR);
  float *src_ptr = group_conv->sub_in_src_ + begin_plane * group_conv->ori_in_c_;
  float *dst_ptr = group_conv->sub_in_dst_ + begin_plane * group_conv->sub_in_c_;
  for (int i = begin_plane; i < end_plane; ++i) {
    (void)memcpy(dst_ptr, src_ptr, group_conv->sub_in_c_ * sizeof(float));
    src_ptr += group_conv->ori_in_c_;
    dst_ptr += group_conv->sub_in_c_;
  }

  return NNACL_OK;
}

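// Point source/destination at the group_id-th channel slice of the input and launch the parallel split.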
int GroupConvSeparateInput(GroupConvolutionStruct *group_conv, int group_id) {
  NNACL_CHECK_INT_MUL_NOT_OVERFLOW(group_id, group_conv->sub_in_c_, NNACL_ERR);

  group_conv->sub_in_src_ = (float *)(group_conv->origin_input_data_) + group_id * group_conv->sub_in_c_;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->sub_in_src_);
  group_conv->sub_in_dst_ = (float *)(group_conv->group_convs_[group_id]->in_[FIRST_INPUT]->data_);
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->sub_in_dst_);

  return group_conv->conv_base_.base_.env_->ParallelLaunch(group_conv->conv_base_.base_.env_->thread_pool_,
                                                           GroupConvSeparateInputRun, group_conv,
                                                           group_conv->conv_base_.base_.thread_nr_);
}

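// Push the current batch/height/width plus the per-group channel count into every
// sub-kernel's input and output shape.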
void GroupConvUpdateShape(GroupConvolutionStruct *group_conv) {
  for (int i = 0; i < group_conv->group_; i++) {
    TensorC *in_tensor = group_conv->conv_base_.base_.in_[FIRST_INPUT];
    int in_shape[] = {GetBatch(in_tensor), GetHeight(in_tensor), GetWidth(in_tensor), group_conv->sub_in_c_};
    memcpy(group_conv->group_convs_[i]->in_[FIRST_INPUT]->shape_, in_shape, DIMENSION_4D * sizeof(int));

    TensorC *out_tensor = group_conv->conv_base_.base_.out_[OUTPUT_INDEX];
    int out_shape[] = {GetBatch(out_tensor), GetHeight(out_tensor), GetWidth(out_tensor), group_conv->sub_out_c_};
    memcpy(group_conv->group_convs_[i]->out_[OUTPUT_INDEX]->shape_, out_shape, DIMENSION_4D * sizeof(int));
  }
}

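// Refresh compute info, bound the thread count by both the input and output plane sizes,
// then resize every sub-kernel with the shared thread count.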
int GroupConvolutionResize(KernelBase *self) {
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)self;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv);

  (void)ConvBaseUpdateComputeInfo(&group_conv->conv_base_);
  self->thread_nr_ = NNACL_MIN(NNACL_MAX(1, self->thread_nr_), group_conv->conv_base_.compute_.in_hw_);
  self->thread_nr_ = NNACL_MIN(NNACL_MAX(1, self->thread_nr_), group_conv->conv_base_.compute_.out_hw_);

  GroupConvUpdateShape(group_conv);

  for (int i = 0; i < group_conv->group_; ++i) {
    group_conv->group_convs_[i]->thread_nr_ = self->thread_nr_;
    int ret = group_conv->group_convs_[i]->Resize(group_conv->group_convs_[i]);
    if (ret != NNACL_OK) {
      return ret;
    }
  }
  return NNACL_OK;
}

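// Run the whole group convolution: for each group, split the input, run the sub-kernel,
// and concatenate its output into the final tensor.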
int GroupConvolutionCompute(KernelBase *self) {
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)self;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv);

  group_conv->origin_input_data_ = self->in_[FIRST_INPUT]->data_;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->origin_input_data_);
  group_conv->origin_output_data_ = self->out_[OUTPUT_INDEX]->data_;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv->origin_output_data_);

  for (int i = 0; i < group_conv->group_; ++i) {
    // first, malloc data for sub-kernel's tensors.
    TensorC *sub_kernel_in_tensor = group_conv->group_convs_[i]->in_[FIRST_INPUT];
    sub_kernel_in_tensor->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(sub_kernel_in_tensor));
    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(sub_kernel_in_tensor->data_);

    TensorC *sub_kernel_out_tensor = group_conv->group_convs_[i]->out_[OUTPUT_INDEX];
    sub_kernel_out_tensor->data_ = self->env_->Alloc(self->env_->allocator_, GetSize(sub_kernel_out_tensor));
    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(sub_kernel_out_tensor->data_);

    // second, separate group conv input into several parts. This step must happen at runtime.
    int ret = GroupConvSeparateInput(group_conv, i);
    if (ret != NNACL_OK) {
      return ret;
    }

    // third, run the sub-kernel.
    ret = group_conv->group_convs_[i]->Compute(group_conv->group_convs_[i]);
    if (ret != NNACL_OK) {
      return ret;
    }

    // post process: concat all outputs of sub-kernels into one output.
    ret = GroupConvPostConcat(group_conv, i);
    if (ret != NNACL_OK) {
      return ret;
    }

    // free the per-group buffers.
    self->env_->Free(self->env_->allocator_, sub_kernel_in_tensor->data_);
    sub_kernel_in_tensor->data_ = NULL;
    self->env_->Free(self->env_->allocator_, sub_kernel_out_tensor->data_);
    sub_kernel_out_tensor->data_ = NULL;
  }
  return NNACL_OK;
}

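// Create one sub-convolution kernel per group via the convolution delegate, wire each
// one up, then prepare them all.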
int GroupConvolutionPrepare(KernelBase *self) {
  NNACL_CHECK_FALSE(self->in_size_ < TWO_TENSOR, NNACL_INPUT_TENSOR_ERROR);
  NNACL_CHECK_FALSE(self->out_size_ != ONE_TENSOR, NNACL_OUTPUT_TENSOR_ERROR);
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)self;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv);
  NNACL_CHECK_FALSE(group_conv->group_ == 0, NNACL_GROUP_CONVOLUTION_GROUP_INVALID);

  int ret = GroupConvCreatorSetShapeOfTensors(group_conv);
  if (ret != NNACL_OK) {
    return ret;
  }

  group_conv->group_convs_ = (KernelBase **)malloc(group_conv->group_ * sizeof(KernelBase *));
  NNACL_MALLOC_CHECK_NULL_RETURN_ERR(group_conv->group_convs_);
  memset(group_conv->group_convs_, 0, group_conv->group_ * sizeof(KernelBase *));

  for (int i = 0; i < group_conv->group_; ++i) {
    KernelBase *new_conv = CreateConvlutionDelegate(&group_conv->new_conv_param_);
    NNACL_MALLOC_CHECK_NULL_RETURN_ERR(new_conv);
    group_conv->group_convs_[i] = new_conv;

    ret = GroupConvSetSubConvInfo(group_conv, new_conv, i);
    if (ret != NNACL_OK) {
      return ret;
    }
  }
  return GroupConvBasePrepare(group_conv);
}

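// Release one sub-convolution and free the tensors created for it. Const tensor data is
// owned here; variable tensor data is allocated and freed per Compute call.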
void GroupConvReleaseSubConv(KernelBase *current_conv) {
  (void)current_conv->Release(current_conv);

  if (current_conv->in_ != NULL) {
    for (int j = 0; j < current_conv->in_size_; j++) {
      if (current_conv->in_[j] != NULL) {
        if (IsConst(current_conv->in_[j])) {
          free(current_conv->in_[j]->data_);
          current_conv->in_[j]->data_ = NULL;
        }
        free(current_conv->in_[j]);
        current_conv->in_[j] = NULL;
      }
    }
    free(current_conv->in_);
    current_conv->in_ = NULL;
  }

  if (current_conv->out_ != NULL) {
    for (int j = 0; j < current_conv->out_size_; j++) {
      if (current_conv->out_[j] != NULL) {
        free(current_conv->out_[j]);
        current_conv->out_[j] = NULL;
      }
    }
    free(current_conv->out_);
    current_conv->out_ = NULL;
  }
}

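// Release every sub-convolution and the kernel array itself.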
int GroupConvolutionRelease(KernelBase *self) {
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)self;
  NNACL_CHECK_NULL_RETURN_ERR(group_conv);
  ConvParameter *conv_param = (ConvParameter *)self->param_;
  NNACL_CHECK_NULL_RETURN_ERR(conv_param);

  if (group_conv->group_convs_ != NULL) {
    for (int i = 0; i < conv_param->group_; i++) {
      if (group_conv->group_convs_[i] != NULL) {
        GroupConvReleaseSubConv(group_conv->group_convs_[i]);
        free(group_conv->group_convs_[i]);
        group_conv->group_convs_[i] = NULL;
      }
    }
    free(group_conv->group_convs_);
    group_conv->group_convs_ = NULL;
  }
  return NNACL_OK;
}

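// Entry point: allocate the group convolution kernel and wire up its KernelBase function table.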
KernelBase *CreateGroupConvolution(ConvParameter *conv_param, TypeIdC data_type) {
  GroupConvolutionStruct *group_conv = (GroupConvolutionStruct *)malloc(sizeof(GroupConvolutionStruct));
  NNACL_MALLOC_CHECK_NULL_RETURN_NULL(group_conv);
  memset(group_conv, 0, sizeof(GroupConvolutionStruct));

  group_conv->data_type_ = data_type;
  group_conv->group_ = conv_param->group_;
  group_conv->conv_base_.base_.Compute = GroupConvolutionCompute;
  group_conv->conv_base_.base_.Resize = GroupConvolutionResize;
  group_conv->conv_base_.base_.Prepare = GroupConvolutionPrepare;
  group_conv->conv_base_.base_.Release = GroupConvolutionRelease;
  return (KernelBase *)group_conv;
}