/**
 * Copyright 2023 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifdef ENABLE_AVX
#include "nnacl/kernel/convolution_sw_avx.h"
#include "nnacl/kernel/convolution_slidewindow.h"
#include "nnacl/fp32/conv_1x1_avx_fp32.h"
#include "nnacl/fp32/conv_sw_avx_fp32.h"

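// Editorial doc comment (derived from the function body below): sets the AVX-specific tiling
// for the sliding-window convolution. Output channels are processed in C8NUM-wide tiles
// (8 fp32 lanes, one 256-bit ymm register), with the leftover channels recorded in oc_res_.
// For a 1x1 kernel the input channels are tiled to C8NUM as well and the remainder is kept
// in ic_res_, since the 1x1 path packs the input to a C8NUM-aligned layout.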
void ConvSWAVXInitGlobalVariable(ConvolutionBaseStruct *conv) {
  ConvolutionSWStruct *conv_sw = (ConvolutionSWStruct *)conv;
  NNACL_CHECK_NULL_RETURN_VOID(conv_sw);
  ConvParameter *conv_param = (ConvParameter *)conv->base_.param_;
  NNACL_CHECK_NULL_RETURN_VOID(conv_param);

  conv_sw->oc_tile_ = C8NUM;
  conv_sw->oc_res_ = conv_param->output_channel_ % conv_sw->oc_tile_;
  if (conv_param->kernel_h_ == 1 && conv_param->kernel_w_ == 1) {
    // 1x1 conv is aligned to C8NUM
    conv_sw->in_tile_ = C8NUM;
    conv_sw->ic_res_ = conv_param->input_channel_ % conv_sw->in_tile_;
  }
}

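// Editorial doc comment (derived from the function body below): per-task compute body wired
// to run_impl_. It dispatches to the specialized 1x1 sliding-window AVX kernel when the
// filter is 1x1 and to the generic sliding-window AVX kernel otherwise; task_id selects the
// slice of work handled by the calling thread.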
int ConvSWAVXRunImpl(ConvolutionBaseStruct *conv, int task_id) {
  ConvolutionSWStruct *conv_sw = (ConvolutionSWStruct *)conv;
  NNACL_CHECK_NULL_RETURN_ERR(conv_sw);
  ConvParameter *conv_param = (ConvParameter *)conv->base_.param_;
  NNACL_CHECK_NULL_RETURN_ERR(conv_param);

  if (conv_param->kernel_w_ == 1 && conv_param->kernel_h_ == 1) {
    Conv1x1SWAVXFp32(conv_sw->input_data_, (float *)conv->packed_weight_, (float *)conv->bias_data_,
                     conv_sw->output_data_, task_id, conv_param, &conv_sw->sw_param_);
  } else {
    ConvSWAVXFp32(conv_sw->input_data_, (float *)conv->packed_weight_, (float *)conv->bias_data_,
                  conv_sw->output_data_, task_id, conv_param, &conv_sw->sw_param_);
  }
  return NNACL_OK;
}

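// Editorial doc comment (derived from the function body below): factory for the AVX
// sliding-window convolution kernel. Allocates a zero-initialized ConvolutionSWStruct,
// plugs the AVX-specific hooks (run_impl_, init_global_variable_, weight packing and
// allocation) into the ConvolutionBaseStruct, and reuses the shared sliding-window
// Prepare/Compute/Resize/Release implementations for the KernelBase interface.
// Returns NULL if the allocation fails.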
ConvolutionBaseStruct *CreateConvolutionSWAVX(ConvParameter *conv_param) {
  ConvolutionSWStruct *sw = (ConvolutionSWStruct *)malloc(sizeof(ConvolutionSWStruct));
  NNACL_MALLOC_CHECK_NULL_RETURN_NULL(sw);
  memset(sw, 0, sizeof(ConvolutionSWStruct));

  sw->conv_.run_impl_ = ConvSWAVXRunImpl;
  sw->conv_.init_global_variable_ = ConvSWAVXInitGlobalVariable;
  sw->conv_.pack_weight_ = ConvSWPackWeight;
  sw->conv_.malloc_weight_bias_ = ConvSWMallocWeightBiasData;

  sw->conv_.base_.Compute = ConvolutionSWCompute;
  sw->conv_.base_.Prepare = ConvolutionSWPrepare;
  sw->conv_.base_.Release = ConvolutionSWRelease;
  sw->conv_.base_.Resize = ConvolutionSWResize;

  return (ConvolutionBaseStruct *)sw;
}
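
/*
 * Illustrative usage sketch (editorial, not part of this file's API surface): a caller that
 * selected this kernel would typically drive it through the generic KernelBase callbacks.
 * The exact KernelBase callback signatures, error handling, and the binding of parameters
 * and tensors are assumptions here, not defined in this file.
 *
 *   ConvolutionBaseStruct *conv = CreateConvolutionSWAVX(conv_param);
 *   if (conv == NULL) { ... handle allocation failure ... }
 *   ... bind param, env, and in/out tensors on conv->base_ as the framework normally does ...
 *   int ret = conv->base_.Prepare(&conv->base_);                        // packs weights via pack_weight_
 *   if (ret == NNACL_OK) { ret = conv->base_.Resize(&conv->base_); }
 *   if (ret == NNACL_OK) { ret = conv->base_.Compute(&conv->base_); }   // runs ConvSWAVXRunImpl per task
 *   conv->base_.Release(&conv->base_);
 *   free(conv);
 */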
#endif