/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "src/litert/delegate/nnapi/op/pooling_nnapi.h"
#include "src/litert/delegate/nnapi/nnapi_utils.h"
#include "nnacl/op_base.h"

namespace mindspore {
namespace lite {
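// Translate the flatbuffer pooling attributes (paddings, strides, kernel size) into
// the scalar lists consumed by the NNAPI pooling operation. For global pooling the
// kernel is derived from the spatial dimensions of the NHWC input tensor.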
int NNAPIPooling::SetPoolingParams(const flatbuffers::Vector<int64_t> *pads,
                                   const flatbuffers::Vector<int64_t> *strides,
                                   const flatbuffers::Vector<int64_t> *kernel_size, bool is_global) {
  MS_ASSERT(pads != nullptr && strides != nullptr && kernel_size != nullptr);
  MS_CHECK_TRUE_RET(pads->size() == DIMENSION_4D, RET_ERROR);
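  // NNAPI expects explicit paddings in the order: left, right, top, bottom.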
  pad_list_.push_back(static_cast<int>(*(pads->begin() + PAD_LEFT)));
  pad_list_.push_back(static_cast<int>(*(pads->begin() + PAD_RIGHT)));
  pad_list_.push_back(static_cast<int>(*(pads->begin() + PAD_UP)));
  pad_list_.push_back(static_cast<int>(*(pads->begin() + PAD_DOWN)));

  MS_CHECK_TRUE_RET(strides->size() == DIMENSION_2D, RET_ERROR);
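  // The source attribute stores strides as (height, width), while NNAPI consumes the
  // stride in the width dimension first, so the two values are pushed in reverse order.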
  strides_.push_back(static_cast<int>(*(strides->begin() + 1)));
  strides_.push_back(static_cast<int>(*(strides->begin())));
  if (is_global) {
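    // Global pooling: the kernel covers the whole spatial area of the NHWC input,
    // pushed as (width, height) to match the NNAPI filter-size operand order.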
    MS_CHECK_TRUE_RET(in_tensors_.at(0).Shape().size() == DIMENSION_4D, RET_ERROR);
    kernel_size_.at(0) = in_tensors_.at(0).Shape().at(2);
    kernel_size_.at(1) = in_tensors_.at(0).Shape().at(1);
  } else if (kernel_size != nullptr && kernel_size->size() == DIMENSION_2D) {
    kernel_size_.at(0) = static_cast<int>(*(kernel_size->begin()));
    kernel_size_.at(1) = static_cast<int>(*(kernel_size->begin() + 1));
  }
  return RET_OK;
}

int NNAPIPooling::InitParams() {
  bool is_global = false;
  const flatbuffers::Vector<int64_t> *pads = nullptr;
  const flatbuffers::Vector<int64_t> *strides = nullptr;
  const flatbuffers::Vector<int64_t> *kernel_size = nullptr;
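  // AvgPoolFusion and MaxPoolFusion expose the same attribute fields; fetch them
  // from whichever flatbuffer table matches the primitive type.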
  if (type_ == schema::PrimitiveType_AvgPoolFusion) {
    auto pool = op_primitive_->value_as_AvgPoolFusion();
    MS_ASSERT(pool != nullptr);
    act_type_ = pool->activation_type();
    pads = pool->pad();
    strides = pool->strides();
    kernel_size = pool->kernel_size();
    is_global = pool->global();
  } else {
    auto pool = op_primitive_->value_as_MaxPoolFusion();
    MS_ASSERT(pool != nullptr);
    act_type_ = pool->activation_type();
    pads = pool->pad();
    strides = pool->strides();
    kernel_size = pool->kernel_size();
    is_global = pool->global();
  }

  return SetPoolingParams(pads, strides, kernel_size, is_global);
}

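// Register the pooling operation with the NNAPI model: map the op's in/out tensors,
// then append the padding, stride, kernel-size, activation and layout scalars as
// extra inputs in the order required by the NNAPI pooling operations.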
int NNAPIPooling::AddOpToNNAPIModel(ANeuralNetworksModel *nnapi_model, std::vector<mindspore::MSTensor> *all_tensors) {
  MS_ASSERT(nnapi_model != nullptr && all_tensors != nullptr);
  OperationCode node_type =
    type_ == schema::PrimitiveType_AvgPoolFusion ? ANEURALNETWORKS_AVERAGE_POOL_2D : ANEURALNETWORKS_MAX_POOL_2D;

  if (InitNNAPIOpInOut(*all_tensors) != RET_OK) {
    MS_LOG(ERROR) << "InitNNAPIOpInOut failed.";
    return RET_ERROR;
  }
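  // With explicit padding, an NNAPI pooling op takes its scalar inputs in this order:
  // paddings (left, right, top, bottom), strides (w, h), filter size (w, h),
  // fused activation, and an optional data layout flag (true means NCHW).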
  for (auto pad : pad_list_) {
    if (AddScalarToNNAPIModel<int>(nnapi_model, all_tensors, "pad", DataType::kNumberTypeInt32, pad) != RET_OK) {
      MS_LOG(ERROR) << "Add padding for pooling op to NNAPI model failed.";
      return RET_ERROR;
    }
  }
  for (auto stride : strides_) {
    if (AddScalarToNNAPIModel<int>(nnapi_model, all_tensors, "stride", DataType::kNumberTypeInt32, stride) != RET_OK) {
      MS_LOG(ERROR) << "Add stride for pooling op to NNAPI model failed.";
      return RET_ERROR;
    }
  }
  for (auto kernel_size : kernel_size_) {
    if (AddScalarToNNAPIModel<int>(nnapi_model, all_tensors, "kernel_size", DataType::kNumberTypeInt32, kernel_size) !=
        RET_OK) {
      MS_LOG(ERROR) << "Add kernel size for pooling op to NNAPI model failed.";
      return RET_ERROR;
    }
  }
  // Convert act_type to an input of the NNAPI node.
  if (AddScalarToNNAPIModel<int>(nnapi_model, all_tensors, "act_type", DataType::kNumberTypeInt32, act_type_) !=
      RET_OK) {
    MS_LOG(ERROR) << "Add activation type for pooling op to NNAPI model failed.";
    return RET_ERROR;
  }
  // Set the data layout flag (false means NHWC) as an input of the NNAPI node.
  if (AddScalarToNNAPIModel<bool>(nnapi_model, all_tensors, "nchw", DataType::kNumberTypeBool, false) != RET_OK) {
    MS_LOG(ERROR) << "Set data layout for pooling op to NNAPI model failed.";
    return RET_ERROR;
  }
  if (nnapi_->ANeuralNetworksModel_addOperation(nnapi_model, node_type, input_indices_.size(), input_indices_.data(),
                                                output_indices_.size(),
                                                output_indices_.data()) != ANEURALNETWORKS_NO_ERROR) {
    MS_LOG(ERROR) << "Add operation to NNAPI model failed: " << op_name_;
    return RET_ERROR;
  }
  return RET_OK;
}
}  // namespace lite
}  // namespace mindspore