• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /**
2  * Copyright 2022 Huawei Technologies Co., Ltd
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  * http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 #ifndef MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_OP_NNAPI_OP_H_
17 #define MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_OP_NNAPI_OP_H_
18 
19 #include <string>
20 #include <vector>
21 #include <utility>
22 #include "include/api/kernel.h"
23 #include "include/api/data_type.h"
24 #include "include/errorcode.h"
25 #include "src/common/log_adapter.h"
26 #include "src/litert/delegate/nnapi/NeuralNetworksTypes.h"
27 #include "src/litert/delegate/nnapi/nnapi_utils.h"
28 #include "schema/ops_generated.h"
29 
30 namespace mindspore {
31 namespace lite {
// Plain aggregate describing a tensor to be created for the NNAPI model:
// its name, data type, shape, and the raw data buffer.
struct MSTensorInfo {
  std::string name_;            // tensor name
  DataType type_;               // element data type
  std::vector<int64_t> shape_;  // tensor dimensions
  void *data_;                  // pointer to the raw tensor data
  size_t data_len_;             // length of data_ (bytes, per CreateTensor-style usage — confirm at call sites)
};
39 class NNAPIOp {
40  public:
NNAPIOp(const std::string & name,const schema::Primitive * primitive,std::vector<mindspore::MSTensor> in_tensors,std::vector<mindspore::MSTensor> out_tensors,schema::QuantType quant_type)41   explicit NNAPIOp(const std::string &name, const schema::Primitive *primitive,
42                    std::vector<mindspore::MSTensor> in_tensors, std::vector<mindspore::MSTensor> out_tensors,
43                    schema::QuantType quant_type)
44       : op_name_(name),
45         op_primitive_(primitive),
46         in_tensors_(std::move(in_tensors)),
47         out_tensors_(std::move(out_tensors)),
48         quant_type_(quant_type) {
49     if (primitive != nullptr) {
50       this->type_ = primitive->value_type();
51     }
52   }
53 
~NNAPIOp()54   virtual ~NNAPIOp() {
55     for (auto tensor : op_attribute_tensors_) {
56       if (tensor != nullptr) {
57         delete tensor;
58         tensor = nullptr;
59       }
60     }
61   }
62 
63   virtual bool IsSupport() = 0;
64   virtual int InitParams() = 0;
65   virtual int ConvertInOutQuantSymmToASymm();
66   virtual int AddOpToNNAPIModel(ANeuralNetworksModel *nnapi_model, std::vector<mindspore::MSTensor> *all_tensors) = 0;
67   int InitNNAPIOpInOut(const std::vector<mindspore::MSTensor> &all_tensors);
68 
inputs()69   const std::vector<mindspore::MSTensor> &inputs() { return this->in_tensors_; }
outputs()70   const std::vector<mindspore::MSTensor> &outputs() { return this->out_tensors_; }
set_inputs(const std::vector<mindspore::MSTensor> & inputs)71   void set_inputs(const std::vector<mindspore::MSTensor> &inputs) { this->in_tensors_ = inputs; }
set_outputs(const std::vector<mindspore::MSTensor> & outputs)72   void set_outputs(const std::vector<mindspore::MSTensor> &outputs) { this->out_tensors_ = outputs; }
73 
in_ops()74   const std::vector<NNAPIOp *> &in_ops() { return this->in_ops_; }
out_ops()75   const std::vector<NNAPIOp *> &out_ops() { return this->out_ops_; }
set_in_ops(const std::vector<NNAPIOp * > & in_ops)76   void set_in_ops(const std::vector<NNAPIOp *> &in_ops) { this->in_ops_ = in_ops; }
set_out_ops(const std::vector<NNAPIOp * > & out_ops)77   void set_out_ops(const std::vector<NNAPIOp *> &out_ops) { this->out_ops_ = out_ops; }
78 
name()79   const std::string name() { return op_name_; }
get_quant_type()80   schema::QuantType get_quant_type() { return quant_type_; }
81 
82  protected:
83   template <typename T>
AddScalarToNNAPIModel(ANeuralNetworksModel * nnapi_model,std::vector<mindspore::MSTensor> * all_tensors,std::string name,DataType type,T value)84   int AddScalarToNNAPIModel(ANeuralNetworksModel *nnapi_model, std::vector<mindspore::MSTensor> *all_tensors,
85                             std::string name, DataType type, T value) {
86     auto tensor = MSTensor::CreateTensor(name, type, {}, &value, sizeof(T));
87     if (tensor == nullptr) {
88       MS_LOG(ERROR) << "Create tensor failed.";
89       return RET_ERROR;
90     }
91     if (AddNNAPIOperand(nnapi_model, *tensor, static_cast<int>(all_tensors->size()), 0, true) != RET_OK) {
92       MS_LOG(ERROR) << "Add NNAPI operand failed.";
93       delete tensor;
94       return RET_ERROR;
95     }
96     input_indices_.push_back(all_tensors->size());
97     all_tensors->push_back(*tensor);
98     op_attribute_tensors_.push_back(tensor);
99     return RET_OK;
100   }
101   int AddTensorToNNAPIModel(ANeuralNetworksModel *nnapi_model, std::vector<mindspore::MSTensor> *all_tensors,
102                             MSTensorInfo data_info);
103 
104   std::string op_name_;
105   const schema::Primitive *op_primitive_ = nullptr;
106   std::vector<mindspore::MSTensor> in_tensors_;
107   std::vector<mindspore::MSTensor> out_tensors_;
108   schema::PrimitiveType type_ = schema::PrimitiveType_NONE;
109   schema::QuantType quant_type_ = schema::QuantType_QUANT_NONE;
110 
111   std::vector<NNAPIOp *> in_ops_;
112   std::vector<NNAPIOp *> out_ops_;
113 
114   std::vector<uint32_t> input_indices_;
115   std::vector<uint32_t> output_indices_;
116   std::vector<MSTensor *> op_attribute_tensors_;
117 };
118 
119 typedef NNAPIOp *(*NNAPIGetOp)(const std::string &name, const schema::Primitive *primitive,
120                                const std::vector<mindspore::MSTensor> &in_tensors,
121                                const std::vector<mindspore::MSTensor> &out_tensors, schema::QuantType quant_type);
122 
123 template <class T>
GetNNAPIOp(const std::string & name,const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,schema::QuantType quant_type)124 NNAPIOp *GetNNAPIOp(const std::string &name, const schema::Primitive *primitive,
125                     const std::vector<mindspore::MSTensor> &in_tensors,
126                     const std::vector<mindspore::MSTensor> &out_tensors, schema::QuantType quant_type) {
127   MS_ASSERT(primitive != nullptr);
128   auto *op = new (std::nothrow) T(name, primitive, in_tensors, out_tensors, quant_type);
129   if (op == nullptr) {
130     MS_LOG(ERROR) << "op is nullptr.";
131     return nullptr;
132   }
133   auto ret = op->InitParams();
134   if (ret != RET_OK) {
135     MS_LOG(WARNING) << "NPU op init failed.";
136     delete op;
137     return nullptr;
138   }
139   if (!op->IsSupport()) {
140     MS_LOG(WARNING) << "NNAPI op is not supported.";
141     delete op;
142     return nullptr;
143   }
144   return op;
145 }
146 
147 class NNAPICommon : public NNAPIOp {
148  public:
NNAPICommon(const std::string & name,const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,schema::QuantType quant_type)149   NNAPICommon(const std::string &name, const schema::Primitive *primitive,
150               const std::vector<mindspore::MSTensor> &in_tensors, const std::vector<mindspore::MSTensor> &out_tensors,
151               schema::QuantType quant_type)
152       : NNAPIOp(name, primitive, in_tensors, out_tensors, quant_type) {}
153 
~NNAPICommon()154   ~NNAPICommon() override {}
155 
IsSupport()156   bool IsSupport() override { return true; };
InitParams()157   int InitParams() override { return RET_OK; };
158   int AddOpToNNAPIModel(ANeuralNetworksModel *nnapi_model, std::vector<mindspore::MSTensor> *all_tensors) override;
159 };
160 }  // namespace lite
161 }  // namespace mindspore
162 #endif  // MINDSPORE_LITE_SRC_LITERT_DELEGATE_NNAPI_OP_NNAPI_OP_H_
163