• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_OP_NPU_OP_
18 #define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_OP_NPU_OP_
#include <algorithm>
#include <set>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>
#include "include/graph/graph.h"
#include "schema/model_generated.h"
#include "include/errorcode.h"
#include "include/api/types.h"
#include "include/api/data_type.h"
#include "src/common/log_adapter.h"
#include "src/common/log_util.h"
#include "nnacl/op_base.h"
using mindspore::lite::RET_ERROR;
using mindspore::lite::RET_NOT_SUPPORT;
using mindspore::lite::RET_OK;
35 namespace mindspore {
// Maximum tensor rank the NPU delegate accepts (4-D, e.g. NCHW/NHWC layouts).
constexpr int NPU_SHAPE_SIZE = 4;
// Named operator input counts, used instead of magic numbers at call sites.
constexpr int INPUT_SIZE2 = 2;
constexpr int INPUT_SIZE3 = 3;
39 
40 class NPUOp {
41  public:
NPUOp(const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,std::string name)42   NPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
43         const std::vector<mindspore::MSTensor> &out_tensors, std::string name)
44       : inputs_(std::move(in_tensors)), outputs_(std::move(out_tensors)), name_(name) {
45     if (primitive != nullptr) {
46       type_ = primitive->value_type();
47     }
48   }
49 
50   virtual ~NPUOp() = default;
51 
IsSupport(const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors)52   virtual int IsSupport(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
53                         const std::vector<mindspore::MSTensor> &out_tensors) {
54     return RET_ERROR;
55   }
56 
Init(const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors)57   virtual int Init(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
58                    const std::vector<mindspore::MSTensor> &out_tensors) {
59     return RET_ERROR;
60   }
61 
SetNPUInputs(const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,const std::vector<ge::Operator * > & npu_inputs)62   virtual int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
63                            const std::vector<mindspore::MSTensor> &out_tensors,
64                            const std::vector<ge::Operator *> &npu_inputs) {
65     return RET_ERROR;
66   }
67 
SetNPUInputs(const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,const std::vector<ge::Operator * > & npu_inputs,const std::unordered_map<int,std::pair<ge::Operator *,int>> & index2_multi_out_index)68   virtual int SetNPUInputs(const std::vector<mindspore::MSTensor> &in_tensors,
69                            const std::vector<mindspore::MSTensor> &out_tensors,
70                            const std::vector<ge::Operator *> &npu_inputs,
71                            const std::unordered_map<int, std::pair<ge::Operator *, int>> &index2_multi_out_index) {
72     if (index2_multi_out_index.empty()) {
73       return SetNPUInputs(in_tensors, out_tensors, npu_inputs);
74     }
75     MS_LOG(ERROR) << "The input operator of npu op: " << this->name() << " has multiple outputs. Override this method.";
76     return RET_ERROR;
77   }
78 
GetNPUOp()79   virtual ge::Operator *GetNPUOp() { return nullptr; }
80 
set_inputs(const std::vector<mindspore::MSTensor> & in_tensors)81   void set_inputs(const std::vector<mindspore::MSTensor> &in_tensors) { this->inputs_ = in_tensors; }
82 
set_input(mindspore::MSTensor in_tensor,int index)83   void set_input(mindspore::MSTensor in_tensor, int index) {
84     MS_ASSERT(index < inputs_.size());
85     this->inputs_[index] = in_tensor;
86   }
87 
set_outputs(const std::vector<mindspore::MSTensor> & out_tensors)88   void set_outputs(const std::vector<mindspore::MSTensor> &out_tensors) { this->outputs_ = out_tensors; }
89 
inputs()90   const std::vector<mindspore::MSTensor> &inputs() { return this->inputs_; }
91 
outputs()92   const std::vector<mindspore::MSTensor> &outputs() { return this->outputs_; }
93 
set_in_ops(const std::vector<NPUOp * > & in_ops)94   void set_in_ops(const std::vector<NPUOp *> &in_ops) { this->in_ops_ = in_ops; }
95 
set_out_ops(const std::vector<NPUOp * > & out_ops)96   void set_out_ops(const std::vector<NPUOp *> &out_ops) { this->out_ops_ = out_ops; }
97 
in_ops()98   const std::vector<NPUOp *> &in_ops() const { return this->in_ops_; }
99 
out_ops()100   const std::vector<NPUOp *> &out_ops() const { return this->out_ops_; }
101 
type()102   schema::PrimitiveType type() const { return type_; }
103 
name()104   std::string name() const { return this->name_; }
105 
set_name(const std::string & name)106   void set_name(const std::string &name) { this->name_ = name; }
107 
108  protected:
109   std::vector<mindspore::MSTensor> inputs_;
110   std::vector<mindspore::MSTensor> outputs_;
111   std::vector<NPUOp *> in_ops_;
112   std::vector<NPUOp *> out_ops_;
113   schema::PrimitiveType type_ = schema::PrimitiveType_NONE;
114   std::string name_;
115 };
116 
117 typedef NPUOp *(*NPUGetOp)(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
118                            const std::vector<mindspore::MSTensor> &out_tensors, std::string name);
119 
120 template <class T>
GetNPUOp(const schema::Primitive * primitive,const std::vector<mindspore::MSTensor> & in_tensors,const std::vector<mindspore::MSTensor> & out_tensors,std::string name)121 NPUOp *GetNPUOp(const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &in_tensors,
122                 const std::vector<mindspore::MSTensor> &out_tensors, std::string name) {
123   auto shape = out_tensors.front().Shape();
124   if (std::find(shape.begin(), shape.end(), -1) != shape.end()) {
125     MS_LOG(ERROR) << "NPU does not support runtime inference shape.";
126     return nullptr;
127   }
128 
129   if (in_tensors[0].Shape().size() > NPU_SHAPE_SIZE) {
130     MS_LOG(ERROR) << "Npu does not support input tensor dims greater than 4";
131     return nullptr;
132   }
133 
134   std::set<schema::PrimitiveType> int32_lists = {schema::PrimitiveType_Cast, schema::PrimitiveType_StridedSlice};
135   auto support_int32 = in_tensors[0].DataType() == DataType::kNumberTypeInt32 &&
136                        find(int32_lists.begin(), int32_lists.end(), primitive->value_type()) != int32_lists.end();
137   if (in_tensors[0].DataType() != DataType::kNumberTypeFloat32 &&
138       in_tensors[0].DataType() != DataType::kNumberTypeFloat16 && !support_int32) {
139     MS_LOG(ERROR) << "Npu does not support datatype " << static_cast<int>(in_tensors[0].DataType()) << " for op type "
140                   << primitive->value_type();
141     return nullptr;
142   }
143 
144   auto *op = new (std::nothrow) T(primitive, in_tensors, out_tensors, name);
145   if (op == nullptr) {
146     MS_LOG(ERROR) << "op is nullptr.";
147     return nullptr;
148   }
149   auto ret = op->IsSupport(primitive, in_tensors, out_tensors);
150   if (ret != RET_OK) {
151     MS_LOG(WARNING) << "NPU op is not supported.";
152     delete op;
153     return nullptr;
154   }
155   ret = op->Init(primitive, in_tensors, out_tensors);
156   if (ret != RET_OK) {
157     MS_LOG(WARNING) << "NPU op init failed.";
158     delete op;
159     return nullptr;
160   }
161   return op;
162 }
163 }  // namespace mindspore
164 #endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_OP_NPU_OP_
165