/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_LITE_MICRO_CODER_OPCODER_H_
#define MINDSPORE_LITE_MICRO_CODER_OPCODER_H_

#include <vector>
#include <set>
#include <string>
#include <memory>
#include "coder/context.h"
#include "coder/graph.h"
#include "coder/allocator/allocator.h"
#include "include/errorcode.h"
#include "src/lite_kernel.h"
#include "src/common/version_manager.h"
#include "securec/include/securec.h"
#include "coder/opcoders/op_coder_register.h"
#include "coder/log.h"

namespace mindspore::lite::micro {
constexpr int kPrecision = 19;

// Base class for micro op coders: wraps a model node together with its input/output tensors and
// generates the corresponding operator code for the chosen target.
class OperatorCoder {
 public:
  OperatorCoder(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                const Model::Node *node, size_t node_index, Target target)
      : input_tensors_(in_tensors),
        output_tensors_(out_tensors),
        target_(target),
        node_(node),
        node_index_(node_index) {
    allocator_ = MemoryAllocator::GetInstance();
    input_tensor_ = input_tensors_.at(kInputIndex);
    output_tensor_ = output_tensors_.at(kOutputIndex);
  }

  std::string name() const { return node_->name_; }

  void set_input_tensor_indices(const std::vector<uint32_t> &input_indices);
  void set_output_tensor_indices(const std::vector<uint32_t> &output_indices);

  const std::vector<uint32_t> input_tensor_indices() const;
  const std::vector<uint32_t> output_tensor_indices() const;

  const std::vector<Tensor *> input_tensors() const;
  const std::vector<Tensor *> output_tensors() const;

  void AddInputOp(OperatorCoder *op) { input_ops_.push_back(op); }
  void AddOutputOp(OperatorCoder *op) { output_ops_.push_back(op); }
  const std::vector<OperatorCoder *> input_ops() const { return input_ops_; }
  const std::vector<OperatorCoder *> output_ops() const { return output_ops_; }

  void set_type(int type) { type_ = type; }
  const int type() const { return type_; }

  size_t node_index() const;

  void set_parameter(OpParameter *parameter);

  const Model::Node *node() const { return this->node_; }

  void AddInitialParameters(Tensor *parameter) { initial_parameters_.push_back(parameter); }

  const std::vector<Tensor *> initial_parameters() const { return initial_parameters_; }

  void SetSchemaVersion(int schema_version) { schema_version_ = schema_version; }

  // Code-generation interface: Prepare() readies the operator (shapes, buffers, parameters) and
  // DoCode() emits the operator's source into the CoderContext.
  virtual int Prepare(CoderContext *const context) = 0;

  virtual int DoCode(CoderContext *const context) = 0;

  virtual ~OperatorCoder();

  void set_thread_num(int thread_num);

 protected:
  std::vector<Tensor *> input_tensors_;
  std::vector<Tensor *> output_tensors_;
  Target target_{kTargetUnknown};
  const Model::Node *node_{nullptr};
  Tensor *input_tensor_{nullptr};
  Tensor *output_tensor_{nullptr};

  OpParameter *parameter_{nullptr};

  MemoryAllocator *allocator_{nullptr};

  bool support_parallel_{false};
  int thread_num_{1};
  int schema_version_ = lite::SCHEMA_VERSION::SCHEMA_CUR;

 private:
  size_t node_index_{0};
  std::vector<uint32_t> input_tensor_indices_;
  std::vector<uint32_t> output_tensor_indices_;

  std::vector<OperatorCoder *> input_ops_;
  std::vector<OperatorCoder *> output_ops_;
  std::vector<Tensor *> initial_parameters_;
  int type_{schema::PrimitiveType_NONE};
};

// A generic creator for ordinary CPU op coders: it validates the node, constructs the concrete
// coder type T, and stamps the schema version (see the usage sketch below).
template <typename T>
std::unique_ptr<OperatorCoder> CPUOpCoderCreator(const std::vector<Tensor *> &in_tensors,
                                                 const std::vector<Tensor *> &out_tensors, const Model::Node *node,
                                                 size_t node_index, Target target, int schema_version) {
  if (node == nullptr) {
    MS_LOG(ERROR) << "node is null";
    return nullptr;
  }
  std::unique_ptr<T> coder = std::make_unique<T>(in_tensors, out_tensors, node, node_index, target);
  if (coder == nullptr) {
    return nullptr;
  }
  coder->SetSchemaVersion(schema_version);
  return coder;
}
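
// A minimal usage sketch (illustrative only, not part of the original header). `MyFP32Coder` is a
// hypothetical OperatorCoder subclass, and the tensors, node, target, schema_version and context
// are assumed to come from the surrounding graph-traversal code. The creator null-checks the node,
// constructs the coder, and stamps the schema version; the caller then drives Prepare() and
// DoCode() against a CoderContext:
//
//   std::unique_ptr<OperatorCoder> coder =
//       CPUOpCoderCreator<MyFP32Coder>(in_tensors, out_tensors, node, node_index, target, schema_version);
//   if (coder != nullptr) {
//     coder->Prepare(context);  // set up shapes, buffers and parameters for code generation
//     coder->DoCode(context);   // emit this operator's generated source into the context
//   }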
}  // namespace mindspore::lite::micro
#endif  // MINDSPORE_LITE_MICRO_CODER_OPCODER_H_