/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_INCLUDE_API_DELEGATE_H
#define MINDSPORE_INCLUDE_API_DELEGATE_H

#include <map>
#include <vector>
#include <memory>
#include <string>
#include <utility>
#include "schema/model_generated.h"
#include "include/api/kernel.h"
#include "include/api/delegate_api.h"

namespace mindspore {
typedef enum {
  SCHEMA_INVALID = -1, /**< invalid version */
  SCHEMA_CUR,          /**< current version for ms model defined in model.fbs */
  SCHEMA_V0,           /**< previous version for ms model defined in model_v0.fbs */
} SchemaVersion;

using KernelIter = std::vector<kernel::Kernel *>::iterator;

template <class T>
class MS_API DelegateModel {
 public:
  DelegateModel() = default;
  /// \brief Constructor of MindSpore Lite DelegateModel.
  DelegateModel(std::vector<kernel::Kernel *> *kernels, const std::vector<MSTensor> &inputs,
                const std::vector<MSTensor> &outputs, const std::map<kernel::Kernel *, const T *> &primitives,
                SchemaVersion version)
      : kernels_(kernels), inputs_(inputs), outputs_(outputs), primitives_(primitives), version_(version) {}

  /// \brief Destructor of MindSpore Lite DelegateModel.
  ~DelegateModel() = default;

  /// \brief Get the Primitive of a kernel::Kernel.
  ///
  /// \param[in] kernel A kernel in the DelegateModel kernels vector.
  ///
  /// \return The Primitive of the kernel, or nullptr if the kernel has no entry in the primitives map.
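  ///
  /// \par Example
  /// A minimal usage sketch, assuming T is schema::Primitive (as in LiteDelegateGraph); the exact
  /// accessors on the primitive come from the generated flatbuffer schema, not from this header:
  /// \code
  /// const schema::Primitive *prim = model->GetPrimitive(kernel);
  /// if (prim != nullptr && prim->value_type() == schema::PrimitiveType_Conv2DFusion) {
  ///   // The kernel is a Conv2D fusion node and may be considered for delegation.
  /// }
  /// \endcode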
  const T *GetPrimitive(kernel::Kernel *kernel) const {
    if (primitives_.find(kernel) != primitives_.end()) {
      return primitives_.at(kernel);
    } else {
      return nullptr;
    }
  }

  /// \brief Get the begin iterator of the DelegateModel kernels vector.
  ///
  /// \return The begin iterator of the DelegateModel kernels vector.
  KernelIter BeginKernelIterator() { return kernels_->begin(); }
68 
69   /// \brief Get the end iterator of the DelegateModel kernels vector.
70   ///
71   /// \return The end iterator of the DelegateModel kernels vector.
EndKernelIterator()72   KernelIter EndKernelIterator() { return kernels_->end(); }

  /// \brief Replace the continuous sequence of kernels supported by the delegate with a single delegate graph kernel.
  ///
  /// \param[in] from Define the begin iterator of the continuous kernels supported by the delegate.
  /// \param[in] end Define the end iterator of the continuous kernels supported by the delegate.
  /// \param[in] graph_kernel Define the delegate graph kernel that replaces the kernels in [from, end).
  ///
  /// \return The iterator after graph_kernel, pointing to the next kernel that has not been visited.
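  ///
  /// \par Example
  /// A minimal sketch, assuming [from, end) is a continuous segment already known to be supported
  /// and graph_kernel is the fused kernel built for it:
  /// \code
  /// // After Replace, iteration continues from the kernel right after graph_kernel.
  /// KernelIter next = model->Replace(from, end, graph_kernel);
  /// \endcode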
  KernelIter Replace(KernelIter from, KernelIter end, kernel::Kernel *graph_kernel) {
    size_t insert_index = from - BeginKernelIterator();
    if (insert_index >= kernels_->size()) {
      return BeginKernelIterator();
    }
    kernels_->erase(from, end);
    kernels_->insert(BeginKernelIterator() + insert_index, graph_kernel);
    return BeginKernelIterator() + insert_index + 1;
  }

  /// \brief Get the nodes of DelegateModel.
  ///
  /// \return The pointer to the nodes vector of DelegateModel.
  std::vector<kernel::Kernel *> *nodes() { return kernels_; }

  /// \brief Get the input tensors of DelegateModel.
  ///
  /// \return The input tensor vector of DelegateModel.
  const std::vector<mindspore::MSTensor> &inputs() { return this->inputs_; }

  /// \brief Get the output tensors of DelegateModel.
  ///
  /// \return The output tensor vector of DelegateModel.
  const std::vector<mindspore::MSTensor> &outputs() { return this->outputs_; }

  /// \brief Get the ms model version.
  ///
  /// \return The schema version for the primitives map.
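  ///
  /// \par Example
  /// A minimal sketch of checking the schema version before parsing primitives; rejecting the
  /// legacy schema with kLiteNotSupport is only one possible policy:
  /// \code
  /// if (model->GetVersion() != SCHEMA_CUR) {
  ///   // Primitives come from the legacy model_v0.fbs schema; fall back to the inner inference.
  ///   return kLiteNotSupport;
  /// }
  /// \endcode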
  SchemaVersion GetVersion() const { return version_; }

 protected:
  std::vector<kernel::Kernel *> *kernels_;
  const std::vector<mindspore::MSTensor> &inputs_;
  const std::vector<mindspore::MSTensor> &outputs_;
  const std::map<kernel::Kernel *, const T *> &primitives_;
  SchemaVersion version_;
};

// The lite delegate uses kernel::Kernel as the graph node.
using LiteDelegateGraph = DelegateModel<schema::Primitive>;
class Delegate : public IDelegate<LiteDelegateGraph, kernel::Kernel, kernel::Kernel> {
 public:
  Delegate() = default;
  Delegate(const std::vector<mindspore::MSTensor> &inputs, const std::vector<mindspore::MSTensor> &outputs)
      : IDelegate<LiteDelegateGraph, kernel::Kernel, kernel::Kernel>(inputs, outputs) {}
  virtual ~Delegate() = default;
  /// \brief Init the delegate.
  ///
  /// \note Init will be called in Model::Build.
  ///
  /// \return Status. If Status is kLiteNotSupport, the program falls back to the MindSpore Lite inner inference.
  virtual Status Init() = 0;

  std::shared_ptr<kernel::Kernel> CreateKernel(const std::shared_ptr<kernel::Kernel> &node) override {
    // Return the node as the kernel, since for the lite delegate they are the same object.
    return node;
  }

  bool IsDelegateNode(const std::shared_ptr<kernel::Kernel> &node) override { return false; }

  /// \brief Replace the nodes in the model with delegate nodes; the delegate will create kernels from its delegate nodes.
  ///
  /// \param[in] graph The graph to be built.
  void ReplaceNodes(const std::shared_ptr<LiteDelegateGraph> &graph) override {}

  /// \brief Build the delegate graph for the MindSpore Lite model.
  ///
  /// \note Build will be called in Model::Build.
  ///
  /// \param[in] model Define the delegate model to be built.
  ///
  /// \note Deprecated. Use ReplaceNodes and CreateKernel to build the delegate model instead.
  virtual Status Build(LiteDelegateGraph *model) = 0;
};
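
// A minimal sketch of a custom delegate built on the deprecated Build interface, assuming two
// hypothetical helpers SupportsKernel() and CreateGraphKernel(); only Delegate, LiteDelegateGraph,
// KernelIter and Status come from this header. Illustrative only, excluded from compilation:
#if 0
class ExampleDelegate : public Delegate {
 public:
  Status Init() override {
    // Check whether the backing hardware/runtime is available; returning kLiteNotSupport here
    // makes Model::Build fall back to the MindSpore Lite inner inference.
    return kSuccess;
  }

  Status Build(LiteDelegateGraph *model) override {
    KernelIter from = model->BeginKernelIterator();
    while (from != model->EndKernelIterator()) {
      // Grow a continuous segment of kernels that this delegate supports.
      KernelIter end = from;
      while (end != model->EndKernelIterator() && SupportsKernel(*end)) ++end;
      if (from == end) {
        ++from;  // unsupported kernel, keep it for the inner inference
        continue;
      }
      // Fuse the segment [from, end) into one delegate graph kernel and continue after it.
      from = model->Replace(from, end, CreateGraphKernel(from, end, model));
    }
    return kSuccess;
  }
};
#endif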

class MS_API CoreMLDelegate : public Delegate {
 public:
  /// \brief Constructor of MindSpore Lite CoreML delegate.
  CoreMLDelegate();

  /// \brief Init the CoreML delegate.
  ///
  /// \note Init will be called in Model::Build.
  ///
  /// \return Status. If Status is kLiteNotSupport, the program falls back to the MindSpore Lite inner inference.
  Status Init() override;

  /// \brief Build the CoreML delegate graph for the MindSpore Lite model.
  ///
  /// \note Build will be called in Model::Build.
  ///
  /// \param[in] model Define the delegate model to be built.
  ///
  /// \return Status of the build result.
  Status Build(LiteDelegateGraph *model) override;

 protected:
  std::shared_ptr<Delegate> impl_;
};
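
// A minimal sketch of attaching a delegate before building a model, assuming the Lite C++ API in
// include/api/context.h and include/api/model.h (Context::SetDelegate, Context::MutableDeviceInfo,
// Model::Build); the model path is a placeholder. Illustrative only, excluded from compilation:
#if 0
Status BuildModelWithCoreMLDelegate(mindspore::Model *model) {
  auto context = std::make_shared<mindspore::Context>();
  context->MutableDeviceInfo().push_back(std::make_shared<mindspore::CPUDeviceInfo>());
  // Init() and Build() of the delegate are invoked inside Model::Build.
  context->SetDelegate(std::make_shared<mindspore::CoreMLDelegate>());
  return model->Build("model.ms", mindspore::kMindIR, context);
}
#endif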
}  // namespace mindspore
#endif  // MINDSPORE_INCLUDE_API_DELEGATE_H