/**
 * Copyright 2020-2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_SCHEDULER_H_
#define MINDSPORE_LITE_SRC_SCHEDULER_H_

#include <utility>
#include <vector>
#include <memory>
#include <map>
#include <deque>
#include <unordered_map>
#include <set>
#include <string>
#include "src/sub_graph_kernel.h"
#include "src/inner_context.h"
#include "include/model.h"
#include "src/scheduler_cb.h"
#ifndef DELEGATE_CLIP
#include "include/api/delegate.h"
#endif

namespace mindspore::lite {
constexpr int kDefaultDeviceType = -1;
constexpr int kSwitchTrueBranch = 1;
constexpr int kSwitchFalseBranch = 2;
class Scheduler {
 public:
  Scheduler(const InnerContext *ctx, const mindspore::Context *ms_ctx, Model *src_model,
            std::vector<Tensor *> *src_tensors, const std::vector<Tensor *> &input_tensors,
            const std::vector<Tensor *> &output_tensors, bool is_train_session,
            std::map<std::string, TypeId> *executions, std::shared_ptr<Delegate> delegate = nullptr,
            int delegate_device_type = -1)
      : context_(ctx),
        ms_context_(ms_ctx),
        src_model_(src_model),
        src_tensors_(src_tensors),
        inputs_(input_tensors),
        outputs_(output_tensors),
        is_train_session_(is_train_session),
        delegate_(delegate),
        delegate_device_type_(delegate_device_type),
        execution_plan_(executions) {}
  ~Scheduler() = default;
  int Schedule(std::vector<kernel::LiteKernel *> *dst_kernels);
  void SetupSchedulerCb(std::unique_ptr<SchedulerCb> cb) { sched_cb_ = std::move(cb); }

 private:
  int SchedulePreProcess();
  int CheckInputParam(std::vector<kernel::LiteKernel *> *dst_kernels);
  void FindNodeInoutTensors(const LiteGraph::Node &node, std::vector<Tensor *> *inputs, std::vector<Tensor *> *outputs);
  LiteGraph::Node *NodeInputIsPartial(const LiteGraph::Node *node);
  int InferPartialShape(const LiteGraph::Node *node);
  int InferCallShape(const LiteGraph::Node *node);
  int InferNodeShape(const LiteGraph::Node *node);
  void FreeOpParameters();
  int InferSubGraphShape(size_t subgraph_index);
  // schedule a node to a kernel according to the context and the registered kernels
  int HandleBuildinCpuKernelWeight(kernel::SubGraphType belong_subgraph_type, kernel::LiteKernel *kernel);
  kernel::LiteKernel *FindBackendKernel(const std::vector<Tensor *> &in_tensors,
                                        const std::vector<Tensor *> &out_tensors, const LiteGraph::Node *node,
                                        TypeId prefer_data_type = kTypeUnknown);
  int FindCpuKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                    OpParameter *op_parameter, const kernel::KernelKey &desc, TypeId kernel_data_type,
                    kernel::LiteKernel **kernel);
  int CheckCpuValid(std::vector<kernel::LiteKernel *> *dst_kernels);
  void ResetByExecutionPlan(std::string node_name, TypeId *data_type);

#ifdef GPU_OPENCL
  int FindGpuKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                    OpParameter *op_parameter, const kernel::KernelKey &desc, kernel::LiteKernel **kernel,
                    TypeId prefer_data_type);
#endif
  int FindProviderKernel(const std::vector<Tensor *> &in_tensors, const std::vector<Tensor *> &out_tensors,
                         const LiteGraph::Node *node, TypeId data_type, kernel::LiteKernel **kernel);

  int InitKernels(std::vector<kernel::LiteKernel *> dst_kernels);
  kernel::LiteKernel *SchedulePartialToKernel(const lite::LiteGraph::Node *src_node);
  // schedule a partial node to a subgraph_kernel
  std::vector<kernel::LiteKernel *> ScheduleSubGraphToSubGraphKernels(const int &subgraph_index);
  // schedule a node to a kernel
  kernel::LiteKernel *ScheduleNodeToKernel(const LiteGraph::Node *src_node, TypeId prefer_data_type = kTypeUnknown);
  // schedule a LiteGraph::Graph into a vector of subgraph_kernel
  int ScheduleGraphToKernels(std::vector<kernel::LiteKernel *> *dst_kernels, TypeId prefer_data_type = kTypeUnknown);
  // schedule a LiteGraph::SubGraph into a vector of kernel and subgraph_kernel
  int ScheduleSubGraphToKernels(size_t subgraph_index, std::vector<kernel::LiteKernel *> *dst_kernels,
                                std::vector<lite::Tensor *> *in_tensors, std::vector<lite::Tensor *> *out_tensors,
                                TypeId prefer_data_type = kTypeUnknown);
  // vector<LiteKernel/SubGraphKernel> --> vector<SubGraphKernel>
  int ConstructSubGraphs(std::vector<kernel::LiteKernel *> src_kernel, std::vector<kernel::LiteKernel *> *dst_kernel,
                         std::map<const kernel::LiteKernel *, bool> *sinked_kernel_map);
  // create subgraph_kernel from a vector of kernel
  std::vector<kernel::LiteKernel *> ScheduleMainSubGraphToKernels();
  kernel::LiteKernel *SchedulePartialToSubGraphKernel(const int &subgraph_index);
  kernel::SubGraphType PartialSubGraphType(const std::vector<kernel::LiteKernel *> &kernels);

  // other methods
  static TypeId GetFirstFp32Fp16OrInt8Type(const std::vector<Tensor *> &in_tensors);
  static void SetKernelTensorDataType(kernel::LiteKernel *kernel);
  int CopyPartialShapeToSubGraph(const lite::LiteGraph::Node *partial_node);
  int RestoreSubGraphInput(const lite::LiteGraph::Node *partial_node);

  bool IsControlFlowPattern(const lite::LiteGraph::Node &partial_node);
#ifdef ENABLE_FP16
  int SubGraphPreferDataType(const int &subgraph_index, TypeId *prefer_data_type);
#endif
#ifndef CONTROLFLOW_TENSORLIST_CLIP
  int InferSwitchShape(const LiteGraph::Node *node);
  LiteGraph::Node *NodeInputIsSwitch(const LiteGraph::Node *node);
  bool SubGraphHasScheduled(const int &index);
  void SubGraphMarkScheduled(const int &index);
  void SetSubgraphForPartialNode();
  bool IsControlFlowParttern(const std::vector<kernel::LiteKernel *> &kernels);
  int ConstructControlFlowMainGraph(std::vector<kernel::LiteKernel *> *kernels);
#endif

#ifndef DELEGATE_CLIP
  /* delegate related */
  int ReplaceDelegateKernels(std::vector<kernel::LiteKernel *> *dst_kernels);
  int InitDelegateKernels(std::vector<kernel::LiteKernel *> *dst_kernels);
#endif

 protected:
  const InnerContext *context_ = nullptr;
  const mindspore::Context *ms_context_ = nullptr;
  Model *src_model_ = nullptr;
  std::vector<Tensor *> *src_tensors_;
  const std::vector<Tensor *> &inputs_;
  const std::vector<Tensor *> &outputs_;
  std::vector<mindspore::MSTensor> ms_inputs_;
  std::vector<mindspore::MSTensor> ms_outputs_;
  std::vector<size_t> graph_output_node_indexes_;
  std::map<int, OpParameter *> op_parameters_;
  bool is_train_session_ = false;
  std::unique_ptr<SchedulerCb> sched_cb_;
  std::map<kernel::Kernel *, const schema::Primitive *> primitives_;
  std::shared_ptr<Delegate> delegate_ = nullptr;
  int delegate_device_type_ = -1;
  std::deque<int> subgraphs_to_schedule_{};
  std::unordered_map<size_t, kernel::LiteKernel *> subgraph_index_subgraph_kernel_map_{};
#ifndef CONTROLFLOW_TENSORLIST_CLIP
  std::set<int> scheduled_subgraph_index_{};
  std::unordered_map<kernel::LiteKernel *, size_t> partial_kernel_subgraph_index_map_{};
  std::set<lite::LiteGraph::Node *> partial_cnode_inferred_{};
#endif
  int schema_version_ = SCHEMA_VERSION::SCHEMA_CUR;
  std::map<std::string, TypeId> *execution_plan_ = nullptr;
};
}  // namespace mindspore::lite

#endif  // MINDSPORE_LITE_SRC_SCHEDULER_H_
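
A minimal usage sketch of the public interface declared above (constructor, Schedule, SetupSchedulerCb), not part of the original header. It assumes the caller already owns a valid InnerContext, mindspore::Context, Model, tensor vectors, and execution plan; the names ctx, ms_ctx, model, src_tensors, inputs, outputs, and execution_plan are hypothetical placeholders for objects created elsewhere in the runtime, and RET_OK is the usual MindSpore Lite success code from include/errorcode.h.

// Illustrative sketch only: all lowercase variables are assumed to be
// constructed elsewhere by the runtime (hypothetical names).
std::vector<kernel::LiteKernel *> dst_kernels;
Scheduler scheduler(ctx, ms_ctx, model, &src_tensors, inputs, outputs,
                    /*is_train_session=*/false, &execution_plan);
// Optionally install a scheduling callback before scheduling:
// scheduler.SetupSchedulerCb(std::move(cb));
int ret = scheduler.Schedule(&dst_kernels);
if (ret != RET_OK) {
  // handle scheduling failure
}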