/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_SUBGRAPH_H_
#define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_SUBGRAPH_H_

#include <memory>
#include <vector>
#include <string>
#include "include/api/kernel.h"
#include "src/delegate/npu/npu_executor.h"

namespace mindspore {
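// Subgraph kernel for the NPU delegate: wraps a group of NPUOps, builds them into
// an IR model buffer, and executes the compiled model through NPUExecutor.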
class NPUSubGraph : public kernel::Kernel {
 public:
  NPUSubGraph(const std::vector<NPUOp *> &npu_ops, NPUManager *npu_manager)
      : npu_ops_(npu_ops), npu_manager_(npu_manager) {}

  ~NPUSubGraph() override;

  int Init();

  int Prepare() override;

  int Execute() override;

  int ReSize() override {
    MS_LOG(ERROR) << "NPU does not support the resize function temporarily.";
    return lite::RET_ERROR;
  }

  void set_input(mindspore::MSTensor in_tensor, int index) override;

  void set_output(mindspore::MSTensor out_tensor, int index) override;

  int GetGraphInOutOps();

  std::vector<NPUOp *> FindPreOps(NPUOp *cur_op);

 private:
  std::shared_ptr<domi::ModelBufferData> BuildIRModel();

  int BuildNPUInputOp();

  int BuildNPUOutputOp();

  int GetNPUOperators(const std::vector<NPUOp *> &ops);

  bool IsSubGraphInputTensor(mindspore::MSTensor input);

  std::string GetOMModelName();

  bool is_compiled_ = false;

  std::vector<ge::Operator> subgraph_input_ops_;

  std::vector<ge::Operator> subgraph_output_ops_;

  std::vector<mindspore::MSTensor> out_tensor_sorted_;

  std::vector<ge::Operator *> op_buffer_;

  std::vector<NPUOp *> npu_ops_{};
  // entry ops in npu_ops_
  std::vector<NPUOp *> in_ops_{};
  // exit ops in npu_ops_
  std::vector<NPUOp *> out_ops_{};

  NPUExecutor *executor_ = nullptr;

  NPUManager *npu_manager_ = nullptr;
};

}  // namespace mindspore

#endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_SUBGRAPH_H_