/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

17 #ifndef MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_GRAPH_H_
18 #define MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_GRAPH_H_
19 
20 #include <vector>
21 #include <queue>
22 #include <map>
23 #include <utility>
24 #include "include/api/kernel.h"
25 #include "src/litert/delegate/npu/op/npu_op.h"
26 #include "src/litert/delegate/npu/npu_executor.h"
27 
28 namespace mindspore::lite {
29 class NPUGraph : public kernel::Kernel {
30  public:
NPUGraph(std::vector<NPUOp * > npu_ops,NPUManager * npu_manager,const std::vector<mindspore::MSTensor> & inputs,const std::vector<mindspore::MSTensor> & outputs)31   NPUGraph(std::vector<NPUOp *> npu_ops, NPUManager *npu_manager, const std::vector<mindspore::MSTensor> &inputs,
32            const std::vector<mindspore::MSTensor> &outputs)
33       : kernel::Kernel(inputs, outputs, nullptr, nullptr), npu_ops_(std::move(npu_ops)), npu_manager_(npu_manager) {}
34 
35   ~NPUGraph() override;
36 
37   int Init();
38 
39   int Prepare() override;
40 
41   int Execute() override;
42 
ReSize()43   int ReSize() override {
44     MS_LOG(ERROR) << "NPU does not support the resize function temporarily.";
45     return RET_ERROR;
46   }
47 
48   void set_input(mindspore::MSTensor in_tensor, int index) override;
49 
50   void set_output(mindspore::MSTensor out_tensor, int index) override;
51 
52   int FindPreNextOps();
53 
GetOps()54   std::vector<NPUOp *> *GetOps() { return &npu_ops_; }
55 
GetInsertTensors()56   std::vector<mindspore::MSTensor *> *GetInsertTensors() { return &insert_tensors_; }
57 
58  protected:
59   std::vector<NPUOp *> FindPreOps(NPUOp *cur_op);
60 
61   std::vector<NPUOp *> FindNextOps(NPUOp *cur_op);
62 
63   int FindValidSubgraphInOps(std::queue<NPUOp *> *valid_in_ops, std::queue<NPUOp *> *candidate_in_ops,
64                              std::map<const NPUOp *, bool> *is_visited);
65 
66   std::vector<NPUOp *> FindReadySubgraphOps(std::queue<NPUOp *> op_queue, std::queue<NPUOp *> *next_candidate_ops,
67                                             std::map<const NPUOp *, bool> *is_visited);
68 
69   int CreateSubgraphFromReadyOps(std::queue<NPUOp *> *valid_in_ops, std::vector<NPUOp *> ready_ops,
70                                  std::map<const NPUOp *, bool> *is_searched);
71 
72   kernel::Kernel *CreateNPUSubgraphKernel(std::vector<NPUOp *> ops);
73 
74   kernel::Kernel *CreateNPUTransposeKernel(NPUOp *op);
75 
76   std::vector<NPUOp *> npu_ops_{};
77 
78   std::vector<kernel::Kernel *> all_kernels_{};
79 
80   std::vector<mindspore::MSTensor *> insert_tensors_;
81 
82   NPUManager *npu_manager_ = nullptr;
83 };
84 
85 }  // namespace mindspore::lite
86 
87 #endif  // MINDSPORE_LITE_SRC_RUNTIME_DELEGATE_NPU_NPU_GRAPH_H_
88