/**
 * Copyright 2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_LITE_SRC_RUNTIME_PARALLEL_LITE_ACTOR_H_
#define MINDSPORE_LITE_SRC_RUNTIME_PARALLEL_LITE_ACTOR_H_
#include <atomic>
#include <vector>
#include <memory>
#include <string>
#include <unordered_map>
#include <set>
#include <utility>
#include "actor/op_actor.h"
#include "src/executor/kernel_exec.h"
#include "actor/actor.h"
#include "async/uuid_base.h"
#include "async/future.h"
#include "src/executor/sub_graph_kernel.h"
#include "src/litert/cpu_info.h"
#include "src/tensorlist.h"
#include "src/litert/lite_mindrt.h"

namespace mindspore::lite {
class KernelsActor;
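
// Actor that runs a subgraph by splitting its kernels into groups
// (KernelsActor) that can execute in parallel. It owns the groups, shares its
// OpContext with them, kicks off the groups listed in begin_readly_indexs_,
// and counts the output data they produce.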
class ParallelLiteActor : public LiteOpActor {
 public:
  explicit ParallelLiteActor(kernel::KernelExec *kernel, lite::InnerContext *ctx) : LiteOpActor(kernel, ctx) {}
  ~ParallelLiteActor() override;
  void RunOpData(OpData<lite::Tensor> *input_data, mindspore::OpContext<lite::Tensor> *context = nullptr) override;
  int PostInit() override;
  mindspore::OpContext<lite::Tensor> *OpContext() const { return op_context_; }
  inline void SetOpContext(mindspore::OpContext<lite::Tensor> *op_context) { op_context_ = op_context; }
  void AddKernelsActor(const std::shared_ptr<KernelsActor> &kernels_actor) { kernels_actors_.push_back(kernels_actor); }
  void SetBeginReadlyIndexs(const std::vector<size_t> &readly_indexs) { begin_readly_indexs_ = readly_indexs; }
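  // Presumably invoked as data arrives: checks each KernelsActor named in
  // indices and schedules those whose GetReady() reports all inputs present.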
  void CheckReadyActors(const std::vector<size_t> &indices);
  void AddOutputDataCount();
  int KernelActorInit();

 private:
  void DelKernelsActors();

 private:
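  // Per-run bookkeeping: the kernel groups, the op context shared with them,
  // the groups that are ready as soon as a run starts, and a counter of the
  // output data messages produced so far.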
  std::vector<std::shared_ptr<KernelsActor>> kernels_actors_;
  mindspore::OpContext<lite::Tensor> *op_context_ = nullptr;
  std::vector<size_t> begin_readly_indexs_{};
  std::atomic<int> output_data_count_ = 0;
  bool finish_ = true;
  bool call_actor_ = false;
};

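// Actor that executes one group of kernels inside a ParallelLiteActor. The
// schedule is expressed as index lists into the parent's group vector:
// in_actors_indexs_ names the upstream groups this one waits for, and
// out_actors_indexs_ the downstream groups it can wake once it has run.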
class KernelsActor : public ActorBase {
 public:
  explicit KernelsActor(ParallelLiteActor *parallel_lite_actor, const std::string &op_name,
                        const std::vector<kernel::KernelExec *> &nodes)
      : ActorBase(op_name), parallel_lite_actor_(parallel_lite_actor), nodes_(nodes) {}
  ~KernelsActor() override = default;
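  // Defined in the .cc file; given the members below it is expected to run the
  // kernels in nodes_ and forward outputs_data_ along output_data_arrows_.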
  void Run();
  void AddOutputDataArrows(const DataArrowPtr &data_arrow) { output_data_arrows_.push_back(data_arrow); }
  void AddOutputData(const OpDataPtr<Tensor> &data) { outputs_data_.push_back(data); }
  void AddResultsIndex(size_t result) { results_index_.push_back(result); }
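  // Records the upstream actor indices. With at most one predecessor the
  // actor is always ready; otherwise in_actors_num_ becomes size() - 1, the
  // threshold GetReady() uses to detect the last arriving predecessor.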
  void SetInActorIndexs(const std::vector<size_t> &in_indexs) {
    in_actors_indexs_ = in_indexs;
    if (in_indexs.size() <= 1) {
      in_actors_num_ = 0;
      is_single_in_ = true;
    } else {
      in_actors_num_ = static_cast<int>(in_indexs.size()) - 1;
      is_single_in_ = false;
    }
  }
  void SetOutActorIndexs(const std::vector<size_t> &out_indexs) { out_actors_indexs_ = out_indexs; }
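  // Atomic join barrier: every arriving predecessor increments ready_, and
  // only the caller that observes the previous value in_actors_num_ (the last
  // of the in_actors_num_ + 1 predecessors) resets the counter and proceeds.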
  bool GetReady() {
    if (is_single_in_) {
      return true;
    } else if (ready_.fetch_add(1) == in_actors_num_) {
      ready_ = 0;
      return true;
    }
    return false;
  }
  inline void ClearReady() { ready_ = 0; }
  void SetIsSignleIn(bool flag) { is_single_in_ = flag; }
  void SetHaveOutput(bool flag) { have_output_ = flag; }

 private:
  ParallelLiteActor *parallel_lite_actor_ = nullptr;  // Parent actor; not owned.
  std::vector<kernel::KernelExec *> nodes_{};         // Kernels this actor executes.
  std::vector<size_t> out_actors_indexs_{};           // Indices of downstream actors.
  std::vector<size_t> in_actors_indexs_{};            // Indices of upstream actors.

  // The op data sent to downstream actors and the graph-result slots filled.
  std::vector<size_t> results_index_{};
  std::vector<DataArrowPtr> output_data_arrows_;
  std::vector<OpDataPtr<Tensor>> outputs_data_{};

  std::atomic<int> ready_ = 0;  // Counts arrived predecessors; used to reduce message communication.
  bool is_single_in_ = false;   // At most one predecessor.
  int in_actors_num_ = 0;       // Predecessor count minus one; threshold checked by GetReady().

  bool have_output_ = false;
};
}  // namespace mindspore::lite
#endif  // MINDSPORE_LITE_SRC_RUNTIME_PARALLEL_LITE_ACTOR_H_