• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
16 
17 #ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_OUTPUT_ACTOR_H_
18 #define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_OUTPUT_ACTOR_H_
19 
#include <vector>
#include <string>
#include <memory>
#include <utility>
#include <algorithm>
#include <unordered_map>
#include "runtime/framework/control_node_parser.h"
#include "runtime/framework/device_tensor_store.h"
#include "runtime/framework/actor/actor_common.h"
#include "runtime/framework/actor/abstract_actor.h"
#include "runtime/hardware/device_context.h"
#include "backend/session/anf_runtime_algorithm.h"
#include "ir/tensor.h"
33 
34 namespace mindspore {
35 namespace runtime {
36 using mindspore::device::DeviceContext;
37 using mindspore::session::KernelWithIndex;
38 using mindspore::tensor::TensorPtr;
39 
40 // The output actor is used to receive the output result of actor which represents the graph output.
41 class OutputActor : public AbstractActor {
42  public:
OutputActor(std::string name,size_t loop_count,size_t outputs_num,bool need_loop_count)43   OutputActor(std::string name, size_t loop_count, size_t outputs_num, bool need_loop_count)
44       : AbstractActor(name, KernelTransformType::kOutputActor, nullptr),
45         loop_count_(loop_count),
46         current_count_(0),
47         outputs_num_(outputs_num),
48         current_outputs_num_(0),
49         need_loop_count_(need_loop_count) {
50     outputs_.resize(outputs_num);
51     output_nodes_.resize(outputs_num);
52     device_contexts_.resize(outputs_num);
53   }
54   ~OutputActor() override = default;
55 
56   void Init() override;
57 
58   // The output actor collects loop count when receive the input control of loop count actor.
59   void CollectLoopCount(size_t loop_count, OpContext<DeviceTensor> *const context);
60 
61   // The output actor collects output result when receive the data of actor.
62   void CollectOutput(const AnfNodePtr &output_node, size_t output_index, size_t output_position,
63                      OpContext<DeviceTensor> *const context);
64 
65   // The graph output need be set new device address every step or loop, to avoid that the device address
66   // context of tensor be rewritten in the next step or next loop.
67   void UpdateOutputDeviceAddress();
68 
outputs()69   std::vector<TensorPtr> &outputs() { return outputs_; }
70 
71  private:
72   friend class GraphScheduler;
73 
74   // The loop count is constant, the current count is increased after each step running finished.
75   // Collect the output result in the last loop which is represented by "loop_count_ - current_count_ == 1".
76   size_t loop_count_;
77   size_t current_count_;
78 
79   // The outputs.
80   std::vector<TensorPtr> outputs_;
81   std::vector<KernelWithIndex> output_nodes_;
82   size_t outputs_num_;
83   size_t current_outputs_num_;
84   bool need_loop_count_;
85 };
86 
87 using OutputActorPtr = std::shared_ptr<OutputActor>;
88 }  // namespace runtime
89 }  // namespace mindspore
90 
91 #endif  // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_OUTPUT_ACTOR_H_
92