/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_
#define MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_

#include <vector>
#include <string>
#include <memory>
#include "utils/hash_map.h"
#include "runtime/graph_scheduler/actor/actor_common.h"
#include "runtime/graph_scheduler/actor/memory_aware_actor.h"
#include "runtime/hardware/device_context.h"
#include "runtime/graph_scheduler/device_tensor_store.h"

namespace mindspore {
namespace runtime {
using mindspore::device::DeviceContext;

// The copy actor receives device tensors and control info, and copies data from the input device tensor to the
// output device tensor. The processing flow is RunOpData/RunOpControl -> CheckRunningCondition -> SendMemoryAllocReq
// -> OnMemoryAllocFinish -> Copy -> SendMemoryFreeReq -> SendOutput.
class CopyActor : public MemoryAwareActor {
 public:
  CopyActor(const std::string &name, AnfNode *from_kernel, const KernelGraphPtr &from_graph,
            const AID &memory_manager_aid)
      : MemoryAwareActor(name, KernelTransformType::kCopyActor, nullptr, memory_manager_aid),
        from_kernel_(from_kernel),
        from_graph_(from_graph),
        output_(nullptr),
        is_need_update_output_size_(false) {}
  ~CopyActor() override = default;

  // The memory-related operation interfaces.
  void SendMemoryAllocReq(OpContext<DeviceTensor> *const context) override;
  void SendMemoryFreeReq(OpContext<DeviceTensor> *const context) override;
  // The copy processing after the memory allocation is finished.
  void OnMemoryAllocFinish(OpContext<DeviceTensor> *const context) override;

  const DeviceTensorPtr &output() const { return output_; }
  bool is_need_update_output_size() const { return is_need_update_output_size_; }

 protected:
  void Init() override;
  void Run(OpContext<DeviceTensor> *const context) override;
  void UpdateOutputData(OpData<DeviceTensor> *const output_data, const DataArrowPtr &data_arrow,
                        const AnfNodePtr &output_node, OpContext<DeviceTensor> *const context) override;

 private:
  friend class GraphScheduler;
  friend class ControlNodeScheduler;
  friend class SchedulerHelper;

  // Fetch the device tensors for the copy.
  void FetchDeviceTensor(OpContext<DeviceTensor> *const context);

  // The copy source.
  AnfNode *from_kernel_;
  KernelGraphPtr from_graph_;

  // The input device tensors are saved from the input data or fetched by device_tensor_store_keys_.
  std::vector<DeviceTensor *> input_device_tensor_;
  // The output device tensors are saved from the output or fetched by device_tensor_store_keys_.
  std::vector<DeviceTensor *> output_device_tensor_;

  DeviceTensorPtr output_;
  // Whether the output size needs to be updated in the dynamic shape scenario.
  bool is_need_update_output_size_;
};

using CopyActorPtr = std::shared_ptr<CopyActor>;
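
// Illustrative usage sketch (an assumption added for clarity, not part of the original header): the friend
// classes GraphScheduler / SchedulerHelper are the intended creators of this actor, and the variables
// `from_kernel`, `kernel_graph`, and `memory_manager_aid` below are hypothetical names standing in for
// scheduler-owned state.
//
//   CopyActorPtr copy_actor = std::make_shared<CopyActor>(
//       "example_copy_actor", from_kernel, kernel_graph, memory_manager_aid);
//   // The scheduler then links the data/control arrows. At run time the actor follows
//   //   RunOpData/RunOpControl -> CheckRunningCondition -> SendMemoryAllocReq
//   //   -> OnMemoryAllocFinish -> Copy -> SendMemoryFreeReq -> SendOutput,
//   // and the copied result is exposed through copy_actor->output().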
}  // namespace runtime
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_RUNTIME_FRAMEWORK_ACTOR_COPY_ACTOR_H_