/**
 * Copyright 2021-2022 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSOR_ARRAY_MANAGER_H_
#define MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSOR_ARRAY_MANAGER_H_
#include <vector>
#include <string>
#include <atomic>
#include <utility>
#include <map>
#include "include/backend/kernel_graph.h"
#include "include/backend/anf_runtime_algorithm.h"
#include "include/common/utils/anfalgo.h"
#include "runtime/device/tensor_array.h"
#include "runtime/device/tensors_queue.h"

namespace mindspore {
namespace device {
class TensorArrayMgr {
 public:
  // TensorArrayMgr is used to manage the TensorArrays.
  TensorArrayMgr() {}
  ~TensorArrayMgr() = default;

  static TensorArrayMgr &GetInstance() noexcept {
    static TensorArrayMgr instance;
    return instance;
  }

  TensorArrayMgr(const TensorArrayMgr &) = delete;
  TensorArrayMgr(const TensorArrayMgr &&) = delete;
  TensorArrayMgr &operator=(const TensorArrayMgr &&) = delete;
  TensorArrayMgr &operator=(const TensorArrayMgr &) = delete;

  void AddTensorArray(const int64_t handle, const TensorArrayPtr &ta) {
    MS_LOG(DEBUG) << "Add a TensorArray to map, handle is " << handle;
    (void)tensors_map_.emplace(std::make_pair(handle, ta));
    // Increase the handle count when a TensorArray is added.
    tensor_array_handle_count += 1;
  }

  TensorArrayPtr GetTensorArray(const int64_t handle) {
    if (tensors_map_.count(handle) == 0) {
      MS_LOG(EXCEPTION) << "Error handle [" << handle << "] to get tensorarray";
    } else {
      MS_LOG(DEBUG) << "Get TensorArray succeed, handle is " << handle;
      return tensors_map_[handle];
    }
  }

  bool EraseTensorArray(const int64_t handle) {
    if (tensors_map_.count(handle) == 1) {
      MS_LOG(DEBUG) << "Erase tensorarray from map, handle number is " << handle;
      (void)tensors_map_.erase(handle);
      return true;
    } else {
      MS_LOG(ERROR) << "Erase failed, no such handle " << handle;
      return false;
    }
  }

  int64_t GetHandleCount() const { return tensor_array_handle_count; }

 private:
  // Store the TensorArrays in a map, as pair(handle, TensorArrayPtr).
  std::map<const int64_t, TensorArrayPtr> tensors_map_;
  // Used as a unique handle number for each TensorArray.
  std::atomic<int64_t> tensor_array_handle_count{0};
};

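// A minimal usage sketch of the TensorArrayMgr singleton (illustrative comment only,
// not part of the original interface). It assumes a TensorArrayPtr `ta` has already
// been created by the device-specific TensorArray implementation declared in
// runtime/device/tensor_array.h; the running handle count can serve as the next handle:
//
//   auto &mgr = TensorArrayMgr::GetInstance();
//   int64_t handle = mgr.GetHandleCount();               // next unused handle number
//   mgr.AddTensorArray(handle, ta);                      // register the TensorArray and bump the count
//   TensorArrayPtr found = mgr.GetTensorArray(handle);   // look it up later by handle
//   (void)mgr.EraseTensorArray(handle);                  // remove the entry once it is no longer needed
//
// TensorsQueueMgr below follows the same pattern for TensorsQueue objects.
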
class TensorsQueueMgr {
 public:
  // TensorsQueueMgr is used to manage the TensorsQueues.
  TensorsQueueMgr() {}
  ~TensorsQueueMgr() = default;

  static TensorsQueueMgr &GetInstance() noexcept {
    static TensorsQueueMgr instance;
    return instance;
  }

  TensorsQueueMgr(const TensorsQueueMgr &) = delete;
  TensorsQueueMgr(const TensorsQueueMgr &&) = delete;
  TensorsQueueMgr &operator=(const TensorsQueueMgr &&) = delete;
  TensorsQueueMgr &operator=(const TensorsQueueMgr &) = delete;

  void AddTensorsQueue(const int64_t handle, const TensorsQueuePtr &tq) {
    MS_LOG(DEBUG) << "Add a TensorsQueue to map, handle is " << handle;
    (void)tensorsqueue_map_.emplace(std::make_pair(handle, tq));
    // Increase the handle count when a TensorsQueue is added.
    tensors_queue_handle_count += 1;
  }

  TensorsQueuePtr GetTensorsQueue(const int64_t handle) {
    if (tensorsqueue_map_.count(handle) == 0) {
      MS_LOG(EXCEPTION) << "Error handle [" << handle << "] to get TensorsQueue";
    } else {
      MS_LOG(DEBUG) << "Get TensorsQueue succeed, handle is " << handle;
      return tensorsqueue_map_[handle];
    }
  }

  bool EraseTensorsQueue(const int64_t handle) {
    if (tensorsqueue_map_.count(handle) == 1) {
      MS_LOG(DEBUG) << "Erase TensorsQueue from map, handle number is " << handle;
      (void)tensorsqueue_map_.erase(handle);
      return true;
    } else {
      MS_LOG(ERROR) << "Erase TensorsQueue failed, no such handle " << handle;
      return false;
    }
  }

  int64_t GetHandleCount() const { return tensors_queue_handle_count; }

 private:
  // Store the TensorsQueues in a map, as pair(handle, TensorsQueuePtr).
  std::map<const int64_t, TensorsQueuePtr> tensorsqueue_map_;
  // Used as a unique handle number for each TensorsQueue.
  std::atomic<int64_t> tensors_queue_handle_count{0};
};
}  // namespace device
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_RUNTIME_DEVICE_TENSOR_ARRAY_MANAGER_H_