/**
 * Copyright 2021 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_PROFILER_DEVICE_COMMON_PROFILING_MEMORY_H
#define MINDSPORE_PROFILER_DEVICE_COMMON_PROFILING_MEMORY_H

#include "proto/memory_profiling.pb.h"
#include <string>
#include <map>
#include <vector>
#include <memory>
#include "utils/ms_context.h"

namespace mindspore {
namespace profiler {
namespace ascend {
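// Records the memory footprint of a single execution node: its name, node id,
// and the ids of the input, output and workspace tensors it touches.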
class NodeMemory {
 public:
  NodeMemory() : node_name_(""), node_id_(0) {}
  ~NodeMemory() = default;

  void SetNodeName(const std::string &name) { node_name_ = name; }
  void SetNodeId(uint64_t node_id) { node_id_ = node_id; }
  void AddInputTensorId(uint64_t node_id) { input_tensor_id_.emplace_back(node_id); }
  void AddOutputTensorId(uint64_t node_id) { output_tensor_id_.emplace_back(node_id); }
  void AddWorkSpaceTensorId(uint64_t node_id) { workspace_tensor_id_.emplace_back(node_id); }
  std::string GetNodeName() const { return node_name_; }
  uint64_t GetNodeId() const { return node_id_; }
  std::vector<uint64_t> GetInputTensorId() const { return input_tensor_id_; }
  std::vector<uint64_t> GetOutputTensorId() const { return output_tensor_id_; }
  std::vector<uint64_t> GetWorkspaceTensorId() const { return workspace_tensor_id_; }

 private:
  std::string node_name_;
  uint64_t node_id_;
  std::vector<uint64_t> input_tensor_id_;
  std::vector<uint64_t> output_tensor_id_;
  std::vector<uint64_t> workspace_tensor_id_;
};

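// Describes one tensor's memory record: its aligned size, type, and the
// execution node ids that bound its lifetime (allocation and deallocation).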
class TensorMemory {
 public:
  TensorMemory() : tensor_id_(0), size_(0), type_(""), life_start_(0), life_end_(0), life_long_("") {}
  ~TensorMemory() = default;

  void SetTensorId(uint64_t tensor_id) { tensor_id_ = tensor_id; }
  void SetAlignedSize(uint64_t size) { size_ = size; }
  void SetType(const std::string &type) { type_ = type; }
  void SetLifeStart(uint64_t start) { life_start_ = start; }
  void SetLifeEnd(uint64_t end) { life_end_ = end; }
  void SetLifeLong(const std::string &life_long) { life_long_ = life_long; }
  uint64_t GetTensorId() const { return tensor_id_; }
  uint64_t GetAlignedSize() const { return size_; }
  std::string GetType() const { return type_; }
  uint64_t GetLifeStart() const { return life_start_; }
  uint64_t GetLifeEnd() const { return life_end_; }
  std::string GetLifeLong() const { return life_long_; }

 private:
  uint64_t tensor_id_;
  uint64_t size_;          // aligned tensor size
  std::string type_;       // see TensorType in somas_tensor.h
  uint64_t life_start_;    // execution node id at which the tensor memory is allocated
  uint64_t life_end_;      // execution node id at which the tensor memory is deallocated
  std::string life_long_;  // see LifeLongType in somas_tensor.h
};

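// Aggregates memory information for one graph: the accumulated static memory
// size plus the per-node and per-tensor records collected for that graph.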
class GraphMemory {
 public:
  explicit GraphMemory(uint32_t graph_id) : graph_id_(graph_id), static_mem_size_(0) {}
  ~GraphMemory() = default;
  void AddStaticMemorySize(uint32_t size) { static_mem_size_ += size; }
  void AddNodeMemory(const NodeMemory &node) { node_memory_.emplace_back(node); }
  void AddTensorMemory(const TensorMemory &node) { tensor_memory_.emplace_back(node); }
  uint32_t GetGraphId() const { return graph_id_; }
  uint32_t GetStaticMemSize() const { return static_mem_size_; }
  std::vector<NodeMemory> GetNodeMemory() const { return node_memory_; }
  std::vector<TensorMemory> GetTensorMemory() const { return tensor_memory_; }

 private:
  uint32_t graph_id_;
  uint32_t static_mem_size_;
  std::vector<NodeMemory> node_memory_;
  std::vector<TensorMemory> tensor_memory_;
};

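// Singleton collector that owns the per-graph memory records and serializes
// them into the MemoryProto protobuf. A minimal usage sketch built only from
// the members declared below (graph_id and device_mem_size are illustrative):
//   auto &profiler = MemoryProfiling::GetInstance();
//   if (profiler.IsMemoryProfilingEnable()) {
//     auto graph_mem = profiler.AddGraphMemoryNode(graph_id);
//     profiler.SetDeviceMemSize(device_mem_size);
//     profiler.SaveMemoryProfiling();
//   }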
class MemoryProfiling {
 public:
  MemoryProfiling() : device_mem_size_(0) {}
  ~MemoryProfiling() = default;

  static MemoryProfiling &GetInstance() {
    static MemoryProfiling instance;
    return instance;
  }

  bool IsMemoryProfilingEnable() const;
  std::shared_ptr<GraphMemory> AddGraphMemoryNode(uint32_t graph_id);
  std::shared_ptr<GraphMemory> GetGraphMemoryNode(uint32_t graph_id) const;
  void SetDeviceMemSize(uint64_t size) { device_mem_size_ = size; }
  bool MemoryToPB();
  void SaveMemoryProfiling();

 private:
  std::string GetOutputPath() const;

  MemoryProto memory_proto_;
  std::map<uint32_t, std::shared_ptr<GraphMemory>> graph_memory_;
  uint64_t device_mem_size_;
};
}  // namespace ascend
}  // namespace profiler
}  // namespace mindspore
#endif  // MINDSPORE_PROFILER_DEVICE_COMMON_PROFILING_MEMORY_H