/**
 * Copyright 2021-2024 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_UTILS_H_
#define MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_UTILS_H_

#include <iostream>
#include <string>
#include <vector>
#include "include/backend/debug/debugger/debugger.h"
#include "kernel/kernel.h"
#include "proto/debug_grpc.grpc.pb.h"
#include "runtime/hardware/device_context.h"

using mindspore::device::DeviceContext;
using mindspore::kernel::KernelLaunchAddr;
using mindspore::kernel::KernelTensor;

namespace mindspore {
std::vector<size_t> CheckRealOutput(const std::string &node_name, const size_t &output_size);

std::vector<size_t> GetValidDumpIndex(const CNodePtr &cnode, size_t index_size, bool is_input);

// When used for an abnormal dump, async_copy should be set to false (see the usage sketch below).
void LoadInputs(const CNodePtr &cnode, std::vector<device::DeviceAddress *> device_tensors, uint32_t exec_order,
                uint32_t root_graph_id, const DeviceContext *device_context, const bool trans_flag,
                const uint32_t sample_mode, const uint32_t sample_num, const bool async_copy = true);
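// Illustrative only (not part of the original header): a hedged sketch of calling LoadInputs from an
// abnormal-dump path. The cnode, device_tensors, exec_order, root_graph_id and device_context values
// are hypothetical placeholders assumed to come from the surrounding runtime, and the sample_mode /
// sample_num values are placeholders as well.
//
//   LoadInputs(cnode, device_tensors, exec_order, root_graph_id, device_context,
//              /*trans_flag=*/true, /*sample_mode=*/0, /*sample_num=*/0,
//              /*async_copy=*/false);  // abnormal dump: async_copy disabled per the note above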

void LoadOutputs(const CNodePtr &cnode, std::vector<device::DeviceAddress *> device_tensors, uint32_t exec_order,
                 uint32_t root_graph_id, const DeviceContext *device_context, const bool trans_flag,
                 const uint32_t sample_mode, const uint32_t sample_num);

bool CheckReadData(const CNodePtr &cnode);

void ReadDataAndDump(const CNodePtr &cnode, std::vector<device::DeviceAddress *> input_kernel_tensors,
                     std::vector<device::DeviceAddress *> output_kernel_tensors, uint32_t exec_order,
                     const DeviceContext *device_context, const bool abnormal_dump = false);
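// Illustrative only: a hedged sketch of the assumed read-and-dump pattern, where CheckReadData gates
// the ReadDataAndDump call. The kernel tensor vectors and exec_order here are hypothetical
// placeholders supplied by the caller.
//
//   if (CheckReadData(cnode)) {
//     ReadDataAndDump(cnode, input_kernel_tensors, output_kernel_tensors, exec_order,
//                     device_context, /*abnormal_dump=*/false);
//   }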

std::string CheckDatasetSinkMode(const KernelGraphPtr &graph_ptr);

void LoadDataForDebugger(const KernelGraphPtr &graph_ptr);

void SuperKernelE2eDump(const KernelGraphPtr &graph);

// Process the reply and extract the command type.
DebuggerCommand GetCommand(const debugger::EventReply &reply);

// Parse other data out of EventReply (a usage sketch follows these declarations).
ProtoVector<debugger::WatchCondition_Parameter> GetParameters(const debugger::EventReply &reply);
ProtoVector<debugger::WatchNode> GetWatchnodes(const debugger::EventReply &reply);
std::string GetNodeName(const debugger::EventReply &reply);
std::string GetRunLevel(const debugger::EventReply &reply);
debugger::WatchCondition GetWatchcondition(const debugger::EventReply &reply);
int32_t GetWatchpointID(const debugger::EventReply &reply);
bool GetWatchpointDelete(const debugger::EventReply &reply);
ProtoVector<debugger::TensorProto> GetTensors(const debugger::EventReply &reply);
bool GetMiVersionMatched(const debugger::EventReply &reply);
// Get the full name of a tensor, which is the name used in TensorLoader.
std::string GetTensorFullName(const debugger::TensorProto &tensor);
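// Illustrative only: a hedged sketch of consuming a debugger::EventReply with the accessors above.
// The reply object is assumed to arrive over the debugger gRPC channel; the calls shown are just
// one possible way a caller might unpack it.
//
//   DebuggerCommand cmd = GetCommand(reply);                     // command type carried by the reply
//   int32_t wp_id = GetWatchpointID(reply);                      // watchpoint id (if any)
//   debugger::WatchCondition cond = GetWatchcondition(reply);    // watch condition payload
//   ProtoVector<debugger::WatchNode> nodes = GetWatchnodes(reply);
//   for (const auto &tensor : GetTensors(reply)) {
//     std::string full_name = GetTensorFullName(tensor);         // name used in TensorLoader
//   }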
}  // namespace mindspore
#endif  // MINDSPORE_CCSRC_DEBUG_DEBUGGER_DEBUGGER_UTILS_H_