
Searched refs:tensor_name (Results 1 – 25 of 88) sorted by relevance


/third_party/mindspore/mindspore/ccsrc/debug/
tensor_load.h
39 void MoveTensorCurrentToPrev(std::string tensor_name) { in MoveTensorCurrentToPrev() argument
40 auto handle = tensor_list_map_.extract(tensor_name); in MoveTensorCurrentToPrev()
42 MS_LOG(INFO) << "Moving " << tensor_name << " from current map to previous map"; in MoveTensorCurrentToPrev()
49 bool TensorExistsInCurrent(std::string tensor_name) const { in TensorExistsInCurrent() argument
50 return tensor_list_map_.find(tensor_name) != tensor_list_map_.end(); in TensorExistsInCurrent()
54 …bool PrevTensorExistsInCurrent(std::string tensor_name) const { return TensorExistsInCurrent(tenso… in PrevTensorExistsInCurrent() argument
71 bool IsPrevTensor(std::string tensor_name) const { in IsPrevTensor() argument
73 if (tensor_name.length() <= suffix.length()) return false; in IsPrevTensor()
74 return std::equal(suffix.rbegin(), suffix.rend(), tensor_name.rbegin()); in IsPrevTensor()
79 auto tensor_name = tensor->GetName(); in LoadNewTensor() local
[all …]
debug_services.h
130 …std::string FindQualifiedTensorName(const std::string &tensor_name, unsigned const int &tensor_dev… in FindQualifiedTensorName()
137 … bool check_tensor_name = found != std::string::npos && w_name.substr(found + 1) == tensor_name; in FindQualifiedTensorName()
139 … (w_type && (tensor_name == w_name || w_name == "*")) || (!w_type && tensor_name == w_name); in FindQualifiedTensorName()
355 …void ReadTensorFromNpy(const std::string &tensor_name, const std::string &file_name, std::string *…
401 …ddAnalyzedTensorToCache(const bool recheck, const unsigned int id, const std::string &tensor_name);
406 …bool DumpTensorToFile(const std::string &tensor_name, bool trans_flag, const std::string &filepath,
429 bool TensorExistsInCurrent(const std::string &tensor_name);
431 void MoveTensorCurrentToPrev(const std::string &tensor_name);
433 void AppendToCacheEvictQueue(const std::string &tensor_name);
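
The tensor_load.h hits above show the debugger's two-generation cache: a lookup map keyed by tensor name, with entries renamed into a "previous" slot between steps. A minimal standalone sketch of that pattern, assuming a simplified TensorData payload and a hypothetical ":prev" key suffix:

    // Minimal sketch of the current/previous cache pattern from tensor_load.h.
    // TensorData and the ":prev" key suffix are illustrative assumptions.
    #include <iostream>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct TensorData {};  // stand-in for the real tensor payload

    class TensorCache {
     public:
      bool TensorExistsInCurrent(const std::string &tensor_name) const {
        return tensor_list_map_.find(tensor_name) != tensor_list_map_.end();
      }
      void MoveTensorCurrentToPrev(const std::string &tensor_name) {
        // extract() keeps the node alive, so only the key needs to change.
        auto handle = tensor_list_map_.extract(tensor_name);
        if (!handle.empty()) {
          std::cout << "Moving " << tensor_name << " from current map to previous map\n";
          handle.key() = tensor_name + ":prev";  // assumed suffix
          tensor_list_map_.insert(std::move(handle));
        }
      }

     private:
      std::map<std::string, std::shared_ptr<TensorData>> tensor_list_map_;
    };
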
/third_party/mindspore/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/api/
model.h
47 inline MSTensor GetInputByTensorName(const std::string &tensor_name);
51 inline MSTensor GetOutputByTensorName(const std::string &tensor_name);
52 inline std::vector<MSTensor> GetOutputsByNodeName(const std::string &tensor_name);
58 MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
60 MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
66 MSTensor Model::GetInputByTensorName(const std::string &tensor_name) { in GetInputByTensorName() argument
67 return GetInputByTensorName(StringToChar(tensor_name)); in GetInputByTensorName()
72 MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) { in GetOutputByTensorName() argument
73 return GetOutputByTensorName(StringToChar(tensor_name)); in GetOutputByTensorName()
76 std::vector<MSTensor> Model::GetOutputsByNodeName(const std::string &tensor_name) { in GetOutputsByNodeName() argument
[all …]
/third_party/mindspore/include/api/
model.h
117 inline MSTensor GetInputByTensorName(const std::string &tensor_name);
157 inline MSTensor GetOutputByTensorName(const std::string &tensor_name);
213 MSTensor GetInputByTensorName(const std::vector<char> &tensor_name);
215 MSTensor GetOutputByTensorName(const std::vector<char> &tensor_name);
221 MSTensor Model::GetInputByTensorName(const std::string &tensor_name) { in GetInputByTensorName() argument
222 return GetInputByTensorName(StringToChar(tensor_name)); in GetInputByTensorName()
227 MSTensor Model::GetOutputByTensorName(const std::string &tensor_name) { in GetOutputByTensorName() argument
228 return GetOutputByTensorName(StringToChar(tensor_name)); in GetOutputByTensorName()
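
The model.h hits above are the public lookup-by-name API: the std::string overloads only bridge to the std::vector<char> overloads through StringToChar. A hedged usage sketch, assuming a model that is already built and treating an empty Name() as a miss (an assumption, not documented behaviour):

    // Hedged usage sketch: fetch a model input by tensor name through the string
    // overload, which forwards to the vector<char> overload via StringToChar.
    // The include path and the empty-Name() miss check are assumptions.
    #include <iostream>
    #include <string>

    #include "include/api/model.h"

    void PrintInputInfo(mindspore::Model *model, const std::string &tensor_name) {
      mindspore::MSTensor tensor = model->GetInputByTensorName(tensor_name);
      if (tensor.Name().empty()) {  // assumed way to detect a missing tensor
        std::cout << "no input tensor named " << tensor_name << std::endl;
        return;
      }
      std::cout << tensor.Name() << ": " << tensor.DataSize() << " bytes" << std::endl;
    }
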
/third_party/mindspore/tests/ut/cpp/cxx_api/
types_test.cc
39 std::string tensor_name = "Name1"; in TEST_F() local
41 MSTensor tensor(tensor_name, data_type, {}, nullptr, 0); in TEST_F()
42 ASSERT_EQ(tensor.Name(), tensor_name); in TEST_F()
145 std::string tensor_name = "tensor_name"; in TEST_F() local
151 auto tensor = MSTensor::StringsToTensor(tensor_name, origin_strs); in TEST_F()
153 ASSERT_EQ(tensor->Name(), tensor_name); in TEST_F()
162 std::string tensor_name = "tensor_name"; in TEST_F() local
164 auto tensor = MSTensor::StringsToTensor(tensor_name, origin_strs); in TEST_F()
166 ASSERT_EQ(tensor->Name(), tensor_name); in TEST_F()
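
The unit-test hits show that an MSTensor keeps the name it was constructed with, which is what the by-name lookups elsewhere rely on. A small sketch mirroring that test, with an arbitrary float32 type and an empty shape chosen only for illustration:

    // Sketch mirroring types_test.cc: the tensor keeps its construction name.
    // Float32 and the empty shape are arbitrary choices for illustration.
    #include <string>

    #include "include/api/types.h"

    bool NameRoundTrips(const std::string &tensor_name) {
      mindspore::MSTensor tensor(tensor_name, mindspore::DataType::kNumberTypeFloat32, {}, nullptr, 0);
      return tensor.Name() == tensor_name;
    }
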
/third_party/mindspore/mindspore/lite/tools/converter/
converter_context.h
98 …void UpdateGraphInputTensorShape(const std::string &tensor_name, const std::vector<int64_t> &shape… in UpdateGraphInputTensorShape() argument
99 graph_input_tensor_shape_map_[tensor_name] = shape; in UpdateGraphInputTensorShape()
101 std::vector<int64_t> GetGraphInputTensorShape(const std::string &tensor_name) const { in GetGraphInputTensorShape() argument
102 if (graph_input_tensor_shape_map_.find(tensor_name) == graph_input_tensor_shape_map_.end()) { in GetGraphInputTensorShape()
105 return graph_input_tensor_shape_map_.at(tensor_name); in GetGraphInputTensorShape()
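
converter_context.h keeps graph-input shapes in a map keyed by tensor name and returns an empty shape on a miss. A standalone sketch of that registry pattern, with illustrative class and member names:

    // Standalone sketch of the graph-input shape registry; names are illustrative.
    #include <cstdint>
    #include <map>
    #include <string>
    #include <vector>

    class InputShapeRegistry {
     public:
      void UpdateGraphInputTensorShape(const std::string &tensor_name, const std::vector<int64_t> &shape) {
        graph_input_tensor_shape_map_[tensor_name] = shape;
      }
      // Returns an empty shape when the tensor name has not been registered.
      std::vector<int64_t> GetGraphInputTensorShape(const std::string &tensor_name) const {
        auto it = graph_input_tensor_shape_map_.find(tensor_name);
        return it == graph_input_tensor_shape_map_.end() ? std::vector<int64_t>{} : it->second;
      }

     private:
      std::map<std::string, std::vector<int64_t>> graph_input_tensor_shape_map_;
    };
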
/third_party/mindspore/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/benchmark/
calib_output.cc
54 std::string tensor_name; in ReadCalibData() local
56 name_line >> tensor_name >> dim; in ReadCalibData()
65 String name(tensor_name.c_str()); in ReadCalibData()
108 if (output->tensor_name() != calib->tensor_name()) { in CompareOutputs()
/third_party/mindspore/mindspore/lite/micro/example/mnist_x86/benchmark/
calib_output.cc
54 std::string tensor_name; in ReadCalibData() local
56 name_line >> tensor_name >> dim; in ReadCalibData()
65 String name(tensor_name.c_str()); in ReadCalibData()
108 if (output->tensor_name() != calib->tensor_name()) { in CompareOutputs()
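
Both calib_output.cc copies parse one header line per record, extracting the tensor name and a dimension count with stream operators. A sketch of that parsing step; the file layout beyond "name dim" is an assumption here:

    // Sketch of the calibration-header parsing: each record starts with a line
    // holding the tensor name and its dimension count (layout beyond that is assumed).
    #include <sstream>
    #include <string>

    bool ParseCalibHeader(const std::string &line, std::string *tensor_name, int *dim) {
      std::istringstream name_line(line);
      return static_cast<bool>(name_line >> *tensor_name >> *dim);
    }
    // ParseCalibHeader("conv1_output 4", &name, &dim) -> name == "conv1_output", dim == 4
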
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/
gpu_device_address.cc
131 bool GPUDeviceAddress::LoadMemToHost(const std::string &tensor_name, int execution_order, const std… in LoadMemToHost() argument
140 if (Debugger::GetInstance()->TensorExistsInCurrent(tensor_name)) { in LoadMemToHost()
141 MS_LOG(INFO) << tensor_name << " already loaded for this step so not loading it again."; in LoadMemToHost()
154 tensor_data->SetName(tensor_name); in LoadMemToHost()
163 MS_LOG(INFO) << "E2E tensor name is " << tensor_name; in LoadMemToHost()
/third_party/mindspore/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/src/
session.cc
115 output_names.push_back(output->tensor_name()); in GetOutputTensorNames()
120 mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { in GetOutputByTensorName()
122 if (output->tensor_name() == tensor_name) { in GetOutputByTensorName()
session.h
62 …mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { ret… in GetInputsByTensorName() argument
70 mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
/third_party/mindspore/mindspore/lite/micro/example/mnist_x86/src/
session.cc
116 output_names.push_back(output->tensor_name()); in GetOutputTensorNames()
121 mindspore::tensor::MSTensor *LiteSession::GetOutputByTensorName(const String &tensor_name) const { in GetOutputByTensorName()
123 if (output->tensor_name() == tensor_name) { in GetOutputByTensorName()
session.h
62 …mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const override { ret… in GetInputsByTensorName() argument
70 mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const override;
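
The two session implementations resolve GetOutputByTensorName with a linear scan over the session outputs, comparing each tensor_name(). A standalone sketch of that loop, with a stand-in struct instead of mindspore::tensor::MSTensor:

    // Standalone sketch of the GetOutputByTensorName scan; OutputTensor is a
    // stand-in for mindspore::tensor::MSTensor.
    #include <string>
    #include <vector>

    struct OutputTensor {
      std::string name;
      const std::string &tensor_name() const { return name; }
    };

    const OutputTensor *FindOutputByTensorName(const std::vector<OutputTensor> &outputs,
                                               const std::string &tensor_name) {
      for (const auto &output : outputs) {
        if (output.tensor_name() == tensor_name) {
          return &output;
        }
      }
      return nullptr;  // a miss is reported as nullptr (assumed to match the session)
    }
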
/third_party/mindspore/mindspore/lite/src/train/
transfer_session.cc
111 mindspore::tensor::MSTensor *TransferSession::GetInputsByTensorName(const std::string &tensor_name)… in GetInputsByTensorName()
113 auto ret = backbone_session_->GetInputsByTensorName(tensor_name); in GetInputsByTensorName()
116 ret = TrainSession::GetInputsByTensorName(tensor_name); in GetInputsByTensorName()
164 auto in_id = TSFindTensorByName(tensors_, input->tensor_name()); in ConnectionMap()
166 MS_LOG(ERROR) << "cannot find input tensor " << input->tensor_name(); in ConnectionMap()
170 auto out_id = TSFindTensorByName(backbone_session_->tensors_, output->tensor_name()); in ConnectionMap()
172 MS_LOG(ERROR) << "cannot find input tensor " << output->tensor_name(); in ConnectionMap()
train_session.cc
323 auto t_n = tensor->tensor_name(); in IsLossTensor()
558 if (!ms_tensor->tensor_name().empty()) { in CompileEvalOutputs()
559 eval_output_tensor_names_.emplace_back(ms_tensor->tensor_name()); in CompileEvalOutputs()
590 if (!ms_tensor->tensor_name().empty()) { in CompileTrainOutputs()
591 train_output_tensor_names_.emplace_back(ms_tensor->tensor_name()); in CompileTrainOutputs()
706 auto name = kernelParam->tensor_name(); in GetOptimizerParams()
709 if (params[iy]->tensor_name() == name) { in GetOptimizerParams()
727 MS_LOG(ERROR) << "Param tensor " << param->tensor_name() << " is null."; in SetOptimizerParams()
739 …MS_LOG(ERROR) << "Tensor " << param->tensor_name() << " with " << param->ElementsNum() << " elelmt… in SetOptimizerParams()
771 MS_LOG(ERROR) << "gradient tensor " << gradient->tensor_name() << " is null."; in ApplyGradients()
[all …]
train_utils.cc
40 if (where[i]->tensor_name() == searchParameter) { in TSFindTensorByName()
171 MS_LOG(DEBUG) << "Convert tensor to fp16 " << tensor->tensor_name(); in CastTensor()
184 MS_LOG(DEBUG) << "Convert tensor to fp32 " << tensor->tensor_name(); in CastTensor()
197 MS_LOG(DEBUG) << "Tensor: " << tensor->tensor_name() << " type is " << tensor->data_type(); in ScaleTensor()
201 MS_LOG(DEBUG) << "Scale tensor: " << tensor->tensor_name() << " " << scale; in ScaleTensor()
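
The training code above repeatedly resolves tensors by name through TSFindTensorByName-style helpers. A sketch of such a helper, returning an index or a sentinel; the Tensor type is a placeholder for the real lite tensor:

    // Sketch of a TSFindTensorByName-style helper; Tensor is a placeholder type.
    #include <cstddef>
    #include <string>
    #include <vector>

    struct Tensor {
      std::string name;
      const std::string &tensor_name() const { return name; }
    };

    constexpr std::size_t kTensorNotFound = static_cast<std::size_t>(-1);

    std::size_t FindTensorByName(const std::vector<Tensor *> &where, const std::string &search_name) {
      for (std::size_t i = 0; i < where.size(); ++i) {
        if (where[i] != nullptr && where[i]->tensor_name() == search_name) {
          return i;
        }
      }
      return kTensorNotFound;
    }
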
/third_party/mindspore/mindspore/lite/src/delegate/tensorrt/
tensorrt_allocator.cc
74 void *TensorRTAllocator::GetDevicePtr(const std::string &tensor_name) { in GetDevicePtr() argument
75 if (tensor_name.empty()) { in GetDevicePtr()
78 if (cuda_tensor_map_.find(tensor_name) == cuda_tensor_map_.end()) { in GetDevicePtr()
81 return this->cuda_tensor_map_.find(tensor_name)->second.data; in GetDevicePtr()
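
TensorRTAllocator::GetDevicePtr maps a tensor name to previously registered device memory, guarding against empty names and misses. A simplified sketch of that bookkeeping, without the size and ownership fields the real entry carries:

    // Simplified sketch of the tensor_name -> device-pointer bookkeeping; the real
    // entry also tracks allocation size and ownership.
    #include <map>
    #include <string>

    struct CudaTensor {
      void *data = nullptr;  // device memory previously registered for this tensor
    };

    class DevicePtrMap {
     public:
      void *GetDevicePtr(const std::string &tensor_name) const {
        if (tensor_name.empty()) {
          return nullptr;
        }
        auto it = cuda_tensor_map_.find(tensor_name);
        return it == cuda_tensor_map_.end() ? nullptr : it->second.data;
      }

     private:
      std::map<std::string, CudaTensor> cuda_tensor_map_;
    };
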
/third_party/mindspore/mindspore/ccsrc/cxx_api/model/
model.cc
138 MSTensor Model::GetInputByTensorName(const std::vector<char> &tensor_name) { in GetInputByTensorName() argument
139 std::string tensor_name_str = CharToString(tensor_name); in GetInputByTensorName()
158 MSTensor Model::GetOutputByTensorName(const std::vector<char> &tensor_name) { in GetOutputByTensorName() argument
159 std::string tensor_name_str = CharToString(tensor_name); in GetOutputByTensorName()
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/custom/
custom_coder.cc
89 if (tensors[i]->tensor_name().size() > MAX_STR_LEN) { in TransformTensors()
90 MS_LOG(ERROR) << "tensor name is too long: " << tensors[i]->tensor_name(); in TransformTensors()
94 << "malloc(" << tensors[i]->tensor_name().length() + 1 << ");\n"; in TransformTensors()
96 << "\"" << tensors[i]->tensor_name() << "\"" in TransformTensors()
/third_party/mindspore/mindspore/lite/tools/converter/parser/onnx/
onnx_model_parser.h
60 …STATUS SetTensorQuantParam(const std::string &tensor_name, std::vector<schema::QuantParamT> *quant…
61 …STATUS SetTensorQuantParamFromNode(const std::string &tensor_name, std::vector<schema::QuantParamT…
62 …STATUS CopyTensorQuantParam(const std::string &tensor_name, schema::QuantParamT *quant_param, bool…
/third_party/mindspore/mindspore/lite/micro/example/mnist_stm32f746/mnist_stm32f746/include/
lite_session.h
75 virtual mindspore::tensor::MSTensor *GetInputsByTensorName(const String &tensor_name) const = 0;
113 virtual mindspore::tensor::MSTensor *GetOutputByTensorName(const String &tensor_name) const = 0;
/third_party/mindspore/mindspore/lite/tools/benchmark/
benchmark_base.cc
149 std::string tensor_name; in ReadCalibData() local
155 string_line1 >> tensor_name >> dim; in ReadCalibData()
162 auto ret = ReadTensorData(in_file, tensor_name, dims); in ReadCalibData()
164 MS_LOG(ERROR) << "Read tensor data failed, tensor name: " << tensor_name; in ReadCalibData()
173 int BenchmarkBase::ReadTensorData(std::ifstream &in_file_stream, const std::string &tensor_name, in ReadTensorData() argument
178 if (this->benchmark_data_.find(tensor_name) != this->benchmark_data_.end()) { in ReadTensorData()
184 if (GetDataTypeByTensorName(tensor_name) == static_cast<int>(kObjectTypeString)) { in ReadTensorData()
199 MS_LOG(ERROR) << "New CheckTensor failed, tensor name: " << tensor_name; in ReadTensorData()
202 this->benchmark_tensor_names_.push_back(tensor_name); in ReadTensorData()
203 this->benchmark_data_.insert(std::make_pair(tensor_name, check_tensor)); in ReadTensorData()
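
BenchmarkBase::ReadTensorData skips names that are already in benchmark_data_ and otherwise records both the name and the parsed data. A sketch of that dedup-and-record step, with CheckTensor reduced to a vector of floats:

    // Sketch of the dedup-and-record step; CheckTensor is reduced to floats here.
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>
    #include <vector>

    using CheckTensor = std::vector<float>;  // simplified stand-in

    class CalibStore {
     public:
      bool AddTensor(const std::string &tensor_name, CheckTensor data) {
        if (benchmark_data_.find(tensor_name) != benchmark_data_.end()) {
          return false;  // already read: keep the first copy, as the benchmark does
        }
        benchmark_tensor_names_.push_back(tensor_name);
        benchmark_data_.insert(std::make_pair(tensor_name, std::make_shared<CheckTensor>(std::move(data))));
        return true;
      }

     private:
      std::vector<std::string> benchmark_tensor_names_;
      std::map<std::string, std::shared_ptr<CheckTensor>> benchmark_data_;
    };
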
/third_party/mindspore/include/sdk_api/
model.h
208 …nsorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
218 …sorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
/third_party/mindspore/mindspore/ccsrc/debug/debugger/
debugger_utils.cc
100 string tensor_name = kernel_name + ':' + std::to_string(j); in LoadOutputs() local
102 … auto ret = gpu_addr->LoadMemToHost(tensor_name, exec_order_, format, int_shapes, type, j, false); in LoadOutputs()
105 << ", tensor_name:" << tensor_name << ", host_format:" << format << ".!"; in LoadOutputs()
/third_party/mindspore/include/c_api/
model_c.h
132 …nsorHandle OH_AI_ModelGetInputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
140 …sorHandle OH_AI_ModelGetOutputByTensorName(const OH_AI_ModelHandle model, const char *tensor_name);
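
model_c.h (and the sdk_api copy above) expose the same lookup through a C interface. A hedged caller sketch, assuming the include path, a model handle that has already been built, and a NULL handle on a miss:

    // Hedged caller sketch for the C API; include path, a built model handle, and
    // NULL-on-miss behaviour are assumptions.
    #include <cstdio>

    #include "include/c_api/model_c.h"

    void DumpInputHandle(OH_AI_ModelHandle model, const char *tensor_name) {
      OH_AI_TensorHandle input = OH_AI_ModelGetInputByTensorName(model, tensor_name);
      if (input == nullptr) {
        std::printf("no input named %s\n", tensor_name);
        return;
      }
      std::printf("found input tensor %s\n", tensor_name);
    }
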
