/third_party/mindspore/mindspore/core/utils/ |
D | tensor_construct_utils.cc |
    27  auto tensor_data = tensor->data_c();  in CreateZerosTensor() local
    28  char *data = reinterpret_cast<char *>(tensor_data);  in CreateZerosTensor()
    40  auto tensor_data = tensor->data_c();  in CreateOnesTensor() local
    42  …{kNumberTypeBool, [&tensor_data, mem_size]() { SetTensorData<bool>(tensor_data, true, mem_size); }…  in CreateOnesTensor()
    44  …[&tensor_data, mem_size]() { SetTensorData<int8_t>(tensor_data, static_cast<int8_t>(1), mem_size);…  in CreateOnesTensor()
    46  …[&tensor_data, mem_size]() { SetTensorData<int16_t>(tensor_data, static_cast<int16_t>(1), mem_size…  in CreateOnesTensor()
    48  …[&tensor_data, mem_size]() { SetTensorData<int32_t>(tensor_data, static_cast<int32_t>(1), mem_size…  in CreateOnesTensor()
    50  …[&tensor_data, mem_size]() { SetTensorData<int64_t>(tensor_data, static_cast<int64_t>(1), mem_size…  in CreateOnesTensor()
    52  …[&tensor_data, mem_size]() { SetTensorData<uint8_t>(tensor_data, static_cast<uint8_t>(1), mem_size…  in CreateOnesTensor()
    54  …[&tensor_data, mem_size]() { SetTensorData<uint16_t>(tensor_data, static_cast<uint16_t>(1), mem_si…  in CreateOnesTensor()
    [all …]
|
D | tensor_construct_utils.h |
    26  auto tensor_data = reinterpret_cast<T *>(data);  in SetTensorData() local
    27  MS_EXCEPTION_IF_NULL(tensor_data);  in SetTensorData()
    28  std::fill(tensor_data, tensor_data + data_length, num);  in SetTensorData()
|
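The two entries above show the helper pattern behind CreateZerosTensor/CreateOnesTensor: a templated SetTensorData that fills a raw buffer, selected through a table of per-dtype lambdas. Below is a minimal, self-contained sketch of that pattern; all names (SetTensorDataSketch, TypeIdSketch, FillOnes) are hypothetical stand-ins, not the MindSpore implementation.

// Minimal sketch (not the MindSpore sources): a templated fill helper plus a
// dtype-keyed lambda table, mirroring the pattern visible in the snippets above.
#include <cstddef>
#include <cstdint>
#include <algorithm>
#include <cassert>
#include <functional>
#include <unordered_map>
#include <vector>
#include <iostream>

enum TypeIdSketch { kSketchBool, kSketchInt32, kSketchFloat32 };  // stand-in for kNumberType* ids

template <typename T>
void SetTensorDataSketch(void *data, T num, size_t data_length) {
  auto *typed = static_cast<T *>(data);
  assert(typed != nullptr);                     // the real code uses MS_EXCEPTION_IF_NULL
  std::fill(typed, typed + data_length, num);   // fill every element with `num`
}

void FillOnes(TypeIdSketch type, void *data, size_t elem_count) {
  // Dispatch table: each entry captures the buffer and writes "1" in its own element type.
  const std::unordered_map<TypeIdSketch, std::function<void()>> fillers = {
      {kSketchBool,    [&]() { SetTensorDataSketch<bool>(data, true, elem_count); }},
      {kSketchInt32,   [&]() { SetTensorDataSketch<int32_t>(data, 1, elem_count); }},
      {kSketchFloat32, [&]() { SetTensorDataSketch<float>(data, 1.0f, elem_count); }},
  };
  fillers.at(type)();
}

int main() {
  std::vector<int32_t> buf(4, 0);
  FillOnes(kSketchInt32, buf.data(), buf.size());
  for (int32_t v : buf) std::cout << v << ' ';  // prints: 1 1 1 1
  std::cout << '\n';
  return 0;
}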
/third_party/mindspore/tests/ut/python/debugger/gpu_tests/ |
D | test_read_tensors_nonexist_node.py |
    66  tensor_data = debugger_backend.read_tensors([info_nonexist])
    69  assert len(tensor_data) == 1
    71  self.print_read_tensors([info_nonexist], tensor_data, 0, False)
    73  self.compare_expect_actual_result([info_nonexist], tensor_data, 0)
    85  tensor_data = debugger_backend.read_tensors([info_nonexist])
    87  assert len(tensor_data) == 1
    89  self.print_read_tensors([info_nonexist], tensor_data, 1, True)
    91  self.compare_expect_actual_result([info_nonexist], tensor_data, 1)
    103  tensor_data = debugger_backend.read_tensors([info_nonexist])
    105  assert len(tensor_data) == 1
    [all …]
|
D | test_read_tensors.py |
    93  tensor_data = debugger_backend.read_tensors(self.tensor_info)
    95  self.print_read_tensors(self.tensor_info, tensor_data, 0, False)
    97  self.compare_expect_actual_result(self.tensor_info, tensor_data, 0)
    114  tensor_data = debugger_backend.read_tensors(self.tensor_info)
    116  self.compare_expect_actual_result(self.tensor_info, tensor_data, 0)
    133  for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
    143  tensor_data.data_ptr, np.uint8, tensor_data.data_size).tolist()
    145  assert tensor_data.data_size == info['tensor_data']['size_in_bytes']
    146  assert tensor_data.dtype == info['tensor_data']['debugger_dtype']
    147  assert tensor_data.shape == info['tensor_data']['shape']
    [all …]
|
/third_party/mindspore/tests/st/debugger/ |
D | test_sync_trans_true_read_tensor.py |
    55  tensor_data = debugger_backend.read_tensors(tensor_info)
    57  print_read_tensors(tensor_info, tensor_data)
    62  def print_read_tensors(tensor_info, tensor_data):  argument
    80  tensor_data[x].data_ptr, np.uint8, tensor_data[x].data_size)) + "\n")
    81  py_byte_size = len(tensor_data[x].data_ptr)
    82  c_byte_size = tensor_data[x].data_size
    86  f_write.write("size in bytes = " + str(tensor_data[x].data_size) + "\n")
    87  f_write.write("debugger dtype = " + str(tensor_data[x].dtype) + "\n")
    88  f_write.write("shape = " + str(tensor_data[x].shape) + "\n")
|
D | test_sync_trans_false_read_tensors.py |
    55  tensor_data = debugger_backend.read_tensors(tensor_info)
    57  print_read_tensors(tensor_info, tensor_data)
    62  def print_read_tensors(tensor_info, tensor_data):  argument
    80  tensor_data[x].data_ptr, np.uint8, tensor_data[x].data_size)) + "\n")
    81  py_byte_size = len(tensor_data[x].data_ptr)
    82  c_byte_size = tensor_data[x].data_size
    86  f_write.write("size in bytes = " + str(tensor_data[x].data_size) + "\n")
    87  f_write.write("debugger dtype = " + str(tensor_data[x].dtype) + "\n")
    88  f_write.write("shape = " + str(tensor_data[x].shape) + "\n")
|
D | test_async_sink_mode_true_read_tensors.py |
    52  tensor_data = debugger_backend.read_tensors(tensor_info)
    54  print_read_tensors(tensor_info, tensor_data)
    59  def print_read_tensors(tensor_info, tensor_data):  argument
    77  tensor_data[x].data_ptr, np.uint8, tensor_data[x].data_size)) + "\n")
    78  py_byte_size = len(tensor_data[x].data_ptr)
    79  c_byte_size = tensor_data[x].data_size
    83  f_write.write("size in bytes = " + str(tensor_data[x].data_size) + "\n")
    84  f_write.write("debugger dtype = " + str(tensor_data[x].dtype) + "\n")
    85  f_write.write("shape = " + str(tensor_data[x].shape) + "\n")
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/ |
D | gpu_device_address.cc |
    152  auto tensor_data = std::make_shared<mindspore::TensorData>();  in LoadMemToHost() local
    153  MS_EXCEPTION_IF_NULL(tensor_data);  in LoadMemToHost()
    154  tensor_data->SetName(tensor_name);  in LoadMemToHost()
    155  tensor_data->SetExecutionOrder(execution_order);  in LoadMemToHost()
    156  tensor_data->SetSlot(slot);  in LoadMemToHost()
    157  tensor_data->SetTensor(out_tensor);  in LoadMemToHost()
    158  tensor_data->SetDataPtr(static_cast<char *>(out_tensor->data_c()));  in LoadMemToHost()
    159  tensor_data->SetByteSize(out_tensor->data().nbytes());  in LoadMemToHost()
    160  tensor_data->SetType((unsigned int)host_type);  in LoadMemToHost()
    161  tensor_data->SetShape(out_tensor->shape());  in LoadMemToHost()
    [all …]
|
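The LoadMemToHost snippet above builds a TensorData record describing the host copy of a device buffer: name, execution order, slot, data pointer, byte size, dtype id and shape. The sketch below mirrors that population step with a hypothetical plain struct (TensorDataSketch) and helper (MakeTensorData) standing in for mindspore::TensorData and the debugger's setters; the values in main are purely illustrative.

// Sketch only: a plain record populated the same way the LoadMemToHost snippet
// above populates the real class through setters.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct TensorDataSketch {           // hypothetical stand-in, not the debugger class
  std::string name;
  int execution_order = 0;
  size_t slot = 0;
  char *data_ptr = nullptr;         // host copy of the device buffer
  size_t byte_size = 0;
  unsigned int type = 0;            // numeric dtype id
  std::vector<int64_t> shape;
};

std::shared_ptr<TensorDataSketch> MakeTensorData(const std::string &tensor_name, int execution_order,
                                                 size_t slot, char *host_ptr, size_t nbytes,
                                                 unsigned int host_type, std::vector<int64_t> shape) {
  auto tensor_data = std::make_shared<TensorDataSketch>();
  tensor_data->name = tensor_name;
  tensor_data->execution_order = execution_order;
  tensor_data->slot = slot;
  tensor_data->data_ptr = host_ptr;   // the real code points at out_tensor->data_c()
  tensor_data->byte_size = nbytes;    // out_tensor->data().nbytes() in the snippet
  tensor_data->type = host_type;
  tensor_data->shape = std::move(shape);
  return tensor_data;
}

int main() {
  char host_buf[16] = {0};            // pretend host copy of a 2x2 float tensor
  auto td = MakeTensorData("example/node-op1", /*execution_order=*/3, /*slot=*/0,
                           host_buf, sizeof(host_buf), /*host_type=*/0, {1, 2, 2});
  std::cout << td->name << ": " << td->byte_size << " bytes\n";
  return 0;
}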
/third_party/mindspore/mindspore/lite/tools/benchmark/ |
D | benchmark_c_api.cc |
    299  auto tensor_data = OH_AI_TensorGetData(tensor);  in CompareDataGetTotalBiasAndSize() local
    300  if (tensor_data == nullptr) {  in CompareDataGetTotalBiasAndSize()
    311  bias = CompareData<float, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    315  bias = CompareData<int8_t, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    319  bias = CompareData<uint8_t, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    323  bias = CompareData<int32_t, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    327  bias = CompareData<int16_t, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    331  bias = CompareData<bool, int64_t>(name, vec_shape, tensor_data);  in CompareDataGetTotalBiasAndSize()
    357  auto tensor_data = OH_AI_TensorGetData(input);  in PrintInputData() local
    361  std::cout << static_cast<const float *>(tensor_data)[j] << " ";  in PrintInputData()
    [all …]
|
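CompareDataGetTotalBiasAndSize above fetches the raw pointer with OH_AI_TensorGetData and then picks a CompareData<T> instantiation per dtype. The sketch below reproduces only that dispatch idea with hypothetical names (DTypeSketch, MeanAbsDiff, CompareByType); it is neither the benchmark tool's actual comparison metric nor the OH_AI C API.

// Sketch of dtype-dispatched comparison: cast the raw pointer to the element type
// implied by the tensor's dtype, then accumulate a simple error metric.
#include <cstddef>
#include <cstdint>
#include <cmath>
#include <vector>
#include <iostream>

enum DTypeSketch { kCmpF32, kCmpI8, kCmpI32 };   // stand-ins for the real dtype enum values

template <typename T>
double MeanAbsDiff(const void *raw, const std::vector<double> &golden) {
  const T *data = static_cast<const T *>(raw);
  double sum = 0.0;
  for (size_t i = 0; i < golden.size(); ++i) {
    sum += std::fabs(static_cast<double>(data[i]) - golden[i]);
  }
  return golden.empty() ? 0.0 : sum / golden.size();
}

double CompareByType(DTypeSketch dtype, const void *tensor_data, const std::vector<double> &golden) {
  switch (dtype) {                   // mirrors the per-type branches shown above
    case kCmpF32: return MeanAbsDiff<float>(tensor_data, golden);
    case kCmpI8:  return MeanAbsDiff<int8_t>(tensor_data, golden);
    case kCmpI32: return MeanAbsDiff<int32_t>(tensor_data, golden);
  }
  return 0.0;
}

int main() {
  std::vector<float> out = {1.0f, 2.0f, 3.5f};
  std::vector<double> golden = {1.0, 2.0, 3.0};
  std::cout << CompareByType(kCmpF32, out.data(), golden) << '\n';  // ~0.1667
  return 0;
}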
/third_party/mindspore/tests/ut/cpp/ops/ |
D | test_ops_logical_not.cc |
    32  auto tensor_data = reinterpret_cast<T *>(data);  in SetTensorData() local
    33  MS_EXCEPTION_IF_NULL(tensor_data);  in SetTensorData()
    35  *tensor_data = num;  in SetTensorData()
    36  ++tensor_data;  in SetTensorData()
|
D | test_ops_assert.cc |
    32  auto tensor_data = reinterpret_cast<T *>(data);  in SetTensorData() local
    33  MS_EXCEPTION_IF_NULL(tensor_data);  in SetTensorData()
    35  *tensor_data = num;  in SetTensorData()
    36  ++tensor_data;  in SetTensorData()
|
D | test_ops_strided_slice.cc |
    31  auto tensor_data = reinterpret_cast<T *>(data);  in SetTensorData() local
    32  MS_EXCEPTION_IF_NULL(tensor_data);  in SetTensorData()
    34  *tensor_data = num[index];  in SetTensorData()
|
/third_party/mindspore/mindspore/lite/tools/converter/parser/tf/ |
D | tf_model_parser.cc |
    119  auto tensor_data = reinterpret_cast<float *>((*tensor_info)->data_c());  in SetFloatTensorInfo() local
    120  if (tensor_data == nullptr) {  in SetFloatTensorInfo()
    127  tensor_data[i] = tensor_proto.float_val(0);  in SetFloatTensorInfo()
    132  if (::memcpy_s(tensor_data, (*tensor_info)->Size(), addr, shape_size * sizeof(float)) != EOK) {  in SetFloatTensorInfo()
    134  delete[] tensor_data;  in SetFloatTensorInfo()
    154  auto tensor_data = reinterpret_cast<int *>((*tensor_info)->data_c());  in SetInt32TensorInfo() local
    155  if (tensor_data == nullptr) {  in SetInt32TensorInfo()
    162  tensor_data[i] = tensor_proto.int_val(0);  in SetInt32TensorInfo()
    167  … if (::memcpy_s(tensor_data, (*tensor_info)->Size(), addr, shape_size * sizeof(int32_t)) != EOK) {  in SetInt32TensorInfo()
    169  delete[] tensor_data;  in SetInt32TensorInfo()
    [all …]
|
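SetFloatTensorInfo above either broadcasts tensor_proto.float_val(0) into every element or bulk-copies the raw proto buffer with memcpy_s, checking the result against EOK. Below is a simplified sketch of those two paths; FillFloatTensor is a hypothetical helper, and std::memcpy plus an explicit size check stands in for the bounds-checked memcpy_s used by the converter.

// Sketch of the two fill paths: broadcast one scalar, or bulk-copy a raw buffer.
#include <cstddef>
#include <cstring>
#include <vector>
#include <iostream>

bool FillFloatTensor(float *tensor_data, size_t elem_count,
                     const float *raw, size_t raw_count) {
  if (tensor_data == nullptr) {
    return false;
  }
  if (raw_count == 1) {
    // Single value in the proto: broadcast it to the whole tensor.
    for (size_t i = 0; i < elem_count; ++i) {
      tensor_data[i] = raw[0];
    }
    return true;
  }
  if (raw_count != elem_count) {
    return false;
  }
  // Bulk copy; the converter uses memcpy_s(dst, dst_size, src, n) != EOK instead.
  std::memcpy(tensor_data, raw, elem_count * sizeof(float));
  return true;
}

int main() {
  std::vector<float> dst(4, 0.0f);
  const float scalar = 2.5f;
  FillFloatTensor(dst.data(), dst.size(), &scalar, 1);   // broadcast path
  for (float v : dst) std::cout << v << ' ';             // prints: 2.5 2.5 2.5 2.5
  std::cout << '\n';
  return 0;
}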
/third_party/mindspore/mindspore/lite/tools/optimizer/graph/ |
D | node_infershape.cc |
    221  std::vector<int> tensor_data;  in GetIntVecInput() local
    232  tensor_data.resize(static_cast<size_t>(specify_tensors.front()->shape()[0]));  in GetIntVecInput()
    233  …if (memcpy_s(tensor_data.data(), tensor_data.size() * sizeof(int), specify_tensors.front()->data(),  in GetIntVecInput()
    234  tensor_data.size() * sizeof(int)) != EOK) {  in GetIntVecInput()
    238  return tensor_data;  in GetIntVecInput()
    454  auto tensor_data = reinterpret_cast<char *>(malloc(tensor_size));  in ConvertToLiteTensor() local
    455  if (tensor_data == nullptr) {  in ConvertToLiteTensor()
    460  if (memcpy_s(tensor_data, tensor_size, data_info.data_.data(), tensor_size) != EOK) {  in ConvertToLiteTensor()
    462  free(tensor_data);  in ConvertToLiteTensor()
    463  tensor_data = nullptr;  in ConvertToLiteTensor()
    [all …]
|
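GetIntVecInput above sizes a std::vector<int> from the tensor's first dimension and copies the tensor's raw data into it with memcpy_s. A hedged sketch of that read-out follows; ReadIntVector is a hypothetical helper, and a byte-count check plus std::memcpy stands in for memcpy_s/EOK.

// Sketch: materialise an int vector from a raw tensor buffer with a size check.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>
#include <iostream>

std::vector<int> ReadIntVector(const void *tensor_bytes, size_t tensor_byte_size, int64_t dim0) {
  std::vector<int> tensor_data;
  if (tensor_bytes == nullptr || dim0 <= 0) {
    return tensor_data;                          // empty result signals "no constant data"
  }
  tensor_data.resize(static_cast<size_t>(dim0));
  const size_t want = tensor_data.size() * sizeof(int);
  if (want > tensor_byte_size) {                 // the real code checks memcpy_s != EOK
    tensor_data.clear();
    return tensor_data;
  }
  std::memcpy(tensor_data.data(), tensor_bytes, want);
  return tensor_data;
}

int main() {
  const int src[] = {2, 3, 4};
  auto vec = ReadIntVector(src, sizeof(src), 3);
  for (int v : vec) std::cout << v << ' ';       // prints: 2 3 4
  std::cout << '\n';
  return 0;
}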
/third_party/mindspore/mindspore/lite/tools/optimizer/fusion/ |
D | tf_lstm_cell_fusion.cc |
    194  auto tensor_data = new (std::nothrow) float[static_cast<size_t>(param_num) * sizeof(float)];  in SetWeightAbstractAndDefault() local
    196  if (tensor_data == nullptr) {  in SetWeightAbstractAndDefault()
    203  …tensor_data[(i * hidden_size + j) * shape[2] + t] = data_ptr[t * shape[1] + data_diff[i] * hidden_…  in SetWeightAbstractAndDefault()
    208  …lite::CreateTensorInfo(tensor_data, static_cast<size_t>(param_num) * sizeof(float), shape, kNumber…  in SetWeightAbstractAndDefault()
    209  delete[] tensor_data;  in SetWeightAbstractAndDefault()
    310  auto tensor_data = std::make_unique<float[]>(static_cast<size_t>(hidden_size) * 8);  in PopulateBiasNode() local
    311  MS_CHECK_TRUE_RET(tensor_data != nullptr, lite::RET_ERROR);  in PopulateBiasNode()
    326  tensor_data[i * hidden_size + j] = data_ptr[data_diff[i] * hidden_size + j];  in PopulateBiasNode()
    328  tensor_data[i * hidden_size + j] += forget_bias_value;  in PopulateBiasNode()
    331  tensor_data[i * hidden_size + j] = 0.0f;  in PopulateBiasNode()
    [all …]
|
D | tf_bidirection_gru_fusion.h |
    68  … const std::vector<int> &shape, TypeId type, void **tensor_data);
    74  float *tensor_data);
|
D | constant_folding_fusion.cc |
    121  auto tensor_data = tensor->MutableData();  in GetCNodeInputTensors() local
    122  if (tensor_data == nullptr) {  in GetCNodeInputTensors()
    127  …if (memcpy_s(tensor_data, tensor->Size(), data_info.data_.data(), data_info.data_.size()) != EOK) {  in GetCNodeInputTensors()
    153  auto tensor_data = static_cast<uint8_t *>(tensor_info->data_c());  in CreateNewParamter() local
    154  auto ret = memcpy_s(tensor_data, tensor_info->Size(), tensor->data(), tensor->Size());  in CreateNewParamter()
|
/third_party/mindspore/mindspore/ccsrc/debug/ |
D | debug_services.cc |
    837  auto tensor_data = std::make_shared<TensorData>();  in GetTensorDataInfoAsync() local
    838  tensor_data->SetName(orig_name);  in GetTensorDataInfoAsync()
    839  tensor_data->SetExecutionOrder(0);  in GetTensorDataInfoAsync()
    840  tensor_data->SetSlot(slot);  in GetTensorDataInfoAsync()
    841  tensor_data->SetIteration(iteration);  in GetTensorDataInfoAsync()
    842  tensor_data->SetDeviceId(device_id);  in GetTensorDataInfoAsync()
    843  tensor_data->SetRootGraphId(root_graph_id);  in GetTensorDataInfoAsync()
    844  tensor_data->SetDataPtr(nullptr);  in GetTensorDataInfoAsync()
    845  tensor_data->SetByteSize(0);  in GetTensorDataInfoAsync()
    846  tensor_data->SetType("");  in GetTensorDataInfoAsync()
    [all …]
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/ascend/ |
D | ascend_device_address.cc |
    551  auto tensor_data = std::make_shared<mindspore::TensorData>();  in LoadMemToHost() local
    552  MS_EXCEPTION_IF_NULL(tensor_data);  in LoadMemToHost()
    553  tensor_data->SetName(tensor_name);  in LoadMemToHost()
    554  tensor_data->SetExecutionOrder(execution_order);  in LoadMemToHost()
    555  tensor_data->SetSlot(slot);  in LoadMemToHost()
    566  tensor_data->SetTensor(out_tensor);  in LoadMemToHost()
    567  tensor_data->SetDataPtr(static_cast<char *>(out_tensor->data_c()));  in LoadMemToHost()
    568  tensor_data->SetByteSize(LongToSize(out_tensor->data().nbytes()));  in LoadMemToHost()
    569  tensor_data->SetType((unsigned int)host_type);  in LoadMemToHost()
    570  tensor_data->SetShape(out_tensor->shape());  in LoadMemToHost()
    [all …]
|
/third_party/mindspore/tests/ut/cpp/transform/ |
D | transform_base_test.cc |
    62  py::array tensor_data = TensorPy::AsNumpy(*tensor);  in PrintMeTensor() local
    63  std::cout << std::string(py::str(tensor_data)) << std::endl;  in PrintMeTensor()
    65  std::cout << "tensor dtype is: " << py::str(tensor_data.dtype()) << std::endl;  in PrintMeTensor()
|
/third_party/mindspore/mindspore/ccsrc/utils/ |
D | convert_utils.cc |
    48  bool *tensor_data = static_cast<bool *>(tensor->data_c());  in ValueToBool() local
    50  auto vb = tensor_data[0];  in ValueToBool()
    65  auto *tensor_data = static_cast<int32_t *>(tensor->data_c());  in BaseRefToInt() local
    66  auto vb = tensor_data[0];  in BaseRefToInt()
    69  auto *tensor_data = static_cast<int64_t *>(tensor->data_c());  in BaseRefToInt() local
    70  auto vb = tensor_data[0];  in BaseRefToInt()
|
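ValueToBool and BaseRefToInt above cast tensor->data_c() to the element type implied by the tensor's dtype and read element 0. Here is a minimal sketch of that scalar read; ScalarTypeSketch and ReadFirstElement are hypothetical names standing in for MindSpore's TypeId dispatch.

// Sketch: interpret a raw host buffer by dtype and return its first element.
#include <cstdint>
#include <iostream>

enum ScalarTypeSketch { kScalarBool, kScalarInt32, kScalarInt64 };  // stand-ins for TypeId values

int64_t ReadFirstElement(ScalarTypeSketch type, const void *data_c) {
  switch (type) {
    case kScalarBool:
      return static_cast<const bool *>(data_c)[0] ? 1 : 0;
    case kScalarInt32:
      return static_cast<const int32_t *>(data_c)[0];
    case kScalarInt64:
      return static_cast<const int64_t *>(data_c)[0];
  }
  return 0;
}

int main() {
  int32_t v = 42;
  std::cout << ReadFirstElement(kScalarInt32, &v) << '\n';  // prints: 42
  return 0;
}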
/third_party/mindspore/mindspore/lite/tools/converter/quantizer/ |
D | full_quant_quantizer.cc |
    965  const auto *tensor_data = static_cast<const float *>(tensor->MutableData());  in DoInference() local
    966  MS_CHECK_TRUE_MSG(tensor_data != nullptr, false, "tensor_data is nullptr.");  in DoInference()
    968  vector<float> data(tensor_data, tensor_data + elem_count);  in DoInference()
    996  const auto *tensor_data = static_cast<const float *>(tensor->MutableData());  in DoInference() local
    997  CHECK_NULL_RETURN(tensor_data);  in DoInference()
    999  vector<float> data(tensor_data, tensor_data + elem_count);  in DoInference()
    1220  const auto *tensor_data = static_cast<const float *>(tensor->MutableData());  in CollectDataFrequency() local
    1221  MS_ASSERT(tensor_data != nullptr);  in CollectDataFrequency()
    1223  vector<float> data(tensor_data, tensor_data + elem_count);  in CollectDataFrequency()
    1245  const auto *tensor_data = static_cast<const float *>(tensor->MutableData());  in CollectDataFrequency() local
    [all …]
|
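The quantizer snippets above copy a tensor's float buffer into an owned std::vector<float> with the two-pointer range constructor before computing statistics on it. A small self-contained sketch of that copy-out step (CopyTensorToVector is a hypothetical helper):

// Sketch: copy a raw float buffer into an owned vector for later statistics.
#include <cstddef>
#include <vector>
#include <numeric>
#include <iostream>

std::vector<float> CopyTensorToVector(const float *tensor_data, size_t elem_count) {
  if (tensor_data == nullptr || elem_count == 0) {
    return {};
  }
  return std::vector<float>(tensor_data, tensor_data + elem_count);  // range constructor copies all elements
}

int main() {
  const float buf[] = {0.5f, 1.5f, 2.0f};
  auto data = CopyTensorToVector(buf, 3);
  float sum = std::accumulate(data.begin(), data.end(), 0.0f);
  std::cout << data.size() << " elements, sum = " << sum << '\n';    // 3 elements, sum = 4
  return 0;
}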
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/ir_fusion/ |
D | avgpool_3d_fusion.cc |
    168  auto tensor_data = reinterpret_cast<float16 *>(tensor->data_c());  in ConstructMultiplier() local
    196  *tensor_data = float16(val);  in ConstructMultiplier()
    197  ++tensor_data;  in ConstructMultiplier()
    223  auto tensor_data = reinterpret_cast<float16 *>(assist_tensor->data_c());  in ConstructFilterValueNode() local
    228  *tensor_data = float16(t);  in ConstructFilterValueNode()
    229  ++tensor_data;  in ConstructFilterValueNode()
|
/third_party/mindspore/tests/ut/python/train/summary/ |
D | test_histogram_summary.py |
    133  tensor_data = rng.normal(size=[dim, dim, dim, dim])
    134  test_data = _wrap_test_data(Tensor(tensor_data))
    143  assert event.summary.value[0].histogram.count == tensor_data.size
|
/third_party/mindspore/mindspore/offline_debug/ |
D | dbg_services.py |
    284  … tensor_data = TensorData(b'', elem.get_data_size(), elem.get_dtype(), elem.get_shape())
    286  …tensor_data = TensorData(elem.get_data_ptr(), elem.get_data_size(), elem.get_dtype(), elem.get_sha…
    287  tensor_data_list_ret.append(tensor_data)
    541  self.instance = cds.tensor_data(data_ptr, data_size, dtype, shape)
|