/third_party/mindspore/mindspore/lite/tools/converter/quantizer/
  weight_quantizer.cc
    61   STATUS WeightQuantizer::SetAbstract(const tensor::TensorPtr &tensor_info, const ParameterPtr &param…    in SetAbstract() argument
    63   MS_CHECK_TRUE_MSG(tensor_info != nullptr, RET_NULL_PTR, "tensor_info is nullptr.");    in SetAbstract()
    68   tensor_info->set_data_type(type_id_);    in SetAbstract()
    84   weight_quantized_tensors_.insert({tensor_info, param_node});    in SetAbstract()
    101  tensor::TensorPtr tensor_info;    in DoConvQuantize() local
    103  GetLiteParameter(input_node, &param_node, &tensor_info);    in DoConvQuantize()
    104  if (param_node == nullptr || tensor_info == nullptr) {    in DoConvQuantize()
    109  if (tensor_info->data_type() != mindspore::kNumberTypeFloat32) {    in DoConvQuantize()
    110  …< cnode->fullname_with_scope() << " weight data type is not fp32 but " << tensor_info->data_type();    in DoConvQuantize()
    116  …status = MixedBitQuantFilter(tensor_info, primitive, QuantType_QUANT_WEIGHT, WeightQuantType::MIXE…    in DoConvQuantize()
    [all …]

/third_party/mindspore/mindspore/lite/tools/common/
  tensor_util.cc
    54   tensor::TensorPtr tensor_info = nullptr;    in CreateTensorInfo() local
    57   tensor_info = std::make_shared<tensor::Tensor>(data_type, scalar_shape);    in CreateTensorInfo()
    58   if (tensor_info == nullptr) {    in CreateTensorInfo()
    62   tensor_info->set_shape({});    in CreateTensorInfo()
    64   tensor_info = std::make_shared<tensor::Tensor>(data_type, shape);    in CreateTensorInfo()
    66   if (tensor_info == nullptr) {    in CreateTensorInfo()
    71   return tensor_info;    in CreateTensorInfo()
    77   auto ret = memcpy_s(tensor_info->data_c(), tensor_info->data().nbytes(), data, data_size);    in CreateTensorInfo()
    82   return tensor_info;    in CreateTensorInfo()
    86   auto tensor_info = CreateTensorInfo(nullptr, 0, shape, data_type);    in CreateTensorAbstract() local
    [all …]
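
The matches above outline the CreateTensorInfo helper: allocate a tensor for the requested shape and data type (with a scalar special case), then either return it empty or copy the caller's buffer in with a bounds-checked memcpy_s. Below is a minimal standalone sketch of that allocate-then-guarded-copy pattern, using standard-library containers in place of the real tensor::Tensor class; every name in it is illustrative, not the project's API.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <memory>
#include <vector>

// Simplified stand-in for tensor::Tensor; the real class lives in MindSpore core.
struct SimpleTensor {
  std::vector<int64_t> shape;
  std::vector<uint8_t> buffer;  // raw storage, sized from shape * element size
};

// Sketch of the CreateTensorInfo pattern: allocate first, then copy only if the
// caller supplied data, refusing to copy more bytes than the tensor can hold.
std::shared_ptr<SimpleTensor> CreateTensorInfoSketch(const void *data, size_t data_size,
                                                     const std::vector<int64_t> &shape,
                                                     size_t element_size) {
  auto tensor = std::make_shared<SimpleTensor>();
  // Scalar case: an empty shape still owns one element of storage.
  size_t element_count = 1;
  for (auto dim : shape) {
    if (dim <= 0) return nullptr;
    element_count *= static_cast<size_t>(dim);
  }
  tensor->shape = shape;
  tensor->buffer.resize(element_count * element_size);
  if (data == nullptr || data_size == 0) {
    return tensor;  // caller fills the buffer later, as CreateTensorAbstract does
  }
  if (data_size > tensor->buffer.size()) {
    std::cerr << "data_size exceeds tensor storage\n";  // memcpy_s would fail here
    return nullptr;
  }
  std::memcpy(tensor->buffer.data(), data, data_size);
  return tensor;
}

int main() {
  const float values[4] = {1.f, 2.f, 3.f, 4.f};
  auto t = CreateTensorInfoSketch(values, sizeof(values), {2, 2}, sizeof(float));
  std::cout << (t != nullptr ? "created" : "failed") << "\n";
  return 0;
}
```

In the real helper the scalar case appears to be handled by allocating with a placeholder scalar_shape and then calling set_shape({}) (lines 57 and 62 above); the sketch folds that into the element-count computation.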

/third_party/mindspore/tests/ut/python/debugger/gpu_tests/
  test_read_tensors.py
    71   cls.tensor_info = [info1, info2, info3, info4]
    72   cls.temp_dir = build_dump_structure(tensor_name, tensor_list, "Test", cls.tensor_info)
    93   tensor_data = debugger_backend.read_tensors(self.tensor_info)
    95   self.print_read_tensors(self.tensor_info, tensor_data, 0, False)
    97   self.compare_expect_actual_result(self.tensor_info, tensor_data, 0)
    114  tensor_data = debugger_backend.read_tensors(self.tensor_info)
    116  self.compare_expect_actual_result(self.tensor_info, tensor_data, 0)
    133  for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
    136  assert tensor_info.node_name == info['tensor_info']['node_name']
    137  assert tensor_info.slot == info['tensor_info']['slot']
    [all …]

  test_read_tensors_nonexist_node.py
    49   tensor_info = [info1, info2]
    51   cls.temp_dir = build_dump_structure(tensor_name, tensor_list, "Test", tensor_info)
    117  for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
    120  assert tensor_info.node_name == info['tensor_info']['node_name']
    121  assert tensor_info.slot == info['tensor_info']['slot']
    122  assert tensor_info.iteration == info['tensor_info']['iteration']
    123  assert tensor_info.rank_id == info['tensor_info']['rank_id']
    124  assert tensor_info.root_graph_id == info['tensor_info']['root_graph_id']
    125  assert tensor_info.is_output == info['tensor_info']['is_output']
    135  for x, (tensor_info, tensor_data) in enumerate(zip(tensor_info_list, tensor_data_list)):
    [all …]

  test_sync_read_tensors_base_stat.py
    63   cls.tensor_info = [info1, info2, info3, info4]
    66   "Test", cls.tensor_info)
    114  def compare_expect_actual_result(self, tensor_info, tensor_base_data_list, tensor_stat_data_list, …    argument
    121  for x, (tensor_info_item, tensor_base, tensor_stat) in enumerate(zip(tensor_info,
    152  def print_read_tensors(self, tensor_info, tensor_base_data_list, tensor_stat_data_list, test_index…    argument
    154  for x, (tensor_info_item, tensor_base, tensor_stat) in enumerate(zip(tensor_info,

/third_party/mindspore/tests/st/debugger/
  test_sync_trans_true_read_tensor.py
    53   tensor_info = [info1, info2, info3]
    55   tensor_data = debugger_backend.read_tensors(tensor_info)
    57   print_read_tensors(tensor_info, tensor_data)
    62   def print_read_tensors(tensor_info, tensor_data):    argument
    68   for x, _ in enumerate(tensor_info):
    71   f_write.write("node name = " + tensor_info[x].node_name + "\n")
    72   f_write.write("slot = " + str(tensor_info[x].slot) + "\n")
    73   f_write.write("iteration = " + str(tensor_info[x].iteration) + "\n")
    74   f_write.write("device_id = " + str(tensor_info[x].device_id) + "\n")
    75   f_write.write("root_graph_id = " + str(tensor_info[x].root_graph_id) + "\n")
    [all …]

  test_sync_trans_false_read_tensors.py
    53   tensor_info = [info1, info2, info3]
    55   tensor_data = debugger_backend.read_tensors(tensor_info)
    57   print_read_tensors(tensor_info, tensor_data)
    62   def print_read_tensors(tensor_info, tensor_data):    argument
    68   for x, _ in enumerate(tensor_info):
    71   f_write.write("node name = " + tensor_info[x].node_name + "\n")
    72   f_write.write("slot = " + str(tensor_info[x].slot) + "\n")
    73   f_write.write("iteration = " + str(tensor_info[x].iteration) + "\n")
    74   f_write.write("device_id = " + str(tensor_info[x].device_id) + "\n")
    75   f_write.write("root_graph_id = " + str(tensor_info[x].root_graph_id) + "\n")
    [all …]

  test_async_sink_mode_true_read_tensors.py
    50   tensor_info = [info1, info2]
    52   tensor_data = debugger_backend.read_tensors(tensor_info)
    54   print_read_tensors(tensor_info, tensor_data)
    59   def print_read_tensors(tensor_info, tensor_data):    argument
    65   for x, _ in enumerate(tensor_info):
    68   f_write.write("node name = " + tensor_info[x].node_name + "\n")
    69   f_write.write("slot = " + str(tensor_info[x].slot) + "\n")
    70   f_write.write("iteration = " + str(tensor_info[x].iteration) + "\n")
    71   f_write.write("device_id = " + str(tensor_info[x].device_id) + "\n")
    72   f_write.write("root_graph_id = " + str(tensor_info[x].root_graph_id) + "\n")
    [all …]

/third_party/mindspore/mindspore/lite/test/ut/tools/converter/registry/parser/
  model_parser_test.cc
    72   auto tensor_info = std::make_shared<tensor::Tensor>(TypeId::kNumberTypeFloat32, shape);    in BuildGraphInputs() local
    73   if (tensor_info == nullptr) {    in BuildGraphInputs()
    77   parameter->set_abstract(tensor_info->ToAbstract());    in BuildGraphInputs()
    120  auto tensor_info = std::make_shared<tensor::Tensor>(TypeId::kNumberTypeFloat32, shape);    in BuildGraphNodes() local
    121  auto size = tensor_info->Size();    in BuildGraphNodes()
    122  memset_s(tensor_info->data_c(), size, 0, size);    in BuildGraphNodes()
    123  parameter->set_abstract(tensor_info->ToAbstract());    in BuildGraphNodes()
    124  parameter->set_default_param(tensor_info);    in BuildGraphNodes()
    132  auto tensor_info = std::make_shared<tensor::Tensor>(TypeId::kNumberTypeFloat32, ShapeVector{});    in BuildGraphNodes() local
    133  cnode->set_abstract(tensor_info->ToAbstract());    in BuildGraphNodes()

/third_party/mindspore/mindspore/lite/tools/converter/parser/tf/
  tf_model_parser.cc
    107  …S SetFloatTensorInfo(const tensorflow::TensorProto &tensor_proto, tensor::TensorPtr *tensor_info) {    in SetFloatTensorInfo() argument
    114  *tensor_info = CreateTensorInfo(nullptr, 0, shape_vector, kNumberTypeFloat32);    in SetFloatTensorInfo()
    115  if (*tensor_info == nullptr) {    in SetFloatTensorInfo()
    119  auto tensor_data = reinterpret_cast<float *>((*tensor_info)->data_c());    in SetFloatTensorInfo()
    132  if (::memcpy_s(tensor_data, (*tensor_info)->Size(), addr, shape_size * sizeof(float)) != EOK) {    in SetFloatTensorInfo()
    142  …S SetInt32TensorInfo(const tensorflow::TensorProto &tensor_proto, tensor::TensorPtr *tensor_info) {    in SetInt32TensorInfo() argument
    149  *tensor_info = CreateTensorInfo(nullptr, 0, shape_vector, kNumberTypeInt32);    in SetInt32TensorInfo()
    150  if (*tensor_info == nullptr) {    in SetInt32TensorInfo()
    154  auto tensor_data = reinterpret_cast<int *>((*tensor_info)->data_c());    in SetInt32TensorInfo()
    167  … if (::memcpy_s(tensor_data, (*tensor_info)->Size(), addr, shape_size * sizeof(int32_t)) != EOK) {    in SetInt32TensorInfo()
    [all …]

/third_party/mindspore/mindspore/lite/tools/anf_exporter/
  fetch_content.cc
    36   STATUS GetShapeVectorFromStringTensor(const tensor::TensorPtr &tensor_info, ShapeVector *shape_vect…    in GetShapeVectorFromStringTensor() argument
    37   MS_ASSERT(tensor_info != nullptr && shape_vector != nullptr && offset != nullptr);    in GetShapeVectorFromStringTensor()
    38   auto data_type = tensor_info->data_type();    in GetShapeVectorFromStringTensor()
    44   …MS_CHECK_TRUE_MSG(tensor_info->data_c() != nullptr, RET_ERROR, "tensor_info->data_c() is nullptr");    in GetShapeVectorFromStringTensor()
    45   auto tensor_data = reinterpret_cast<uint8_t *>(tensor_info->data_c());    in GetShapeVectorFromStringTensor()
    50   for (; *offset < tensor_info->Size(); (*offset)++) {    in GetShapeVectorFromStringTensor()
    63   for (; *offset < tensor_info->Size(); (*offset)++) {    in GetShapeVectorFromStringTensor()
    244  auto tensor_info = std::dynamic_pointer_cast<tensor::Tensor>(param_node->default_param());    in FetchFromDefaultParam() local
    247  status = GetShapeVectorFromStringTensor(tensor_info, &shape_vector, &offset);    in FetchFromDefaultParam()
    255  if (tensor_info != nullptr && tensor_info->Size() != 0) {    in FetchFromDefaultParam()
    [all …]

/third_party/mindspore/mindspore/lite/tools/converter/parser/onnx/
  onnx_given_tensor_fill_parser.cc
    48   …auto tensor_info = CreateTensorInfo(iter->ints().data(), data_size, shape_vector, kNumberTypeInt64…    in ParseInt8GivenIntTensorFill() local
    49   if (tensor_info == nullptr) {    in ParseInt8GivenIntTensorFill()
    53   prim->set_attr("const_data", tensor_info);    in ParseInt8GivenIntTensorFill()
    71   auto tensor_info = CreateTensorInfo(iter->s().data(), data_count, shape_vector, kNumberTypeUInt8);    in ParseInt8GivenTensorFill() local
    72   if (tensor_info == nullptr) {    in ParseInt8GivenTensorFill()
    76   prim->set_attr("const_data", tensor_info);    in ParseInt8GivenTensorFill()

  onnx_constant_parser.cc
    37   auto tensor_info = std::make_shared<tensor::Tensor>(data_type, shape_vector);    in AddDataInfoAttr() local
    38   if (tensor_info == nullptr) {    in AddDataInfoAttr()
    42   if (OnnxNodeParser::CopyOnnxTensorData(onnx_const_tensor, tensor_info) != RET_OK) {    in AddDataInfoAttr()
    46   prim->set_attr("const_data", tensor_info);    in AddDataInfoAttr()

  onnx_inputs_adjust.cc
    97   auto tensor_info = default_value->cast<tensor::TensorPtr>();    in ReplaceInt64ParameterNode() local
    98   if (tensor_info == nullptr) {    in ReplaceInt64ParameterNode()
    102  auto param_node_new = opt::BuildParameterNode(func_graph, param_node, tensor_info);    in ReplaceInt64ParameterNode()
    121  auto tensor_info = std::dynamic_pointer_cast<tensor::Tensor>(param_node->default_param());    in ValidParameterNode() local
    122  MS_ASSERT(tensor_info != nullptr);    in ValidParameterNode()
    123  return tensor_info->Size() != 0;    in ValidParameterNode()
    148  auto tensor_info = primitive_c->GetAttr("const_data");    in ReplaceConstant() local
    149  if (tensor_info == nullptr) {    in ReplaceConstant()
    153  auto tensor_info_ptr = tensor_info->cast<tensor::TensorPtr>();    in ReplaceConstant()

  onnx_model_parser.cc
    84   auto tensor_info = CreateTensorInfo(tensor_data, sizeof(int), {1}, kNumberTypeInt32);    in CreateConstParamter() local
    85   if (tensor_info == nullptr) {    in CreateConstParamter()
    93   const_node->set_default_param(tensor_info);    in CreateConstParamter()
    195  auto tensor_info = std::make_shared<tensor::Tensor>(data_type, shape_vector);    in BuildParameterNode() local
    196  MS_CHECK_TRUE_MSG(tensor_info != nullptr, RET_NULL_PTR, "create tensor_info return nullptr");    in BuildParameterNode()
    200  auto status = OnnxNodeParser::CopyOnnxTensorData(tensor, tensor_info);    in BuildParameterNode()
    205  parameter_node->set_default_param(tensor_info);    in BuildParameterNode()
    878  auto tensor_info = parameter->default_param()->cast<tensor::TensorPtr>();    in BuildCNode() local
    879  …auto copy_tensor_info = CreateTensorInfo(tensor_info->data_c(), tensor_info->Size(), tensor_info->…    in BuildCNode()
    880  tensor_info->data_type());    in BuildCNode()
    [all …]
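
Several of the ONNX parser matches share one pattern: a constant tensor is attached to a primitive under the "const_data" attribute at parse time, and later passes retrieve it with GetAttr plus a cast, guarding both the lookup and the cast against nullptr. Below is a minimal standalone sketch of that attribute round-trip, with a toy value hierarchy standing in for MindSpore's Value and Tensor classes; all names in it are illustrative.

```cpp
#include <iostream>
#include <map>
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Toy stand-ins for the Value/Tensor hierarchy used by the real parsers.
struct Value {
  virtual ~Value() = default;
};
struct TensorValue : Value {
  std::vector<int64_t> shape;
};

// Toy primitive with a string-keyed attribute map, mirroring set_attr / GetAttr.
class Primitive {
 public:
  void set_attr(const std::string &key, std::shared_ptr<Value> value) { attrs_[key] = std::move(value); }
  std::shared_ptr<Value> GetAttr(const std::string &key) const {
    auto it = attrs_.find(key);
    return it == attrs_.end() ? nullptr : it->second;
  }

 private:
  std::map<std::string, std::shared_ptr<Value>> attrs_;
};

int main() {
  // Parse time: attach the constant tensor under "const_data".
  Primitive prim;
  auto tensor_info = std::make_shared<TensorValue>();
  tensor_info->shape = {1, 3};
  prim.set_attr("const_data", tensor_info);

  // Later pass: look the attribute up again, guarding both steps.
  auto attr = prim.GetAttr("const_data");
  if (attr == nullptr) {
    std::cerr << "const_data attr is missing\n";
    return 1;
  }
  auto tensor_ptr = std::dynamic_pointer_cast<TensorValue>(attr);
  if (tensor_ptr == nullptr) {
    std::cerr << "const_data attr is not a tensor\n";
    return 1;
  }
  std::cout << "recovered tensor with rank " << tensor_ptr->shape.size() << "\n";
  return 0;
}
```

ReplaceConstant in onnx_inputs_adjust.cc (lines 148–153 above) is the consuming side of this round-trip: GetAttr("const_data") followed by a cast to tensor::TensorPtr, each checked against nullptr.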

/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/base/
  group_convolution_creator.cc
    83   lite::Tensor *CreateVarTensor(const TensorInfo &tensor_info, bool inferred) {    in CreateVarTensor() argument
    89   tensor->set_data_type(tensor_info.data_type_);    in CreateVarTensor()
    90   tensor->set_format(tensor_info.format_);    in CreateVarTensor()
    91   tensor->set_category(tensor_info.tensor_type_);    in CreateVarTensor()
    92   if (tensor_info.is_in_) {    in CreateVarTensor()
    93   tensor->set_shape(tensor_info.shape_);    in CreateVarTensor()
    98   if (!tensor_info.is_in_) {    in CreateVarTensor()
    99   tensor->set_shape(tensor_info.shape_);    in CreateVarTensor()

/third_party/mindspore/mindspore/lite/tools/optimizer/common/
  gllo_utils.cc
    534  auto tensor_info =    in AddNewBiasNode() local
    536  if (tensor_info == nullptr) {    in AddNewBiasNode()
    540  auto status = lite::InitParameterFromTensorInfo(bias_parameter, tensor_info);    in AddNewBiasNode()
    569  auto tensor_info = param->default_param()->cast<tensor::TensorPtr>();    in GetTensorInfo() local
    570  return tensor_info;    in GetTensorInfo()
    636  STATUS GetTensorInfoFromAbstract(tensor::TensorPtr *tensor_info, const CNodePtr &cnode, size_t inde…    in GetTensorInfoFromAbstract() argument
    637  CHECK_NULL_RETURN(tensor_info);    in GetTensorInfoFromAbstract()
    653  *tensor_info = utils::cast<tensor::TensorPtr>(abstract_tensor->GetValueTrack());    in GetTensorInfoFromAbstract()
    654  if (*tensor_info == nullptr) {    in GetTensorInfoFromAbstract()
    892  const tensor::TensorPtr &tensor_info) {    in BuildParameterNode() argument
    [all …]

/third_party/mindspore/mindspore/core/load_mindir/
  anf_model_parser.cc
    243  tensor::TensorPtr tensor_info =    in BuildTensorInfoForFuncGraph() local
    245  return tensor_info;    in BuildTensorInfoForFuncGraph()
    261  tensor::TensorPtr tensor_info = BuildTensorInfoForFuncGraph(parameter_proto);    in BuildParameterForFuncGraph() local
    262  MS_EXCEPTION_IF_NULL(tensor_info);    in BuildParameterForFuncGraph()
    265  load_tensor_map_[debug_info_name] = tensor_info;    in BuildParameterForFuncGraph()
    278  tensor_info->set_param_info(param_info);    in BuildParameterForFuncGraph()
    280  auto tensor_abstract = tensor_info->ToAbstract();    in BuildParameterForFuncGraph()
    285  auto *tensor_data_buf = reinterpret_cast<uint8_t *>(tensor_info->data_c());    in BuildParameterForFuncGraph()
    287  …auto ret = memcpy_s(tensor_data_buf, static_cast<size_t>(tensor_info->data().nbytes()), initial_da…    in BuildParameterForFuncGraph()
    294  node->set_default_param(tensor_info);    in BuildParameterForFuncGraph()
    [all …]

/third_party/mindspore/mindspore/lite/tools/optimizer/fusion/
  conv_conv_fusion.cc
    88   …auto tensor_info = lite::CreateTensorInfo(nullptr, 0, {new_bias_size}, up_bias_param->data_type());    in GenNewConvBias() local
    89   if (tensor_info == nullptr) {    in GenNewConvBias()
    93   MS_ASSERT(tensor_info->data_c() != nullptr);    in GenNewConvBias()
    94   auto new_bias_data = static_cast<float *>(tensor_info->data_c());    in GenNewConvBias()
    95   if (memset_s(new_bias_data, tensor_info->Size(), 0, new_bias_size * sizeof(float)) != EOK) {    in GenNewConvBias()
    109  auto status = lite::InitParameterFromTensorInfo(new_bias_node, tensor_info);    in GenNewConvBias()
    137  …auto tensor_info = lite::CreateTensorInfo(nullptr, 0, new_weight_shape, up_weight_param->data_type…    in GenNewConvWeight() local
    138  if (tensor_info == nullptr) {    in GenNewConvWeight()
    142  MS_ASSERT(tensor_info->data_c() != nullptr);    in GenNewConvWeight()
    143  auto new_weight_data = static_cast<float *>(tensor_info->data_c());    in GenNewConvWeight()
    [all …]

  batchmatmul_fusion.cc
    43   auto tensor_info = param_input->default_param()->cast<tensor::TensorPtr>();    in GetInputAddr() local
    44   if (tensor_info == nullptr) {    in GetInputAddr()
    48   return tensor_info->data_c();    in GetInputAddr()
    68   auto tensor_info = lite::CreateTensorInfo(nullptr, 0, shape_vector, fc_weight_param->data_type());    in GetRightMatmulInputParamter() local
    69   if (tensor_info == nullptr) {    in GetRightMatmulInputParamter()
    79   if (EOK != memcpy_s(static_cast<int8_t *>(tensor_info->data_c()) + (i - 1) * tensor_size,    in GetRightMatmulInputParamter()
    80   tensor_info->Size() - (i - 1) * tensor_size, tensor_addr, tensor_size)) {    in GetRightMatmulInputParamter()
    85   auto status = lite::InitParameterFromTensorInfo(rmatmul_input, tensor_info);    in GetRightMatmulInputParamter()
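
The batchmatmul_fusion.cc matches (lines 68–80 above) show individual matmul weights being copied into one freshly created tensor at offsets of (i - 1) * tensor_size, with the remaining size passed to memcpy_s as the destination bound. The following is a small self-contained sketch of that packing step, with std::vector buffers standing in for the lite tensors; the names are illustrative.

```cpp
#include <cstdint>
#include <cstring>
#include <iostream>
#include <vector>

// Sketch of the packing step in GetRightMatmulInputParamter: copy N equally sized
// weight slices into one contiguous buffer, checking the space that remains at the
// target offset before each copy (the real code passes that bound to memcpy_s).
bool PackWeightSlices(const std::vector<std::vector<int8_t>> &slices, std::vector<int8_t> *packed) {
  if (packed == nullptr || slices.empty()) {
    return false;
  }
  const size_t tensor_size = slices.front().size();
  packed->assign(slices.size() * tensor_size, 0);
  for (size_t i = 0; i < slices.size(); ++i) {
    if (slices[i].size() != tensor_size) {
      std::cerr << "slice " << i << " has an unexpected size\n";
      return false;
    }
    const size_t offset = i * tensor_size;
    const size_t remaining = packed->size() - offset;
    if (tensor_size > remaining) {  // mirrors the memcpy_s failure branch
      std::cerr << "not enough room left in the packed buffer\n";
      return false;
    }
    std::memcpy(packed->data() + offset, slices[i].data(), tensor_size);
  }
  return true;
}

int main() {
  std::vector<std::vector<int8_t>> slices = {{1, 2, 3, 4}, {5, 6, 7, 8}};
  std::vector<int8_t> packed;
  std::cout << (PackWeightSlices(slices, &packed) ? "packed " : "failed ") << packed.size() << " bytes\n";
  return 0;
}
```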

/third_party/mindspore/mindspore/lite/src/runtime/kernel/ascend310/src/
  model_process.cc
    350  … const std::vector<AclTensorInfo> &tensor_info, size_t dynamic_nums) {    in CheckTensorByTensorInfo() argument
    352  for (size_t i = 0; i < tensor_info.size(); ++i) {    in CheckTensorByTensorInfo()
    353  if (tensor[i].Shape() != tensor_info[i].dims) {    in CheckTensorByTensorInfo()
    354  …RROR) << "Note: input " << i << " shape not match, required " << ShapeToString(tensor_info[i].dims)    in CheckTensorByTensorInfo()
    358  if (tensor[i].DataType() != TransToDataType(tensor_info[i].data_type)) {    in CheckTensorByTensorInfo()
    360  << static_cast<int>(TransToDataType(tensor_info[i].data_type)) << ", given "    in CheckTensorByTensorInfo()
    364  if (tensor[i].DataSize() != tensor_info[i].buffer_size) {    in CheckTensorByTensorInfo()
    365  …MS_LOG(ERROR) << "Input " << i << " data size not match, required size " << tensor_info[i].buffer_…    in CheckTensorByTensorInfo()
    458  std::vector<AclTensorInfo> *tensor_info) {    in SortTensorInfoByName() argument
    459  if (tensor_info == nullptr) {    in SortTensorInfoByName()
    [all …]
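
CheckTensorByTensorInfo validates each user-supplied input against the model's expected metadata — shape, data type, and buffer size — and returns an error on the first mismatch. Here is a condensed standalone sketch of that validation loop; the struct and function names are stand-ins, not the real ACL types.

```cpp
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Minimal stand-ins for the expected metadata and the user-supplied tensor.
struct ExpectedTensorInfo {
  std::vector<int64_t> dims;
  int data_type;
  size_t buffer_size;
};
struct GivenTensor {
  std::vector<int64_t> shape;
  int data_type;
  size_t data_size;
};

// Sketch of the CheckTensorByTensorInfo pattern: validate shape, dtype, and byte
// size of every input against the model's metadata and fail fast on a mismatch.
bool CheckTensors(const std::vector<GivenTensor> &tensors, const std::vector<ExpectedTensorInfo> &infos) {
  if (tensors.size() != infos.size()) {
    std::cerr << "tensor count mismatch\n";
    return false;
  }
  for (size_t i = 0; i < infos.size(); ++i) {
    if (tensors[i].shape != infos[i].dims) {
      std::cerr << "input " << i << " shape does not match\n";
      return false;
    }
    if (tensors[i].data_type != infos[i].data_type) {
      std::cerr << "input " << i << " data type does not match\n";
      return false;
    }
    if (tensors[i].data_size != infos[i].buffer_size) {
      std::cerr << "input " << i << " data size does not match\n";
      return false;
    }
  }
  return true;
}

int main() {
  std::vector<ExpectedTensorInfo> infos = {{{1, 3, 224, 224}, 0, 1 * 3 * 224 * 224 * 4}};
  std::vector<GivenTensor> tensors = {{{1, 3, 224, 224}, 0, 1 * 3 * 224 * 224 * 4}};
  std::cout << (CheckTensors(tensors, infos) ? "inputs match\n" : "inputs mismatch\n");
  return 0;
}
```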

/third_party/mindspore/mindspore/ccsrc/backend/optimizer/mem_reuse/
  mem_swap_manager.cc
    42   TensorInfo tensor_info = {output_sizes[output_idx], kernel, output_idx};    in Init() local
    43   ordered_tensors_.push_back(tensor_info);    in Init()
    54   for (auto &tensor_info : ordered_tensors_) {    in Init() local
    55   if (cur_tensor_size != tensor_info.tensor_size_) {    in Init()
    56   cur_tensor_size = tensor_info.tensor_size_;    in Init()
    81   for (const auto &tensor_info : ordered_tensors_) {    in InitSwapThreshold() local
    82   size_t tensor_size = tensor_info.tensor_size_;    in InitSwapThreshold()
    86   if (!CheckDistanceBetweenKernels(tensor_info)) {    in InitSwapThreshold()
    90   accumulation += tensor_info.tensor_size_;    in InitSwapThreshold()
    129  bool MemSwapManager::CheckDistanceBetweenKernels(const TensorInfo &tensor_info) const {    in CheckDistanceBetweenKernels()
    [all …]
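
The mem_swap_manager.cc matches suggest the swap threshold is chosen by walking the tensors kept in ordered_tensors_, skipping those that fail CheckDistanceBetweenKernels, and accumulating sizes until enough memory is covered. The exact policy is not visible in this excerpt, so the following is only a rough sketch of that accumulate-until-threshold idea, with hypothetical names.

```cpp
#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

// Toy version of the per-tensor record the memory-swap manager keeps.
struct SwapTensorInfo {
  size_t tensor_size_;
  bool distance_ok_;  // stands in for CheckDistanceBetweenKernels()
};

// Rough sketch of the InitSwapThreshold loop: walk the tensors ordered by size,
// skip those whose producing and consuming kernels are too close together, and
// accumulate sizes until the requested amount of memory has been covered.
size_t PickSwapCandidates(std::vector<SwapTensorInfo> tensors, size_t required_bytes,
                          std::vector<size_t> *picked) {
  std::sort(tensors.begin(), tensors.end(),
            [](const SwapTensorInfo &a, const SwapTensorInfo &b) { return a.tensor_size_ > b.tensor_size_; });
  size_t accumulation = 0;
  for (size_t i = 0; i < tensors.size() && accumulation < required_bytes; ++i) {
    if (!tensors[i].distance_ok_) {
      continue;  // mirrors the CheckDistanceBetweenKernels early skip
    }
    accumulation += tensors[i].tensor_size_;
    if (picked != nullptr) {
      picked->push_back(i);
    }
  }
  return accumulation;
}

int main() {
  std::vector<SwapTensorInfo> tensors = {{4096, true}, {2048, false}, {1024, true}};
  std::vector<size_t> picked;
  size_t covered = PickSwapCandidates(tensors, 5000, &picked);
  std::cout << "covered " << covered << " bytes with " << picked.size() << " tensors\n";
  return 0;
}
```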

/third_party/mindspore/mindspore/lite/test/common/
  import_from_meta_graphT.cc
    59   tensor::TensorPtr tensor_info = std::make_shared<tensor::Tensor>(type_id, shape_vector);    in ConverterConstTensor() local
    60   if (tensor_info == nullptr) {    in ConverterConstTensor()
    67   auto tensor_data = static_cast<char *>(tensor_info->data_c());    in ConverterConstTensor()
    73   status = lite::InitParameterFromTensorInfo(parameter, tensor_info);    in ConverterConstTensor()
    76   status = lite::InitParameterFromTensorInfo(parameter, tensor_info);    in ConverterConstTensor()

/third_party/mindspore/mindspore/lite/tools/optimizer/graph/
  add_tensor_array.cc
    156  auto tensor_info = utils::cast<tensor::TensorPtr>(abstract_tensor->GetValueTrack());    in Process() local
    157  MS_ASSERT(tensor_info != nullptr);    in Process()
    158  if (tensor_info->data_type() == kObjectTypeTensorType) {    in Process()
    168  std::for_each(tensor_info->shape().begin(), tensor_info->shape().end(),    in Process()
    171  tensor_array->set_data_type(tensor_info->data_type());    in Process()

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/
  parameter_manager.cc
    199  static RankList GetGroupByTensorInfo(const TensorInfo &tensor_info) {    in GetGroupByTensorInfo() argument
    203  Shape dev_matrix_shape = tensor_info.tensor_layout().device_arrangement().array();    in GetGroupByTensorInfo()
    204  Shape tensor_map = tensor_info.tensor_layout().tensor_map().array();    in GetGroupByTensorInfo()
    223  TensorInfo tensor_info;    in GetParameterSliceInfo() local
    226  tensor_info = op_info->inputs_tensor_info()[param_index];    in GetParameterSliceInfo()
    233  tensor_info = op_info->inputs_tensor_info()[LongToSize(user_input_index - 1)];    in GetParameterSliceInfo()
    237  parameter_slice_info.slice_shape = tensor_info.slice_shape();    in GetParameterSliceInfo()
    238  parameter_slice_info.group_ranks = GetGroupByTensorInfo(tensor_info);    in GetParameterSliceInfo()
    240  << ", the slice shape is " << tensor_info.slice_shape() << ", the origin shape is "    in GetParameterSliceInfo()
    241  … << tensor_info.shape() << ", the group rank list is " << parameter_slice_info.group_ranks;    in GetParameterSliceInfo()