/third_party/mindspore/mindspore/lite/src/
D | lite_model.cc
  200  auto tensor_size = this->graph_.all_tensors_.size();  in NodeVerify() local
  209  [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; })) {  in NodeVerify()
  214  [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; })) {  in NodeVerify()
  231  auto tensor_size = this->graph_.all_tensors_.size();  in SubGraphVerify() local
  245  [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; })) {  in SubGraphVerify()
  250  [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; })) {  in SubGraphVerify()
  255  [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; })) {  in SubGraphVerify()
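The NodeVerify()/SubGraphVerify() hits above all apply the same guard: every tensor index recorded on a node or sub-graph must fall inside the model's all_tensors_ table, whose size is captured as tensor_size. A minimal sketch of that check, assuming the indices arrive as a std::vector<uint32_t> and that the callers (not shown in the listing) wrap the lambda in std::any_of, as the trailing "}))" suggests:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    // Returns true when some index points past the end of the tensor table,
    // i.e. the node or sub-graph fails verification.
    bool HasOutOfRangeIndex(const std::vector<uint32_t> &indices, size_t tensor_size) {
      return std::any_of(indices.begin(), indices.end(),
                         [&tensor_size](const uint32_t &idx) { return idx >= tensor_size; });
    }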
/third_party/mindspore/mindspore/ccsrc/fl/server/kernel/
D | optimizer_kernel.h
  73  size_t tensor_size =  in InitServerKernelInputOutputSize() local
  75  input_size_list_.emplace_back(tensor_size);  in InitServerKernelInputOutputSize()
  80  size_t tensor_size =  in InitServerKernelInputOutputSize() local
  82  output_size_list_.emplace_back(tensor_size);  in InitServerKernelInputOutputSize()
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/
D | cpu_kernel_runtime.cc
  112  …size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multipl…  in AssignValueNodeAddress() local
  114  address = CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, output_type_id);  in AssignValueNodeAddress()
  119  … address->ptr_ = static_cast<CPUMemoryManager *>(mem_manager_.get())->StaticMemMalloc(tensor_size);  in AssignValueNodeAddress()
  144  size_t tensor_size =  in AssignInputNodeAddress() local
  148  auto address = CreateDeviceAddress(nullptr, tensor_size, format, output_type_id);  in AssignInputNodeAddress()
  213  …size_t tensor_size = std::accumulate(temp_shape.begin(), temp_shape.end(), type_size, std::multipl…  in CreatTensorForOutput() local
  214  if (tensor_size < address->size_) {  in CreatTensorForOutput()
  229  …size_t tensor_size = std::accumulate(data_shape.begin(), data_shape.end(), type_size, std::multipl…  in CreatTensorForOutput() local
  230  … address->ptr_ = static_cast<CPUMemoryManager *>(mem_manager_.get())->StaticMemMalloc(tensor_size);  in CreatTensorForOutput()
  231  address->size_ = tensor_size;  in CreatTensorForOutput()
  [all …]
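The truncated std::accumulate calls above all follow one recurring formula: the byte size of a tensor is the product of its shape dimensions, seeded with the size of the element type. A self-contained sketch of that computation, assuming the elided tail of each call is std::multiplies<size_t>() (the listing cuts it off):

    #include <cstddef>
    #include <functional>
    #include <numeric>
    #include <vector>

    // Byte size of a tensor: type_size * d0 * d1 * ... * dn.
    // An empty (scalar) shape yields type_size, because type_size is the accumulate seed.
    size_t TensorByteSize(const std::vector<size_t> &shape, size_t type_size) {
      return std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t>());
    }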
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/
D | cpu_kernel.cc
  35  size_t tensor_size =  in InitInputOutputSize() local
  37  tensor_size = std::max(tensor_size, type_size);  in InitInputOutputSize()
  38  (void)input_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
  45  size_t tensor_size =  in InitInputOutputSize() local
  47  tensor_size = std::max(tensor_size, type_size);  in InitInputOutputSize()
  48  (void)output_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
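cpu_kernel.cc adds one guard on top of the product shown in the previous sketch: before the size is pushed onto the input/output size lists, it is clamped so that a shape containing a 0 dimension still reserves at least one element's worth of bytes. A short sketch of that clamp:

    #include <algorithm>
    #include <cstddef>

    // Never report a buffer smaller than a single element, even for degenerate shapes.
    size_t ClampToOneElement(size_t tensor_size, size_t type_size) {
      return std::max(tensor_size, type_size);
    }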
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/util/
D | json_helper.cc
  136  size_t JsonHelper::DumpData(const unsigned char *tensor_addr, const size_t &tensor_size, void *addr,  in DumpData() argument
  139  errno_t ret = memcpy_s(addr, buffer_size, tensor_addr, tensor_size);  in DumpData()
  146  return tensor_size;  in DumpData()
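JsonHelper::DumpData() is a bounds-checked copy: it moves tensor_size bytes from the tensor into a caller-provided buffer of buffer_size bytes via memcpy_s and returns the number of bytes written. memcpy_s and errno_t come from the securec library, so the sketch below emulates the size check with std::memcpy for portability; only the parameter names are taken from the signature above, the failure value of 0 is an assumption of this sketch.

    #include <cstddef>
    #include <cstring>

    // Copy tensor_size bytes into addr, refusing to overflow the destination buffer.
    // Returns the bytes copied, or 0 when the copy cannot be done safely.
    size_t DumpData(const unsigned char *tensor_addr, const size_t &tensor_size, void *addr,
                    const size_t &buffer_size) {
      if (addr == nullptr || tensor_addr == nullptr || tensor_size > buffer_size) {
        return 0;
      }
      std::memcpy(addr, tensor_addr, tensor_size);
      return tensor_size;
    }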
D | json_helper.h
  200  …size_t DumpData(const unsigned char *tensor_addr, const size_t &tensor_size, void *addr, const siz…
/third_party/mindspore/mindspore/ccsrc/runtime/device/
D | kernel_runtime.cc
  232  auto tensor_size = runtime_info->output_tensor_size(i);  in RunOpMallocPre() local
  235  auto device_address = CreateDeviceAddress(nullptr, tensor_size, output_format, output_type);  in RunOpMallocPre()
  294  auto tensor_size = AnfAlgo::GetOutputTensorMemSize(input_node, index);  in ResetNodeAddress() local
  295  …auto device_address = CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(input_nod…  in ResetNodeAddress()
  422  auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);  in RunOpAssignInputMemory() local
  424  …CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {…  in RunOpAssignInputMemory()
  427  auto ret = mem_manager_->MallocMemFromMemPool(device_address, tensor_size);  in RunOpAssignInputMemory()
  429  … MS_LOG(EXCEPTION) << "Device memory isn't enough and alloc failed, alloc size:" << tensor_size;  in RunOpAssignInputMemory()
  618  auto tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);  in AssignStaticMemoryInput() local
  620  …CreateDeviceAddress(nullptr, tensor_size, AnfAlgo::GetOutputFormat(item, index), output_type_id, {…  in AssignStaticMemoryInput()
  [all …]
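RunOpAssignInputMemory() shows the allocate-or-fail pattern used throughout this file: create a device address of tensor_size bytes, ask the memory manager's pool for backing storage, and raise an error naming the requested size when the pool cannot deliver. A generic stand-in, where std::malloc substitutes for MallocMemFromMemPool and a C++ exception for MS_LOG(EXCEPTION):

    #include <cstdlib>
    #include <stdexcept>
    #include <string>

    // Allocate tensor_size bytes for a kernel input, or fail loudly with the size in the message.
    void *AllocInputOrThrow(size_t tensor_size) {
      void *ptr = std::malloc(tensor_size);  // stand-in for mem_manager_->MallocMemFromMemPool()
      if (ptr == nullptr) {
        throw std::runtime_error("Device memory isn't enough and alloc failed, alloc size:" +
                                 std::to_string(tensor_size));
      }
      return ptr;
    }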
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/mkldnn/
D | batch_norm_grad_cpu_kernel.cc
  35  size_t tensor_size = shape[C] * SCALE_SHIFT_NUM * type_size;  in InitInputOutputSize() local
  38  (void)workspace_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
  40  (void)workspace_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
D | softmax_cross_entropy_with_logits_cpu_kernel.cc
  38  …size_t tensor_size = std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t…  in InitInputOutputSize() local
  39  (void)workspace_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
D | batch_norm_cpu_kernel.cc
  35  size_t tensor_size = shape[1] * 2 * type_size;  // [2, c] to store scale and bias  in InitInputOutputSize() local
  36  (void)workspace_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
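Both batch-norm kernels size their MKL-DNN workspaces the same way: a [2, C] buffer holding one scale value and one shift/bias value per channel. A sketch of that arithmetic, with kScaleShiftNum standing in for the SCALE_SHIFT_NUM constant used by the gradient kernel (assumed here to be 2, matching the "[2, c]" comment above):

    #include <cstddef>

    constexpr size_t kScaleShiftNum = 2;  // scale + shift, per the "[2, c]" comment

    // Workspace bytes needed to hold per-channel scale and shift values.
    size_t BatchNormWorkspaceBytes(size_t channels, size_t type_size) {
      return channels * kScaleShiftNum * type_size;
    }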
D | sparse_softmax_cross_entropy_with_logits_cpu_kernel.cc
  39  …size_t tensor_size = std::accumulate(shape.begin(), shape.end(), type_size, std::multiplies<size_t…  in InitInputOutputSize() local
  40  (void)workspace_size_list_.emplace_back(tensor_size);  in InitInputOutputSize()
/third_party/mindspore/mindspore/lite/tools/optimizer/fusion/
D | batchmatmul_fusion.cc
  63  auto tensor_size = fc_weight_param->Size();  in GetRightMatmulInputParamter() local
  79  if (EOK != memcpy_s(static_cast<int8_t *>(tensor_info->data_c()) + (i - 1) * tensor_size,  in GetRightMatmulInputParamter()
  80  tensor_info->Size() - (i - 1) * tensor_size, tensor_addr, tensor_size)) {  in GetRightMatmulInputParamter()
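GetRightMatmulInputParamter() packs several fully-connected weight tensors into one stacked tensor, copying slice i to byte offset (i - 1) * tensor_size and passing the remaining capacity as the destination bound of memcpy_s. A sketch with the same offset arithmetic, again emulating memcpy_s with an explicit bounds check; the helper name and return convention are this sketch's own:

    #include <cstddef>
    #include <cstring>

    // Copy one weight slice of tensor_size bytes into the packed buffer at slot (i - 1),
    // mirroring the 1-based loop in the original. Returns false if the slice would not fit.
    bool CopyWeightSlice(char *packed_data, size_t packed_size, const void *tensor_addr,
                         size_t tensor_size, size_t i) {
      const size_t offset = (i - 1) * tensor_size;
      if (offset > packed_size || packed_size - offset < tensor_size) {
        return false;
      }
      std::memcpy(packed_data + offset, tensor_addr, tensor_size);
      return true;
    }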
/third_party/mindspore/mindspore/ccsrc/runtime/framework/
D | graph_compiler.cc
  98  size_t tensor_size = AnfAlgo::GetOutputTensorMemSize(item, index);  in CreateParameterDeviceAddress() local
  99  auto device_address = device_context->CreateDeviceAddress(nullptr, tensor_size,  in CreateParameterDeviceAddress()
  132  size_t tensor_size = AnfAlgo::GetOutputTensorMemSize(value_node, output_idx);  in CreateDeviceAddressForTensorValue() local
  140  device_context->CreateDeviceAddress(nullptr, tensor_size, output_format, output_type_id);  in CreateDeviceAddressForTensorValue()
  161  size_t tensor_size = value.size();  in CreateValueNodeDeviceAddress() local
  162  …auto address = device_context->CreateDeviceAddress(nullptr, tensor_size, kOpFormat_DEFAULT, kNumbe…  in CreateValueNodeDeviceAddress()
/third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/
D | data_prepare_actor.cc
  313  size_t tensor_size = AnfAlgo::GetOutputTensorMemSize(input_node, 0);  in PrepareDataForStepMode() local
  315  nullptr, tensor_size, AnfAlgo::GetOutputFormat(input_node, 0), output_type_id);  in PrepareDataForStepMode()
  391  size_t tensor_size = value.size();  in PrepareDataForValueNode() local
  392  ShapeVector shape = {1, SizeToLong(tensor_size)};  in PrepareDataForValueNode()
  393  if (!device_tensor->SyncHostToDevice(shape, tensor_size, kNumberTypeUInt8, value.data())) {  in PrepareDataForValueNode()
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/mem_reuse/
D | mem_swap_manager.cc
  82  size_t tensor_size = tensor_info.tensor_size_;  in InitSwapThreshold() local
  83  if (tensor_size < tensor_size_threshold_) {  in InitSwapThreshold()
  262  size_t tensor_size = tensor.tensor_size_;  in AddSwapInfo() local
  263  if (tensor_size < tensor_size_threshold_) {  in AddSwapInfo()
  280  host_addr.size = tensor_size;  in AddSwapInfo()
D | mem_dynamic_allocator.cc
  164  bool DynamicMemPoolBestFit::IsDivide(size_t tensor_size, size_t mem_buf_size) const {  in IsDivide() argument
  165  return mem_buf_size - tensor_size >= DYNAMIC_MEM_ALIGN_SIZE;  in IsDivide()
D | mem_dynamic_allocator.h
  131  bool IsDivide(size_t tensor_size, size_t mem_buf_size) const;
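IsDivide() (declared in the header above, defined in mem_dynamic_allocator.cc) is the best-fit pool's split rule: a free buffer is carved in two only when the leftover after serving tensor_size is at least one alignment unit; otherwise the whole buffer is handed out, avoiding unusably small fragments. Sketch, with kDynamicMemAlignSize as a stand-in for DYNAMIC_MEM_ALIGN_SIZE (512 is an assumed value, not taken from the listing):

    #include <cstddef>

    constexpr size_t kDynamicMemAlignSize = 512;  // assumed value of DYNAMIC_MEM_ALIGN_SIZE

    // Split the free buffer only if the remainder is big enough to be reused later.
    // Assumes mem_buf_size >= tensor_size, since the caller already picked a buffer that fits.
    bool ShouldSplitBuffer(size_t tensor_size, size_t mem_buf_size) {
      return mem_buf_size - tensor_size >= kDynamicMemAlignSize;
    }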
/third_party/mindspore/mindspore/lite/tools/optimizer/graph/
D | node_infershape.cc
  445  auto tensor_size = data_info.data_.size();  in ConvertToLiteTensor() local
  446  if (tensor_size > 0) {  in ConvertToLiteTensor()
  454  auto tensor_data = reinterpret_cast<char *>(malloc(tensor_size));  in ConvertToLiteTensor()
  460  if (memcpy_s(tensor_data, tensor_size, data_info.data_.data(), tensor_size) != EOK) {  in ConvertToLiteTensor()
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/pass/
D | communication_op_fusion.cc
  358  size_t tensor_size = AnfAlgo::GetOutputTensorMemSize(input_node, 0);  in CreateFusedCommunicationOp() local
  364  tensor_size = (tensor_size / kAlignSize + 1) * kAlignSize / type_size;  in CreateFusedCommunicationOp()
  365  fusion_total_size += static_cast<int64_t>(tensor_size);  in CreateFusedCommunicationOp()
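CreateFusedCommunicationOp() sums the per-input contributions into fusion_total_size. Each contribution is the output's byte size rounded up to the next kAlignSize boundary (the expression always adds a full block, even when the size is already aligned) and then converted back into an element count of the input's type. A sketch of that one line of arithmetic, with the constants passed in as parameters:

    #include <cstddef>

    // Round byte_size up past the next multiple of align_size, then express it in elements.
    size_t AlignedElementCount(size_t byte_size, size_t align_size, size_t type_size) {
      return (byte_size / align_size + 1) * align_size / type_size;
    }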
/third_party/mindspore/mindspore/_extends/graph_kernel/model/
D | graph_split.py
  22  def tensor_size(tensor):  function
  76  return any([tensor_size(tensor) >= stitch_buffer_size for tensor in stitch_tensors])
  807  is_all_reduce = tensor_size(dom.ops[0].output) == 1
  809  if is_all_reduce and tensor_size(dom.ops[0].inputs[0]) > 1024 * 12:
  822  if tensor_size(dom.ops[0].output) == 1:
  824  if tensor_size(dom.ops[0].inputs[0]) < 1024 * 12:
  984  iter_size = tensor_size(op.output if not PrimLib.is_reduce(op) else op.inputs[0])
  1098  if tensor_size(dom.ops[0].output) == 1:
  1100  if tensor_size(dom.ops[0].inputs[0]) < 32 * 16 * 16:
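Unlike the C++ sites above, which compute byte sizes, the graph splitter's tensor_size() is an element count: an op whose output has a single element is treated as an all-reduce, and thresholds such as 1024 * 12 or 32 * 16 * 16 on the input's element count gate the fusion decisions. The Python helper's body is not reproduced in the listing; a C++ rendering of the same product, assuming a 64-bit shape vector:

    #include <cstdint>
    #include <vector>

    // Element count of a tensor: the product of its shape dimensions (1 for a scalar).
    int64_t ElementCount(const std::vector<int64_t> &shape) {
      int64_t size = 1;
      for (int64_t dim : shape) {
        size *= dim;
      }
      return size;
    }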
/third_party/mindspore/mindspore/lite/java/native/runtime/
D | ms_tensor.cpp
  276  int tensor_size = static_cast<jint>(data_len / sizeof(float));  in Java_com_mindspore_lite_MSTensor_createTensor() local
  277  std::vector<int> shape = {tensor_size};  in Java_com_mindspore_lite_MSTensor_createTensor()
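Java_com_mindspore_lite_MSTensor_createTensor() receives a raw byte buffer from the Java side; because the payload is divided by sizeof(float), the data is treated as float32 and given a 1-D shape derived from the byte length. A small sketch of that derivation (the helper name is this sketch's own):

    #include <cstddef>
    #include <vector>

    // Shape of a tensor built from a raw float buffer:
    // a single dimension holding data_len / sizeof(float) elements.
    std::vector<int> ShapeFromFloatBuffer(size_t data_len) {
      int tensor_size = static_cast<int>(data_len / sizeof(float));
      return {tensor_size};
    }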
D | lite_session.cpp
  261  auto tensor_size = static_cast<int>(env->GetArrayLength(dims));  in Java_com_mindspore_lite_LiteSession_resize() local
  262  for (int i = 0; i < tensor_size; i++) {  in Java_com_mindspore_lite_LiteSession_resize()
/third_party/mindspore/mindspore/lite/tools/benchmark_train/
D | net_train.cc
  566  int tensor_size = tensor->ElementsNum();  in CheckSum() local
  572  TensorNan(reinterpret_cast<float *>(data), tensor_size);  in CheckSum()
  573  std::cout << TensorSum<float>(data, tensor_size) << std::endl;  in CheckSum()
  576  std::cout << TensorSum<int>(data, tensor_size) << std::endl;  in CheckSum()
  580  std::cout << TensorSum<float16_t>(data, tensor_size) << std::endl;  in CheckSum()
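CheckSum() in the training benchmark prints a per-tensor checksum, dispatching on the tensor's data type and walking tensor->ElementsNum() elements (plus a NaN scan for float data). TensorSum<T> itself is not shown in the listing; a plausible sketch, under the assumption that it simply accumulates the elements as double:

    #include <cstddef>

    // Sum tensor_size elements of type T, reading them from an untyped data pointer.
    template <typename T>
    double TensorSum(const void *data, int tensor_size) {
      const T *ptr = static_cast<const T *>(data);
      double sum = 0.0;
      for (int i = 0; i < tensor_size; ++i) {
        sum += static_cast<double>(ptr[i]);
      }
      return sum;
    }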
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/api/
D | data_helper.cc
  177  size_t DataHelper::DumpData(const unsigned char *tensor_addr, const size_t &tensor_size, void *addr,  in DumpData() argument
  180  return jh.DumpData(tensor_addr, tensor_size, addr, buffer_size);  in DumpData()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/auto_parallel/rec_core/
D | rec_cost.h
  39  double tensor_size, bool is_search_forward);