| /third_party/mindspore/mindspore-src/source/mindspore/lite/providers/dpico/manager/ |
| D | acl_model_manager.cc |
|   | 51 | int AclModelManager::LoadModel(const std::vector<mindspore::MSTensor> &input_tensors) { | in LoadModel() |
|   | 196 | int AclModelManager::CopyTensorDataToAclInputs(const std::vector<mindspore::MSTensor> &input_tensor… | in CopyTensorDataToAclInputs() |
|   | 320 | int AclModelManager::AclModelRun(const std::vector<mindspore::MSTensor> &input_tensors) { | in AclModelRun() |
|   | 360 | … const schema::Primitive *primitive, const std::vector<mindspore::MSTensor> &input_tensors, | in Init() |
|   | 396 | int AclModelManager::UpdateBatchSize(const std::vector<mindspore::MSTensor> &input_tensors) { | in UpdateBatchSize() |
|   | 422 | int AclModelManager::PrepareAclInputs(std::vector<mindspore::MSTensor> *input_tensors) { | in PrepareAclInputs() |
|   | 509 | int AclModelManager::UpdateAclInputs(std::vector<mindspore::MSTensor> *input_tensors) { | in UpdateAclInputs() |
|   | 595 | int AclModelManager::Execute(const std::vector<mindspore::MSTensor> &input_tensors, | in Execute() |
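A pattern worth noting in this entry: the methods that only read the inputs (LoadModel, AclModelRun, UpdateBatchSize, Execute) take `const std::vector<mindspore::MSTensor> &`, while PrepareAclInputs and UpdateAclInputs take `std::vector<mindspore::MSTensor> *` so they can modify the caller's tensor list in place. The sketch below only illustrates that calling convention; the `MSTensor` placeholder and both function bodies are invented for the example and are not the DPICO provider's code.

```cpp
#include <cstdint>
#include <iostream>
#include <string>
#include <vector>

// Minimal placeholder standing in for mindspore::MSTensor; real code would
// include the MindSpore Lite API headers instead.
struct MSTensor {
  std::string name;
  std::vector<int64_t> shape;
};

// Read-only consumer: const reference, like LoadModel()/AclModelRun()/Execute().
int AclModelRun(const std::vector<MSTensor> &input_tensors) {
  for (const auto &tensor : input_tensors) {
    std::cout << "running with input " << tensor.name << '\n';
  }
  return 0;
}

// Mutating helper: pointer parameter, like PrepareAclInputs()/UpdateAclInputs().
// The rank-4 padding below is purely illustrative.
int PrepareAclInputs(std::vector<MSTensor> *input_tensors) {
  if (input_tensors == nullptr) {
    return -1;
  }
  for (auto &tensor : *input_tensors) {
    while (tensor.shape.size() < 4) {
      tensor.shape.insert(tensor.shape.begin(), 1);
    }
  }
  return 0;
}

int main() {
  std::vector<MSTensor> inputs = {{"data", {224, 224, 3}}};
  if (PrepareAclInputs(&inputs) != 0) {
    return 1;
  }
  return AclModelRun(inputs);
}
```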
|
| /third_party/mindspore/mindspore-src/source/tests/st/ops/cpu/ |
| D | test_square_sum_all.py |
|   | 36 | def run_net(datatype, input_tensors, output_tensors): | argument |
|   | 82 | def test_cpu(dtype, input_tensors, output_tensors): | argument |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/runtime/pynative/ |
| D | op_runner.cc |
|   | 59 | const std::vector<tensor::BaseTensorPtr> &input_tensors, | in UpdateInputTensorFromDevice() |
|   | 132 | const std::vector<tensor::BaseTensorPtr> &input_tensors, | in UpdateInputNodeDeviceAddress() |
|   | 248 | const std::vector<tensor::BaseTensorPtr> &input_tensors, | in CopyParameterDataToDevice() |
|   | 500 | …id CopyDataToDevice(const KernelGraphPtr &graph, const std::vector<tensor::BaseTensorPtr> &input_t… | in CopyDataToDevice() |
|   | 577 | const std::vector<tensor::BaseTensorPtr> &input_tensors) { | in LaunchKernels() |
|   | 800 | const std::vector<tensor::BaseTensorPtr> &input_tensors) { | in RunSingleOpGraph() |
|   | 854 | const std::vector<tensor::BaseTensorPtr> &input_tensors) { | in RunSingleOpGraph() |
|   | 933 | … const std::vector<tensor::BaseTensorPtr> &input_tensors, bool is_sync) { | in UpdateInputDeviceAddress() |
|   | 979 | const std::vector<tensor::BaseTensorPtr> &input_tensors) { | in CopyHostToDevice() |
|
| D | graph_adapter.cc |
|   | 357 | …id GraphAdapter::HandleHeterogeneousTensors(const std::vector<std::vector<tensor::TensorPtr>> &inp… | in HandleHeterogeneousTensors() |
|   | 381 | … const std::vector<tensor::TensorPtr> &input_tensors, | in ReplaceGraphParameterProperties() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/micro/coder/opcoders/ |
| D | op_coder.cc |
|   | 30 | const std::vector<Tensor *> OperatorCoder::input_tensors() const { return input_tensors_; } | in input_tensors() function in mindspore::lite::micro::OperatorCoder |
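The hit above is a complete definition: a const accessor that returns the coder's `input_tensors_` member by value. Below is a self-contained sketch of the same pattern, with a simplified `Tensor` stand-in and a small driver added for illustration; neither is MindSpore code.

```cpp
#include <iostream>
#include <utility>
#include <vector>

// Simplified stand-ins used only to mirror the accessor shown at op_coder.cc:30.
struct Tensor {
  const char *name;
};

class OperatorCoder {
 public:
  explicit OperatorCoder(std::vector<Tensor *> inputs) : input_tensors_(std::move(inputs)) {}

  // Const accessor returning the input tensor list by value, as in the listing.
  const std::vector<Tensor *> input_tensors() const { return input_tensors_; }

 private:
  std::vector<Tensor *> input_tensors_;
};

int main() {
  Tensor a{"in0"};
  Tensor b{"in1"};
  OperatorCoder coder({&a, &b});
  for (const auto *tensor : coder.input_tensors()) {
    std::cout << tensor->name << '\n';
  }
  return 0;
}
```

Because the accessor returns by value, every call copies the vector of pointers; returning `const std::vector<Tensor *> &` would avoid that copy when callers only iterate.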
|
| /third_party/mindspore/mindspore-src/source/tests/ut/cpp/ops/ |
| D | test_ops_concat.cc |
|   | 130 | std::vector<tensor::TensorPtr> input_tensors; | member |
|   | 140 | auto input_tensors = param.input_tensors; | in TEST_P() local |
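Both hits fit the usual GoogleTest value-parameterized layout: a parameter struct with an `input_tensors` member (line 130) and a `TEST_P` body that copies it into a local (line 140). The sketch below reproduces that layout; the `TensorPtr` alias, the struct fields, and the test names are invented for illustration and are not the actual concat test.

```cpp
#include <gtest/gtest.h>

#include <memory>
#include <vector>

// Hypothetical stand-in for tensor::TensorPtr.
using TensorPtr = std::shared_ptr<std::vector<float>>;

// Parameter struct with an input_tensors member, mirroring test_ops_concat.cc:130.
struct ConcatParams {
  std::vector<TensorPtr> input_tensors;
  size_t expected_count;
};

class TestConcat : public ::testing::TestWithParam<ConcatParams> {};

TEST_P(TestConcat, InputCountMatches) {
  const auto &param = GetParam();
  // Copy the parameterized inputs into a local, mirroring test_ops_concat.cc:140.
  auto input_tensors = param.input_tensors;
  EXPECT_EQ(input_tensors.size(), param.expected_count);
}

INSTANTIATE_TEST_SUITE_P(ConcatCases, TestConcat,
                         ::testing::Values(
                             ConcatParams{{std::make_shared<std::vector<float>>(3, 1.0f)}, 1},
                             ConcatParams{{}, 0}));
```

INSTANTIATE_TEST_SUITE_P is the current GoogleTest spelling; older trees use INSTANTIATE_TEST_CASE_P for the same purpose.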
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/test/ut/src/ |
| D | utils_test.cc |
|   | 61 | auto input_tensors = kernel::KernelExecUtil::SubgraphInputTensors(kernels); | in TEST_F() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/litert/kernel/cpu/fp32/ |
| D | affine_fp32.cc |
|   | 258 | std::vector<lite::Tensor *> input_tensors; | in FullMatmulKernelCreate() local |
|   | 318 | std::vector<lite::Tensor *> input_tensors; | in IncrementMatmulKernelCreate() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/delegate/tensorrt/ |
| D | tensorrt_graph_executor.cc |
|   | 123 | … BaseOperatorPtr *base_operator, std::vector<TensorInfo> *input_tensors, | in GetAbstractArgsFromCNode() |
|   | 181 | std::vector<TensorInfo> *input_tensors) { | in GetModelInputsInfo() |
|   | 419 | std::vector<TensorInfo> input_tensors; | in BuildSubGraph() local |
|   | 458 | const std::vector<TensorInfo> &input_tensors, | in FindTensorRTOp() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/delegate/graph_executor/litert/ |
| D | graph_executor.cc |
|   | 168 | auto input_tensors = lite_session_->GetInputs(); | in RunGraph() local |
|   | 260 | auto input_tensors = lite_session_->GetInputs(); | in Resize() local |
|   | 287 | std::vector<tensor::Tensor> input_tensors; | in GetInputInfos() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/session/ |
| D | lite_infer_session.cc |
|   | 138 | auto input_tensors = lite_session_->GetInputs(); | in RunGraph() local |
|   | 220 | std::vector<MutableTensorImplPtr> input_tensors; | in GetInputs() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/utils/ |
| D | runtime_utils.cc |
|   | 101 | auto input_tensors = AnfAlgo::GetOrCreateAllInputKernelTensors(kernel_node); | in UpdateKernelNodeOutputInfo() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/backend/common/session/ |
| D | session_basic.cc |
|   | 93 | const std::vector<tensor::TensorPtr> &input_tensors) { | in GetNodeOutputTensorFromInputs() |
|   | 128 | const std::vector<tensor::TensorPtr> &input_tensors, | in CreateNodeOutputTensor() |
|   | 279 | const std::vector<tensor::TensorPtr> &input_tensors, | in CreateNodeOutputPlaceholder() |
|   | 312 | const std::vector<tensor::TensorPtr> &input_tensors, | in CreateNodeOutputPlaceholder() |
|   | 380 | const std::vector<tensor::TensorPtr> &input_tensors, | in CreateNodeOutputTensors() |
|   | 518 | …const KernelGraphPtr &kernel_graph, const std::vector<tensor::TensorPtr> &input_tensors, VectorRef… | in CreateOutputPlaceholder() |
|   | 990 | const std::vector<tensor::TensorPtr> &input_tensors, | in UpdateOutputs() |
|   | 1032 | …asic::CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_ten… | in CreateOutputTensors() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/backend/graph_compiler/ |
| D | backend_base.cc |
|   | 77 | std::vector<tensor::TensorPtr> *input_tensors) { | in PushTensor() |
|   | 93 | std::vector<tensor::TensorPtr> *input_tensors) { | in PushTupleTensor() |
|   | 124 | …mForwardOutputParameter(const AnfNodePtr &input_node, std::vector<tensor::TensorPtr> *input_tensor… | in GetTensorFromForwardOutputParameter() |
|   | 153 | std::vector<tensor::TensorPtr> input_tensors; | in GetRunGraphInputs() local |
|   | 179 | std::vector<tensor::TensorPtr> input_tensors; | in GetRunGraphInputs() local |
|   | 1136 | std::vector<std::vector<tensor::TensorPtr>> input_tensors; | in RunGraph() local |
|   | 1531 | std::vector<std::vector<tensor::TensorPtr> *> input_tensors; | in ConstructGraphCompilerInfo() local |
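backend_base.cc pairs small push helpers (PushTensor, PushTupleTensor) that append into a caller-owned `std::vector<tensor::TensorPtr> *` with GetRunGraphInputs, which builds one such vector per kernel graph; hence the nested `std::vector<std::vector<tensor::TensorPtr>>` local at line 1136. The sketch below only mirrors that shape: the placeholder tensor and graph types, the helper body, and the flattening logic are assumptions, not the backend's implementation.

```cpp
#include <memory>
#include <string>
#include <utility>
#include <vector>

// Placeholders for tensor::Tensor / tensor::TensorPtr and a kernel graph's inputs.
struct Tensor {
  std::string name;
};
using TensorPtr = std::shared_ptr<Tensor>;

struct GraphSketch {
  std::vector<std::string> parameter_names;  // stand-in for the graph's input parameters
};

// Append one tensor into the caller-owned vector, in the spirit of PushTensor();
// the real helper also deals with value nodes, and PushTupleTensor() with tuples.
void PushTensor(const TensorPtr &tensor, std::vector<TensorPtr> *input_tensors) {
  if (input_tensors == nullptr || tensor == nullptr) {
    return;
  }
  input_tensors->push_back(tensor);
}

// Build one input-tensor vector per graph, mirroring the nested
// std::vector<std::vector<TensorPtr>> local used by RunGraph().
std::vector<std::vector<TensorPtr>> GetRunGraphInputsSketch(const std::vector<GraphSketch> &graphs) {
  std::vector<std::vector<TensorPtr>> all_inputs;
  all_inputs.reserve(graphs.size());
  for (const auto &graph : graphs) {
    std::vector<TensorPtr> input_tensors;
    for (const auto &name : graph.parameter_names) {
      PushTensor(std::make_shared<Tensor>(Tensor{name}), &input_tensors);
    }
    all_inputs.push_back(std::move(input_tensors));
  }
  return all_inputs;
}

int main() {
  const auto inputs = GetRunGraphInputsSketch({{{"x", "y"}}, {{"z"}}});
  return (inputs.size() == 2 && inputs[0].size() == 2) ? 0 : 1;
}
```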
|
| D | backend.cc |
|   | 601 | void CreateKernelTensor(const std::vector<std::vector<tensor::TensorPtr>> &input_tensors, | in CreateKernelTensor() |
|   | 666 | auto input_tensors = GetRunGraphInputs(graph_compiler_info, args); | in RealCompileGraphBeforeRunActor() local |
|   | 734 | auto input_tensors = GetRunGraphInputs(graph_compiler_info, args); | in RunGraphByActors() local |
|   | 1070 | auto input_tensors = runtime::OpRunner::GetTensorWithoutValueMask(op_run_info); | in RunOpImplDynamic() local |
|   | 1084 | auto input_tensors = runtime::OpRunner::GetTensorWithoutValueMask(op_run_info); | in RunOpImplDynamic() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/runtime/graph_scheduler/actor/ |
| D | data_prepare_actor.cc |
|   | 325 | void SyncTensorTrunk(const std::vector<std::vector<TensorPtr>> &input_tensors) { | in SyncTensorTrunk() |
|   | 496 | void DataPrepareActor::SetInitTensorsIfNeeded(const std::vector<std::vector<TensorPtr>> &input_tens… | in SetInitTensorsIfNeeded() |
|   | 514 | void DataPrepareActor::PrepareData(const std::vector<std::vector<TensorPtr>> &input_tensors, const … | in PrepareData() |
|   | 711 | …taPrepareActor::PrepareDataForDeviceTensorStore(const std::vector<std::vector<TensorPtr>> &input_t… | in PrepareDataForDeviceTensorStore() |
|   | 771 | …DataPrepareActor::PrepareDataForHostTensorQueue(const std::vector<std::vector<TensorPtr>> &input_t… | in PrepareDataForHostTensorQueue() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/kernel/ascend_native/ |
| D | ascend_native_composite_kernel.cc |
|   | 76 | std::vector<kernel::InferTensor *> input_tensors; | in CreateKernel() local |
|   | 168 | … std::vector<kernel::InferTensor *> *input_tensors) { | in CreateInputKernelTensors() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/extendrt/delegate/ascend_native/ |
| D | delegate.cc |
|   | 193 | … std::vector<kernel::InferTensor *> *input_tensors, | in CreateInputKernelTensors() |
|   | 248 | std::vector<kernel::InferTensor *> input_tensors; | in CreateKernel() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/gpu/hal/hardware/ |
| D | gpu_somas.cc |
|   | 115 | auto input_tensors = node->input_tensors_; | in InplaceNodeProcess() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/ccsrc/plugin/device/ascend/hal/hardware/ |
| D | acl_somas.cc |
|   | 114 | auto input_tensors = node->input_tensors_; | in RuntimeNodeProcess() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/legacy_optimizer/graph/ |
| D | infershape_pass.cc |
|   | 50 | void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *output_tensors) { | in FreeTensors() |
|   | 294 | void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &outp… | in PrintTensorShape() |
|   | 608 | auto input_tensors = ConvertTensorToLiteTensor(graph, node->inputIndex); | in InferSubgraph() local |
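FreeTensors' full signature is visible at line 50; a helper of this kind typically deletes the heap-allocated lite tensors and clears both vectors. The body below is that assumed cleanup, written against a placeholder `Tensor` type, and is not the converter's actual code.

```cpp
#include <string>
#include <vector>

// Placeholder for the converter's lite Tensor type.
struct Tensor {
  std::string name;
};

// Signature taken from infershape_pass.cc:50; the body is an assumed
// delete-and-clear cleanup, not the converter's implementation.
void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *output_tensors) {
  if (input_tensors != nullptr) {
    for (auto *tensor : *input_tensors) {
      delete tensor;
    }
    input_tensors->clear();
  }
  if (output_tensors != nullptr) {
    for (auto *tensor : *output_tensors) {
      delete tensor;
    }
    output_tensors->clear();
  }
}

int main() {
  std::vector<Tensor *> inputs = {new Tensor{"in0"}, new Tensor{"in1"}};
  std::vector<Tensor *> outputs = {new Tensor{"out0"}};
  FreeTensors(&inputs, &outputs);
  return (inputs.empty() && outputs.empty()) ? 0 : 1;
}
```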
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/converter/adapter/ |
| D | utils.cc |
|   | 116 | …eadInputFile(const std::vector<std::string> &in_data_files, std::vector<MSTensor> *input_tensors) { | in ReadInputFile() |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/litert/ |
| D | mindrt_executor.cc |
|   | 177 | auto input_tensors = op_actor->GetKernel()->in_tensors(); | in BuildReceiverMap() local |
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/src/litert/kernel/cpu/bolt/ |
| D | convolution_bolt.cc |
|   | 415 | std::vector<BoltTensor> input_tensors(1, input_tensor); | in Run() local |
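The hit at line 415 uses std::vector's count-plus-value constructor to wrap a single tensor in a one-element vector. A minimal stand-alone illustration of that idiom, with a placeholder `BoltTensor`:

```cpp
#include <cassert>
#include <vector>

// Placeholder for BoltTensor; only the construction idiom matters here.
struct BoltTensor {
  int id = 0;
};

int main() {
  BoltTensor input_tensor{42};
  // Count-plus-value constructor: one element, copy-initialized from
  // input_tensor, as in convolution_bolt.cc:415.
  std::vector<BoltTensor> input_tensors(1, input_tensor);
  assert(input_tensors.size() == 1 && input_tensors.front().id == 42);
  return 0;
}
```

Brace initialization (`std::vector<BoltTensor> input_tensors{input_tensor};`) produces the same one-element vector here; the count form generalizes to n copies.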
|
| /third_party/mindspore/mindspore-src/source/mindspore/lite/tools/optimizer/const_fold/ |
| D | fold_utils.cc |
|   | 286 | std::vector<lite::Tensor *> input_tensors; | in DoConstantFold() local |
|