| /third_party/mindspore/mindspore/ccsrc/backend/session/ |
| D | cpu_session.cc |
  158   …sion::CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_ten…   in CreateOutputTensors()
  224   const std::vector<tensor::TensorPtr> &input_tensors,   in BuildOpImpl()
  274   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImplOrigin()
  280   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImpl()
|
| D | session_basic.h |
  75    std::vector<tensor::TensorPtr> input_tensors;   member
  231   const std::vector<tensor::TensorPtr> &input_tensors,   in BuildOpImpl()
  236   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImpl()
  239   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImplOrigin()
|
| D | pynative_task_manager.h |
  34    std::vector<tensor::TensorPtr> input_tensors,   in RunOpContext()
  47    const std::vector<tensor::TensorPtr> &input_tensors() const { return input_tensors_; }   in input_tensors() function
|
| D | gpu_session.cc |
  254   void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors,   in RunOpAllocateMemory()
  639   const std::vector<tensor::TensorPtr> &input_tensors,   in BuildOpImpl()
  664   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImplOrigin()
  670   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImpl()
|
| D | ascend_session.cc |
  793    const std::vector<tensor::TensorPtr> &input_tensors,   in BuildOpImpl()
  835    … bool is_dynamic_shape, const std::vector<tensor::TensorPtr> &input_tensors) {   in LaunchFunc()
  893    const std::vector<tensor::TensorPtr> &input_tensors,   in PrepareForOutputTensor()
  915    std::vector<tensor::TensorPtr> *input_tensors,   in CreateKernelGraph()
  943    std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImpl()
  986    … std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,   in RunOpImplOrigin()
  1024   const std::vector<tensor::TensorPtr> &input_tensors,   in PreBuildOp()
  1479   void AscendSession::RunOpMemoryAlloc(const std::vector<tensor::TensorPtr> &input_tensors,   in RunOpMemoryAlloc()
  1487   void AscendSession::RunOpMemoryAllocNew(const std::vector<tensor::TensorPtr> &input_tensors,   in RunOpMemoryAllocNew()
|
| D | session_basic.cc |
  158    const std::vector<tensor::TensorPtr> &input_tensors) {   in GetNodeOutputTensorFromInputs()
  197    const std::vector<tensor::TensorPtr> &input_tensors,   in CreateNodeOutputTensor()
  250    const std::vector<tensor::TensorPtr> &input_tensors,   in CreateNodeOutputTensors()
  377    const std::vector<tensor::TensorPtr> &input_tensors,   in CreateNodeOutputPlaceholder()
  409    const std::vector<tensor::TensorPtr> &input_tensors,   in CreateNodeOutputPlaceholder()
  1182   const std::vector<tensor::TensorPtr> &input_tensors) {   in GetSingleOpGraphInfo()
  1274   …const KernelGraphPtr &kernel_graph, const std::vector<tensor::TensorPtr> &input_tensors, VectorRef…   in CreateOutputPlaceholder()
  1606   const std::vector<tensor::TensorPtr> &input_tensors,   in UpdateOutputs()
  1689   …asic::CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_ten…   in CreateOutputTensors()
  2126   … const std::vector<tensor::TensorPtr> &input_tensors,   in ConstructSingleOpGraph()
  [all …]
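Across session_basic.h and the CPU/GPU/Ascend session files above, BuildOpImpl and RunOpImpl recur with the same parameter shape, which reads as single-op hooks declared on the base session and overridden per backend. Below is a rough sketch of that interface layout only; OpRunInfo, GraphInfo, and the output type are simplified placeholders, not the real MindSpore declarations.

    #include <memory>
    #include <string>
    #include <vector>

    struct Tensor {};                          // placeholder for tensor::Tensor (assumption)
    using TensorPtr = std::shared_ptr<Tensor>;
    struct OpRunInfo {};                       // simplified stand-in
    using GraphInfo = std::string;             // simplified stand-in
    using OutputRef = std::vector<TensorPtr>;  // stand-in for VectorRef

    // Base session declares the single-op hooks; each backend session overrides them.
    class SessionBasicSketch {
     public:
      virtual ~SessionBasicSketch() = default;

     protected:
      virtual void BuildOpImpl(const OpRunInfo &op_run_info, const GraphInfo &graph_info,
                               const std::vector<TensorPtr> &input_tensors) = 0;
      virtual void RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info,
                             std::vector<TensorPtr> *input_tensors, OutputRef *outputs) = 0;
    };

    class CpuSessionSketch : public SessionBasicSketch {
     protected:
      void BuildOpImpl(const OpRunInfo &, const GraphInfo &,
                       const std::vector<TensorPtr> &) override {
        // a real backend would build/compile the single-op kernel graph here
      }
      void RunOpImpl(const GraphInfo &, OpRunInfo *, std::vector<TensorPtr> *input_tensors,
                     OutputRef *outputs) override {
        (void)input_tensors;
        outputs->clear();  // a real backend would launch kernels and fill outputs
      }
    };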
|
| D | kernel_graph.h |
  280   …void SetInputTensors(const std::vector<tensor::TensorPtr> &input_tensors) { input_tensors_ = input…   in SetInputTensors()
  281   const std::vector<tensor::TensorPtr> &input_tensors() const { return input_tensors_; }   in input_tensors() function
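kernel_graph.h:280-281 above shows the graph itself caching its input tensors behind a setter/const-ref getter pair. A minimal sketch of that member pattern, with a placeholder Tensor type standing in for mindspore's tensor::Tensor (assumption):

    #include <memory>
    #include <vector>

    struct Tensor {};                          // placeholder for tensor::Tensor (assumption)
    using TensorPtr = std::shared_ptr<Tensor>;

    class KernelGraphSketch {
     public:
      // The graph keeps its own copy, so the caller's vector may go out of scope safely.
      void SetInputTensors(const std::vector<TensorPtr> &input_tensors) { input_tensors_ = input_tensors; }
      // Read-only access without copying the vector.
      const std::vector<TensorPtr> &input_tensors() const { return input_tensors_; }

     private:
      std::vector<TensorPtr> input_tensors_;
    };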
|
| /third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/ |
| D | loop_count_actor.cc |
  87    std::vector<std::vector<TensorPtr>> input_tensors;   in SendOutput() local
|
| D | data_prepare_actor.cc |
  124   void DataPrepareActor::PrepareData(const std::vector<std::vector<TensorPtr>> &input_tensors,   in PrepareData()
  193   …taPrepareActor::PrepareDataForDeviceTensorStore(const std::vector<std::vector<TensorPtr>> &input_t…   in PrepareDataForDeviceTensorStore()
  224   …DataPrepareActor::PrepareDataForHostTensorQueue(const std::vector<std::vector<TensorPtr>> &input_t…   in PrepareDataForHostTensorQueue()
  268   void DataPrepareActor::PrepareDataForStepMode(const std::vector<std::vector<TensorPtr>> &input_tens…   in PrepareDataForStepMode()
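These actor-side entry points all take std::vector<std::vector<TensorPtr>>, which reads as one inner vector of host tensors per compiled graph in the actor set. A hedged sketch of walking such a nested structure; the function name and the per-graph interpretation are assumptions for illustration, not taken verbatim from the MindSpore sources:

    #include <cstddef>
    #include <iostream>
    #include <memory>
    #include <vector>

    struct Tensor {};                          // placeholder for mindspore's Tensor (assumption)
    using TensorPtr = std::shared_ptr<Tensor>;

    // Illustrative only: iterate graph-by-graph, then tensor-by-tensor, the way a
    // data-prepare step would stage host tensors before launching the graphs.
    void PrepareDataSketch(const std::vector<std::vector<TensorPtr>> &input_tensors) {
      for (std::size_t graph_index = 0; graph_index < input_tensors.size(); ++graph_index) {
        const auto &tensors_of_graph = input_tensors[graph_index];
        std::cout << "graph " << graph_index << ": " << tensors_of_graph.size()
                  << " input tensor(s)\n";
        for (const auto &tensor : tensors_of_graph) {
          if (tensor == nullptr) {
            continue;  // some positions may carry no host tensor (e.g. weight parameters)
          }
          // ... copy host data to device / enqueue into a host tensor queue ...
        }
      }
    }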
|
| D | kernel_actor.cc |
  134   const std::vector<TensorPtr> *input_tensors) {   in RunOpControlWithInputTensor()
  246   void KernelActor::PushInputDeviceTensor(const std::vector<TensorPtr> *input_tensors) {   in PushInputDeviceTensor()
|
| /third_party/mindspore/mindspore/lite/micro/coder/opcoders/ |
| D | op_coder.cc |
  30    const std::vector<Tensor *> OperatorCoder::input_tensors() const { return input_tensors_; }   in input_tensors() function in mindspore::lite::micro::OperatorCoder
|
| /third_party/mindspore/mindspore/lite/test/ut/src/ |
| D | utils_test.cc |
  64    auto input_tensors = kernel::LiteKernelUtil::SubgraphInputTensors(kernels);   in TEST_F() local
|
| /third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/ |
| D | affine_fp32.cc |
  258   std::vector<lite::Tensor *> input_tensors;   in FullMatmulKernelCreate() local
  312   std::vector<lite::Tensor *> input_tensors;   in IncrementMatmulKernelCreate() local
|
| /third_party/mindspore/mindspore/lite/src/delegate/nnrt/ |
| D | nnrt_model_kernel.cc |
  99    auto input_tensors = this->inputs();   in PrepareInputs() local
|
| D | nnrt_delegate.cc |
  154   auto input_tensors = model->inputs();   in PrepareInputs() local
|
| /third_party/mindspore/mindspore/lite/tools/converter/legacy_optimizer/graph/ |
| D | infershape_pass.cc |
  49    void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *output_tensors) {   in FreeTensors()
  249   void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &outp…   in PrintTensorShape()
  511   auto input_tensors = ConvertTensorToLiteTensor(graph, node->inputIndex);   in InferSubgraph() local
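infershape_pass.cc converts graph tensors into temporary lite tensors and later releases them through FreeTensors(input_tensors, output_tensors). A minimal sketch of that cleanup shape for vectors of owning raw pointers; the Tensor type is a placeholder and the real pass may reset additional state before deleting:

    #include <vector>

    struct Tensor {};  // placeholder for lite::Tensor (assumption)

    // Delete every element and leave both vectors empty so the caller cannot
    // accidentally reuse dangling pointers afterwards.
    void FreeTensorsSketch(std::vector<Tensor *> *input_tensors,
                           std::vector<Tensor *> *output_tensors) {
      if (input_tensors != nullptr) {
        for (auto *tensor : *input_tensors) {
          delete tensor;
        }
        input_tensors->clear();
      }
      if (output_tensors != nullptr) {
        for (auto *tensor : *output_tensors) {
          delete tensor;
        }
        output_tensors->clear();
      }
    }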
|
| /third_party/mindspore/mindspore/ccsrc/vm/ |
| D | backend.cc |
  472    std::vector<tensor::TensorPtr> *input_tensors) {   in CompileGraph()
  799    std::vector<std::vector<tensor::TensorPtr>> input_tensors;   in RunGraph() local
  985    std::vector<std::vector<tensor::TensorPtr> *> input_tensors;   in ConstructGraphCompilerInfo() local
  993    const std::vector<tensor::TensorPtr> *input_tensors, bool need_erase) {   in ConstructGraphCompilerInfo()
  1038   … const std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs) {   in RunGraph()
|
| /third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/cpu/pyfunc/ |
| D | py_func_cpu_kernel.cc |
  171   const std::vector<tensor::TensorPtr> &input_tensors) {   in RawMemoryToPyObjects()
|
| /third_party/mindspore/mindspore/lite/micro/coder/ |
| D | graph.cc |
  191   std::vector<lite::Tensor *> CoderGraph::input_tensors() const { return input_tensors_; }   in input_tensors() function in mindspore::lite::micro::CoderGraph
|
| /third_party/mindspore/mindspore/ccsrc/runtime/framework/ |
| D | graph_compiler.cc |
  386   … std::vector<TensorPtr> *const input_tensors, bool *single_op_cache_hit,   in CompileGraph()
  478   …GetSingleOpRunInfoAndGraphInfo(const CNodePtr &kernel, const std::vector<TensorPtr> &input_tensors,   in GetSingleOpRunInfoAndGraphInfo()
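The single-op CompileGraph overload reports back through bool *single_op_cache_hit, which implies a cache of already-compiled single-op graphs keyed by some graph-info string (as GetSingleOpRunInfoAndGraphInfo suggests). The cache key, the map, and the names below are assumptions made purely to illustrate that lookup shape:

    #include <memory>
    #include <string>
    #include <unordered_map>

    struct KernelGraph {};  // placeholder for the compiled single-op graph (assumption)
    using KernelGraphPtr = std::shared_ptr<KernelGraph>;

    class SingleOpGraphCacheSketch {
     public:
      // Return the cached graph and set *cache_hit accordingly; compile and insert on a
      // miss. graph_info would be derived from op name, input shapes, dtypes, attributes, etc.
      KernelGraphPtr GetOrCompile(const std::string &graph_info, bool *cache_hit) {
        auto iter = cache_.find(graph_info);
        if (iter != cache_.end()) {
          *cache_hit = true;
          return iter->second;
        }
        *cache_hit = false;
        auto graph = std::make_shared<KernelGraph>();  // stand-in for real compilation
        cache_.emplace(graph_info, graph);
        return graph;
      }

     private:
      std::unordered_map<std::string, KernelGraphPtr> cache_;
    };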
|
| /third_party/mindspore/mindspore/lite/src/ |
| D | lite_kernel_util.cc |
  87    std::vector<lite::Tensor *> input_tensors;   in SubgraphInputTensors() local
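LiteKernelUtil::SubgraphInputTensors (exercised by utils_test.cc above) collects the tensors that kernels in a group consume but that no kernel in the same group produces, i.e. the subgraph's external inputs. A hedged sketch of that set-difference logic with simplified kernel/tensor types; the real helper also filters constants and uses the lite kernel API:

    #include <algorithm>
    #include <set>
    #include <vector>

    struct Tensor {};  // placeholder for lite::Tensor (assumption)

    struct Kernel {    // simplified stand-in for a lite kernel (assumption)
      std::vector<Tensor *> in_tensors;
      std::vector<Tensor *> out_tensors;
    };

    // A tensor is a subgraph input if some kernel in the set reads it but no kernel
    // in the set writes it.
    std::vector<Tensor *> SubgraphInputTensorsSketch(const std::vector<Kernel *> &kernels) {
      std::set<Tensor *> produced;
      for (const auto *kernel : kernels) {
        produced.insert(kernel->out_tensors.begin(), kernel->out_tensors.end());
      }
      std::vector<Tensor *> input_tensors;
      for (const auto *kernel : kernels) {
        for (auto *tensor : kernel->in_tensors) {
          const bool internal = produced.count(tensor) != 0;
          const bool already_added =
              std::find(input_tensors.begin(), input_tensors.end(), tensor) != input_tensors.end();
          if (!internal && !already_added) {
            input_tensors.push_back(tensor);
          }
        }
      }
      return input_tensors;
    }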
|
| /third_party/mindspore/mindspore/lite/tools/optimizer/fusion/ |
| D | constant_folding_fusion.cc |
  350   auto input_tensors = GetCNodeInputTensors(input_cnode, fmk_type_);   in Process() local
|
| /third_party/mindspore/mindspore/ccsrc/runtime/device/ |
| D | kernel_runtime.cc |
  221    const std::vector<tensor::TensorPtr> &input_tensors) {   in RunOpMallocPre()
  315    void KernelRuntime::RunOpAssignMemory(const std::vector<tensor::TensorPtr> &input_tensors,   in RunOpAssignMemory()
  389    void KernelRuntime::RunOpAssignInputMemory(const std::vector<tensor::TensorPtr> &input_tensors,   in RunOpAssignInputMemory()
  1387   auto &input_tensors = graph.input_tensors();   in InitGraphInputTensors() local
|
| /third_party/mindspore/mindspore/ccsrc/pipeline/pynative/ |
| D | pynative_execute.cc |
  308    …ngleOpGraphInfo(const OpExecInfoPtr &op_exec_info, const std::vector<tensor::TensorPtr> &input_ten…   in GetSingleOpGraphInfo()
  411    std::vector<tensor::TensorPtr> *input_tensors) {   in PlantTensorTupleToVector()
  425    …ertValueTupleToTensor(const py::object &input_object, std::vector<tensor::TensorPtr> *input_tensor…   in ConvertValueTupleToTensor()
  440    … std::vector<tensor::TensorPtr> *input_tensors, int64_t *const tensor_mask) {   in ConvertMultiPyObjectToTensor()
  461    … std::vector<tensor::TensorPtr> *input_tensors, int64_t *const tensor_mask) {   in ConvertPyObjectToTensor()
  498    std::vector<tensor::TensorPtr> *input_tensors) {   in ConstructInputTensor()
  1943   std::vector<tensor::TensorPtr> input_tensors;   in RunOpInMs() local
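The pynative_execute.cc helpers above (PlantTensorTupleToVector, ConvertValueTupleToTensor, ConvertPyObjectToTensor, ConstructInputTensor) funnel heterogeneous Python-side arguments into one flat vector of tensors, alongside a tensor mask describing what each position holds. The sketch below only illustrates that flattening idea: it uses std::variant instead of py::object, a vector-valued mask instead of the per-argument int64_t* seen in the real signatures, and assumed mask encodings.

    #include <cstdint>
    #include <memory>
    #include <variant>
    #include <vector>

    struct Tensor {};                          // placeholder for tensor::Tensor (assumption)
    using TensorPtr = std::shared_ptr<Tensor>;

    // Stand-in for a PyNative op argument: already a tensor, a scalar to be wrapped,
    // or a tuple of tensors that must be "planted" element by element.
    using OpArg = std::variant<TensorPtr, double, std::vector<TensorPtr>>;

    constexpr int64_t kTensorMask = 1;  // position holds a real tensor (assumed encoding)
    constexpr int64_t kValueMask = 0;   // position holds a converted scalar value

    void ConstructInputTensorsSketch(const std::vector<OpArg> &args,
                                     std::vector<TensorPtr> *input_tensors,
                                     std::vector<int64_t> *tensor_mask) {
      for (const auto &arg : args) {
        if (const auto *tensor = std::get_if<TensorPtr>(&arg)) {
          input_tensors->push_back(*tensor);
          tensor_mask->push_back(kTensorMask);
        } else if (std::get_if<double>(&arg) != nullptr) {
          // a real implementation would build a 0-d tensor from the scalar here
          input_tensors->push_back(std::make_shared<Tensor>());
          tensor_mask->push_back(kValueMask);
        } else {
          // tuple of tensors: append every element, one mask entry per tensor
          for (const auto &element : std::get<std::vector<TensorPtr>>(arg)) {
            input_tensors->push_back(element);
            tensor_mask->push_back(kTensorMask);
          }
        }
      }
    }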
|
| /third_party/mindspore/mindspore/lite/src/cxx_api/model/ |
| D | model_impl.cc |
  209   auto input_tensors = session_->GetInputs();   in Predict() local
|