
Searched refs: input_tensors (Results 1 – 25 of 52), sorted by relevance


/third_party/mindspore/mindspore/ccsrc/backend/session/
ascend_session.h
65 const std::vector<tensor::TensorPtr> &input_tensors,
70 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
73 …const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr> *input_tensors,
107 …void RunOpMemoryAlloc(const std::vector<tensor::TensorPtr> &input_tensors, KernelGraph *kernel_gra…
108 void RunOpMemoryAllocNew(const std::vector<tensor::TensorPtr> &input_tensors,
149 …phPtr PreBuildOp(const OpRunInfo &op_run_info, const std::vector<tensor::TensorPtr> &input_tensors,
155 …reForOutputTensor(const KernelGraphPtr &graph, const std::vector<tensor::TensorPtr> &input_tensors,
162 const std::vector<tensor::TensorPtr> &input_tensors);
164 std::vector<tensor::TensorPtr> *input_tensors,
cpu_session.cc
158 …::CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_tensors, in CreateOutputTensors() argument
163 runtime_.CreateOutputTensors(kernel_graph.get(), input_tensors, outputs, tensor_to_node); in CreateOutputTensors()
224 const std::vector<tensor::TensorPtr> &input_tensors, in BuildOpImpl() argument
233 const auto &kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask); in BuildOpImpl()
274 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImplOrigin() argument
276 RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask); in RunOpImplOrigin()
280 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImpl() argument
282 MS_EXCEPTION_IF_NULL(input_tensors); in RunOpImpl()
284 const auto &kernel_graph = BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask); in RunOpImpl()
285 EraseValueNodeTensor(tensors_mask, input_tensors); in RunOpImpl()
[all …]
cpu_session.h
36 …tTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_tensors, VectorRef *,
47 const std::vector<tensor::TensorPtr> &input_tensors,
49 …const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr> *input_tensors,
52 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
session_basic.h
75 std::vector<tensor::TensorPtr> input_tensors; member
111 …void RunOp(OpRunInfo *, const GraphInfo &, std::vector<tensor::TensorPtr> *input_tensors, VectorRe…
151 … const std::vector<tensor::TensorPtr> &input_tensors,
154 std::vector<tensor::TensorPtr> *input_tensors) const;
188 …laceholder(const KernelGraphPtr &kernel_graph, const std::vector<tensor::TensorPtr> &input_tensors,
210 …d CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_tensors,
231 const std::vector<tensor::TensorPtr> &input_tensors, in BuildOpImpl() argument
236 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImpl() argument
239 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImplOrigin() argument
264 const std::vector<tensor::TensorPtr> &input_tensors,
[all …]
gpu_session.h
49 const std::vector<tensor::TensorPtr> &input_tensors,
51 …const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr> *input_tensors,
54 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
85 …void RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors, const KernelGraph *k…
ascend_session.cc
793 const std::vector<tensor::TensorPtr> &input_tensors, in BuildOpImpl() argument
800 const auto &graph = PreBuildOp(op_run_info, input_tensors, tensors_mask); in BuildOpImpl()
835 … bool is_dynamic_shape, const std::vector<tensor::TensorPtr> &input_tensors) { in LaunchFunc() argument
838 for (auto &tensor : input_tensors) { in LaunchFunc()
845 RunOpMemoryAllocNew(input_tensors, tensor_to_node, *graph); in LaunchFunc()
855 LoadInputData(graph, input_tensors); in LaunchFunc()
893 const std::vector<tensor::TensorPtr> &input_tensors, in PrepareForOutputTensor() argument
898 runtime_instance->RunOpMallocPre(*graph, input_tensors); in PrepareForOutputTensor()
901 UpdateOutputs(graph, outputs, input_tensors, tensor_to_node); in PrepareForOutputTensor()
915 std::vector<tensor::TensorPtr> *input_tensors, in CreateKernelGraph() argument
[all …]
gpu_session.cc
254 void GPUSession::RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors, in RunOpAllocateMemory() argument
259 runtime_instance->RunOpAssignMemory(input_tensors, *kernel_graph); in RunOpAllocateMemory()
639 const std::vector<tensor::TensorPtr> &input_tensors, in BuildOpImpl() argument
648 const auto &kernel_graph = ConstructSingleOpGraph(op_run_info, input_tensors, tensors_mask); in BuildOpImpl()
664 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImplOrigin() argument
666 RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask); in RunOpImplOrigin()
670 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOpImpl() argument
672 MS_EXCEPTION_IF_NULL(input_tensors); in RunOpImpl()
674 const auto &kernel_graph = BuildOpImpl(*op_run_info, graph_info, *input_tensors, tensors_mask); in RunOpImpl()
675 EraseValueNodeTensor(tensors_mask, input_tensors); in RunOpImpl()
[all …]
session_basic.cc
158 const std::vector<tensor::TensorPtr> &input_tensors) { in GetNodeOutputTensorFromInputs() argument
182 if (input_idx >= input_tensors.size()) { in GetNodeOutputTensorFromInputs()
183 MS_LOG(EXCEPTION) << "Input idx:" << input_idx << "out of range:" << input_tensors.size(); in GetNodeOutputTensorFromInputs()
186 return input_tensors[input_idx]; in GetNodeOutputTensorFromInputs()
197 const std::vector<tensor::TensorPtr> &input_tensors, in CreateNodeOutputTensor() argument
203 auto tensor_from_input = GetNodeOutputTensorFromInputs(node_output_pair, graph, input_tensors); in CreateNodeOutputTensor()
250 const std::vector<tensor::TensorPtr> &input_tensors, in CreateNodeOutputTensors() argument
266 …auto out = CreateNodeOutputTensors(cnode->input(i), graph, input_tensors, tensor_to_node, node_to_… in CreateNodeOutputTensors()
283 …const auto &tensor = CreateNodeOutputTensor(item_with_index, graph, input_tensors, tensor_to_node); in CreateNodeOutputTensors()
377 const std::vector<tensor::TensorPtr> &input_tensors, in CreateNodeOutputPlaceholder() argument
[all …]
pynative_task_manager.h
34 std::vector<tensor::TensorPtr> input_tensors, in RunOpContext() argument
40 input_tensors_(std::move(input_tensors)), in RunOpContext()
47 const std::vector<tensor::TensorPtr> &input_tensors() const { return input_tensors_; } in input_tensors() function
executor.cc
425 std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs, in RunOp() argument
428 MS_EXCEPTION_IF_NULL(input_tensors); in RunOp()
434 for (auto &tensor : *input_tensors) { in RunOp()
443 session->RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask); in RunOp()
445 session->RunOpImpl(graph_info, op_run_info, input_tensors, outputs, tensors_mask); in RunOp()
453 task->input_tensors_ = input_tensors; in RunOp()
455 for (auto &tensor : *input_tensors) { in RunOp()
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/nnacl/int8/
concat_int8_coder.cc
40 size_t input_num = input_tensors().size(); in Prepare()
44 auto *input_tensor = input_tensors().at(i); in Prepare()
107 int in_tensor_count = input_tensors().size(); in DoCode()
110 for (int i = 0; i < static_cast<int>(input_tensors().size()); ++i) { in DoCode()
111 MS_CHECK_PTR(input_tensors().at(i)); in DoCode()
112 …code << "input_data[" << i << "] = " << allocator_->GetRuntimeAddr(input_tensors().at(i)) << ";\n"; in DoCode()
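The concat_int8_coder.cc hits above show the recurring micro-coder pattern: iterate input_tensors(), validate each pointer, and emit one line of generated C per input. Below is a minimal self-contained sketch of that loop; the Tensor struct and the EmitInputAssignments helper are illustrative stand-ins, not the real micro coder API, and the emitted address string stands in for allocator_->GetRuntimeAddr().

#include <sstream>
#include <string>
#include <vector>

struct Tensor {};  // stand-in for lite::Tensor, used only to make the sketch compile

// Emits one "input_data[i] = <address>;" line per input tensor, echoing the DoCode() loop above.
std::string EmitInputAssignments(const std::vector<Tensor *> &input_tensors) {
  std::ostringstream code;
  for (size_t i = 0; i < input_tensors.size(); ++i) {
    if (input_tensors.at(i) == nullptr) {
      continue;  // the real coder aborts via MS_CHECK_PTR; skipped here for brevity
    }
    code << "input_data[" << i << "] = input_" << i << "_addr;\n";  // placeholder for GetRuntimeAddr()
  }
  return code.str();
}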
/third_party/mindspore/mindspore/lite/tools/optimizer/fusion/
constant_folding_fusion.cc
350 auto input_tensors = GetCNodeInputTensors(input_cnode, fmk_type_); in Process() local
351 if (input_tensors.empty()) { in Process()
361 FreeTensors(&input_tensors, &output_tensors); in Process()
366 if (CopyQuantParams(input_cnode, input_tensors, output_tensors) != lite::RET_OK) { in Process()
368 FreeTensors(&input_tensors, &output_tensors); in Process()
371 …auto lite_kernel = GetLiteKernel(input_tensors, &output_tensors, input_cnode, context_.get(), ms_c… in Process()
373 FreeTensors(&input_tensors, &output_tensors); in Process()
381 FreeTensors(&input_tensors, &output_tensors); in Process()
390 FreeTensors(&input_tensors, &output_tensors); in Process()
396 FreeTensors(&input_tensors, &output_tensors); in Process()
[all …]
/third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/
data_prepare_actor.cc
124 void DataPrepareActor::PrepareData(const std::vector<std::vector<TensorPtr>> &input_tensors, in PrepareData() argument
129 if (input_tensors.size() > 0) { in PrepareData()
130 PrepareDataForDeviceTensorStore(input_tensors, context); in PrepareData()
132 PrepareDataForHostTensorQueue(input_tensors, context); in PrepareData()
134 PrepareDataForStepMode(input_tensors, context); in PrepareData()
193 …areActor::PrepareDataForDeviceTensorStore(const std::vector<std::vector<TensorPtr>> &input_tensors, in PrepareDataForDeviceTensorStore() argument
208 const auto &tensors = input_tensors[i]; in PrepareDataForDeviceTensorStore()
221 …PrepareDeviceTensorStoreForControlNode(graph_compiler_info_->control_node_parser_, input_tensors.b… in PrepareDataForDeviceTensorStore()
224 …epareActor::PrepareDataForHostTensorQueue(const std::vector<std::vector<TensorPtr>> &input_tensors, in PrepareDataForHostTensorQueue() argument
239 const auto &tensors = input_tensors[i]; in PrepareDataForHostTensorQueue()
[all …]
data_prepare_actor.h
55 …void PrepareData(const std::vector<std::vector<TensorPtr>> &input_tensors, OpContext<DeviceTensor>…
71 void PrepareDataForDeviceTensorStore(const std::vector<std::vector<TensorPtr>> &input_tensors,
73 void PrepareDataForHostTensorQueue(const std::vector<std::vector<TensorPtr>> &input_tensors,
75 void PrepareDataForStepMode(const std::vector<std::vector<TensorPtr>> &input_tensors,
kernel_actor.cc
134 const std::vector<TensorPtr> *input_tensors) { in RunOpControlWithInputTensor() argument
136 MS_EXCEPTION_IF_NULL(input_tensors); in RunOpControlWithInputTensor()
140 PushInputDeviceTensor(input_tensors); in RunOpControlWithInputTensor()
246 void KernelActor::PushInputDeviceTensor(const std::vector<TensorPtr> *input_tensors) { in PushInputDeviceTensor() argument
247 MS_EXCEPTION_IF_NULL(input_tensors); in PushInputDeviceTensor()
248 if (input_tensors->size() != real_input_num_) { in PushInputDeviceTensor()
249 MS_LOG(ERROR) << "Input tensor number: " << input_tensors->size() in PushInputDeviceTensor()
254 for (size_t input_index = 0; input_index < input_tensors->size(); input_index++) { in PushInputDeviceTensor()
255 const auto &input_tensor = (*input_tensors)[input_index]; in PushInputDeviceTensor()
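The kernel_actor.cc hits show the validation flow used before input tensors are pushed to the device: reject a null pointer, compare the tensor count against the expected input count, then walk the vector. A rough self-contained sketch of that flow follows; Tensor, TensorPtr, and PushInputTensors are simplified stand-ins rather than the actual actor types.

#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

struct Tensor {};  // stand-in for tensor::Tensor
using TensorPtr = std::shared_ptr<Tensor>;

// Validate-then-iterate flow: null check, count check, then per-tensor work.
bool PushInputTensors(const std::vector<TensorPtr> *input_tensors, size_t expected_input_num) {
  if (input_tensors == nullptr) {
    std::cerr << "input_tensors is null\n";
    return false;
  }
  if (input_tensors->size() != expected_input_num) {
    std::cerr << "Input tensor number: " << input_tensors->size()
              << " is not equal to expected: " << expected_input_num << "\n";
    return false;
  }
  for (size_t input_index = 0; input_index < input_tensors->size(); ++input_index) {
    const auto &input_tensor = (*input_tensors)[input_index];
    (void)input_tensor;  // the real actor copies each host tensor to its device address here
  }
  return true;
}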
loop_count_actor.cc
87 std::vector<std::vector<TensorPtr>> input_tensors; in SendOutput() local
88 Async(data_prepare_aid_, &DataPrepareActor::PrepareData, input_tensors, context); in SendOutput()
/third_party/mindspore/mindspore/lite/tools/converter/legacy_optimizer/graph/
infershape_pass.cc
49 void FreeTensors(std::vector<Tensor *> *input_tensors, std::vector<Tensor *> *output_tensors) { in FreeTensors() argument
50 if (input_tensors == nullptr) { in FreeTensors()
53 for (auto &tensor : *input_tensors) { in FreeTensors()
76 input_tensors->resize(0); in FreeTensors()
249 void PrintTensorShape(const std::vector<Tensor *> &input_tensors, const std::vector<Tensor *> &outp… in PrintTensorShape() argument
251 for (auto input_tensor : input_tensors) { in PrintTensorShape()
511 auto input_tensors = ConvertTensorToLiteTensor(graph, node->inputIndex); in InferSubgraph() local
513 …ut_tensors.empty() || output_tensors.size() != node->outputIndex.size() || input_tensors.empty() || in InferSubgraph()
514 input_tensors.size() != node->inputIndex.size()) { in InferSubgraph()
516 FreeTensors(&input_tensors, &output_tensors); in InferSubgraph()
[all …]
/third_party/mindspore/mindspore/lite/src/runtime/kernel/arm/fp32/
affine_fp32.cc
258 std::vector<lite::Tensor *> input_tensors; in FullMatmulKernelCreate() local
263 input_tensors = {full_input_, in_tensors_.at(kWeightIndex), in_tensors_.at(kBiasIndex)}; in FullMatmulKernelCreate()
265 input_tensors = {full_input_, in_tensors_.at(kWeightIndex)}; in FullMatmulKernelCreate()
275 … params, input_tensors, out_tensors_, static_cast<const lite::InnerContext *>(this->ms_context_)); in FullMatmulKernelCreate()
312 std::vector<lite::Tensor *> input_tensors; in IncrementMatmulKernelCreate() local
317 input_tensors = {increment_input_, in_tensors_.at(kWeightIndex), in_tensors_.at(kBiasIndex)}; in IncrementMatmulKernelCreate()
319 input_tensors = {increment_input_, in_tensors_.at(kWeightIndex)}; in IncrementMatmulKernelCreate()
329 …params, input_tensors, {increment_output_}, static_cast<const lite::InnerContext *>(this->ms_conte… in IncrementMatmulKernelCreate()
/third_party/mindspore/mindspore/lite/src/
lite_kernel_util.cc
87 std::vector<lite::Tensor *> input_tensors; in SubgraphInputTensors() local
94 if (!lite::IsContain(input_tensors, in_node_in_tensor)) { in SubgraphInputTensors()
95 input_tensors.push_back(in_node_in_tensor); in SubgraphInputTensors()
109 if (!lite::IsContain(input_tensors, in_node_in_tensor)) { in SubgraphInputTensors()
110 input_tensors.push_back(in_node_in_tensor); in SubgraphInputTensors()
116 return input_tensors; in SubgraphInputTensors()
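The lite_kernel_util.cc hits collect subgraph inputs with a simple deduplication pattern: a tensor is appended to input_tensors only when lite::IsContain does not already find it. Below is a minimal stand-alone sketch of that pattern, with a stub Tensor type and a hypothetical CollectUniqueInputs helper in place of the real lite classes.

#include <algorithm>
#include <vector>

struct Tensor {};  // stand-in for mindspore::lite::Tensor

// Append a candidate tensor only if it is not already in the list,
// mirroring the lite::IsContain / push_back pattern above.
std::vector<Tensor *> CollectUniqueInputs(const std::vector<Tensor *> &candidates) {
  std::vector<Tensor *> input_tensors;
  for (auto *tensor : candidates) {
    if (std::find(input_tensors.begin(), input_tensors.end(), tensor) == input_tensors.end()) {
      input_tensors.push_back(tensor);
    }
  }
  return input_tensors;
}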
/third_party/mindspore/mindspore/ccsrc/vm/
backend.cc
472 std::vector<tensor::TensorPtr> *input_tensors) { in CompileGraph() argument
481 …auto graph_id = graph_compiler_->CompileGraph(op_run_info, graph_info, tensors_mask, input_tensors, in CompileGraph()
499 …auto graph_compiler_info = ConstructGraphCompilerInfo(actor_info, tensors_mask, input_tensors, !en… in CompileGraph()
749 …graph_compiler_->GetSingleOpRunInfoAndGraphInfo(kernel, input_tensor_info.input_tensors, &op_run_i… in RunGraphBySingleOp()
753 &input_tensor_info.input_tensors); in RunGraphBySingleOp()
754 …(actor_info, &op_run_info, &input_tensor_info.input_tensors_mask, &input_tensor_info.input_tensors, in RunGraphBySingleOp()
799 std::vector<std::vector<tensor::TensorPtr>> input_tensors; in RunGraph() local
807 (void)input_tensors.emplace_back(input_tensor); in RunGraph()
818 (void)input_tensors.emplace_back(input_tensor); in RunGraph()
824 RunGraphBySingleOp(graph_compiler_info.graphs_, input_tensors, outputs); in RunGraph()
[all …]
/third_party/mindspore/mindspore/ccsrc/runtime/framework/
graph_compiler.h
61 const std::vector<std::vector<TensorPtr> *> &input_tensors, in GraphCompilerInfo()
69 input_tensors_(input_tensors), in GraphCompilerInfo()
104 … const std::vector<int64_t> *tensors_mask, std::vector<TensorPtr> *const input_tensors,
134 …GetSingleOpRunInfoAndGraphInfo(const CNodePtr &kernel, const std::vector<TensorPtr> &input_tensors,
/third_party/mindspore/mindspore/lite/micro/coder/
session.cc
53 context_->set_graph_inputs(coder_graph_->input_tensors()); in EndCode()
69 std::vector<lite::Tensor *> inputs = coder_graph_->input_tensors(); in Run()
163 std::vector<Tensor *> inputs = op_coder->input_tensors(); in InitOpcodersInputsAndOutputs()
166 std::vector<Tensor *> outputs = op_coder->input_tensors(); in InitOpcodersInputsAndOutputs()
171 std::vector<Tensor *> inputs = op_coder->input_tensors(); in InitOpcodersInputsAndOutputs()
194 auto inputs = node->input_tensors(); in InitTensorsRef()
/third_party/mindspore/mindspore/lite/src/cxx_api/model/
model_impl.cc
209 auto input_tensors = session_->GetInputs(); in Predict() local
210 if (input_tensors.empty()) { in Predict()
214 if (input_tensors.size() != inputs.size()) { in Predict()
220 auto input = input_tensors.at(i); in Predict()
223 ResetTensorData(old_data, input_tensors); in Predict()
229 ResetTensorData(old_data, input_tensors); in Predict()
242 ResetTensorData(old_data, input_tensors); in Predict()
255 ResetTensorData(old_data, input_tensors); in Predict()
264 ResetTensorData(old_data, input_tensors); in Predict()
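The model_impl.cc hits show Predict() validating user-provided inputs against the session's own input tensors and calling ResetTensorData on every failure path. A simplified stand-alone sketch of the up-front checks follows; Tensor and CheckInputs are illustrative placeholders, not the real MSTensor or session API.

#include <iostream>
#include <vector>

struct Tensor {};  // stand-in for the session's input tensor type

// Validate user inputs against the model's own input tensors before binding data,
// following the empty/size checks visible in Predict() above.
bool CheckInputs(const std::vector<Tensor *> &model_inputs, const std::vector<Tensor *> &user_inputs) {
  if (model_inputs.empty()) {
    std::cerr << "Model has no input tensors\n";
    return false;
  }
  if (model_inputs.size() != user_inputs.size()) {
    std::cerr << "Input count mismatch: model expects " << model_inputs.size()
              << ", got " << user_inputs.size() << "\n";
    return false;
  }
  return true;  // on failure paths the real code also restores old data via ResetTensorData()
}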
/third_party/mindspore/mindspore/lite/src/delegate/nnrt/
nnrt_model_kernel.cc
99 auto input_tensors = this->inputs(); in PrepareInputs() local
100 for (int i = 0; i < input_tensors.size(); i++) { in PrepareInputs()
101 auto tensor = input_tensors[i]; in PrepareInputs()
/third_party/mindspore/mindspore/lite/micro/coder/opcoders/cmsis-nn/int8/
mul_int8_coder.cc
27 input1_ = OperatorCoder::input_tensors().at(0); in Prepare()
28 input2_ = OperatorCoder::input_tensors().at(1); in Prepare()
