/third_party/mindspore/mindspore/ccsrc/backend/session/ |
D | session_basic.h |
  55   … const std::map<std::string, mindspore::tensor::TensorPtr> &params_list);
  75   std::vector<tensor::TensorPtr> input_tensors;
  81   tensor::TensorPtr output_stub_tensor;
  88   std::vector<tensor::TensorPtr> graph_output_tensors;
  109  …void RunGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRef *ou…
  110  …void RunGraphAsync(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRe…
  111  …void RunOp(OpRunInfo *, const GraphInfo &, std::vector<tensor::TensorPtr> *input_tensors, VectorRe…
  113  …void RunOpsInGraph(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs, VectorRe…
  134  …void InitPSParamAndOptim(const KernelGraphPtr &kernel_graph, const std::vector<tensor::TensorPtr> …
  136  virtual bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,  in CheckModelInputs()
  [all …]
|
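The RunGraph entry points above all take a compiled GraphId plus a vector of host tensors and hand results back through a VectorRef. The following is a minimal, hypothetical driver for that calling convention; it assumes a session::SessionBasic instance and a GraphId produced by a compile step that is not shown here, and it uses the flattening helper listed further down in base_ref_utils.cc. Include paths are approximate.

// Hypothetical driver for the RunGraph signature at line 109: execute an
// already-compiled graph on host tensors and flatten the VectorRef outputs.
#include <memory>
#include <vector>
#include "backend/session/session_basic.h"
#include "base/base_ref_utils.h"

std::vector<mindspore::tensor::TensorPtr> RunOnce(
    const std::shared_ptr<mindspore::session::SessionBasic> &session,
    const mindspore::GraphId &graph_id,
    const std::vector<mindspore::tensor::TensorPtr> &inputs) {
  mindspore::VectorRef outputs;
  session->RunGraph(graph_id, inputs, &outputs);
  // TransformVectorRefToMultiTensor (base_ref_utils.cc below) walks the nested
  // VectorRef and collects every tensor it finds.
  return mindspore::TransformVectorRefToMultiTensor(outputs);
}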
D | ascend_session.h |
  56   const std::vector<tensor::TensorPtr> &inputs_const) const override;
  57   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  59   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  65   const std::vector<tensor::TensorPtr> &input_tensors,
  68   …void BindAddressToTensor(const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_no…
  70   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
  73   …void RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr>…
  76   const std::vector<tensor::TensorPtr> &graph_inputs,
  84   … const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
  86   …DeviceAddressPtr AssignExtraMemForGraphOutput(const tensor::TensorPtr &tensor, const AnfNodePtr &n…
  [all …]
|
D | cpu_session.h |
  36   …void CreateOutputTensors(const GraphId &graph_id, const std::vector<tensor::TensorPtr> &input_tens…
  37   … std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node) override;
  39   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  41   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  47   const std::vector<tensor::TensorPtr> &input_tensors,
  49   …void RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr>…
  52   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
  55   const std::vector<tensor::TensorPtr> &inputs_const) const override;
  63   void UpdateDynamicOutputShape(const std::map<tensor::TensorPtr, KernelWithIndex> &tensor_to_node);
|
D | gpu_session.h |
  43   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  45   …aph(const std::shared_ptr<KernelGraph> &kernel_graph, const std::vector<tensor::TensorPtr> &inputs,
  49   const std::vector<tensor::TensorPtr> &input_tensors,
  51   …void RunOpImpl(const GraphInfo &graph_info, OpRunInfo *op_run_info, std::vector<tensor::TensorPtr>…
  54   std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
  59   const std::vector<tensor::TensorPtr> &inputs_const) const override;
  61   … const std::map<tensor::TensorPtr, session::KernelWithIndex> &tensor_to_node,
  85   …void RunOpAllocateMemory(const std::vector<tensor::TensorPtr> &input_tensors, const KernelGraph *k…
|
D | executor.h |
  94   std::vector<tensor::TensorPtr> input_tensors_;
  95   std::vector<tensor::TensorPtr> input_need_wait_tensors_;
  96   std::vector<tensor::TensorPtr> input_need_lock_tensors_;
  99   std::map<tensor::TensorPtr, session::KernelWithIndex> tensor_to_node_;
  107  std::vector<tensor::TensorPtr> input_tensors_;
  119  std::vector<tensor::TensorPtr> *input_tensors_{nullptr};
  160  …h(const SessionPtr &session, const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
  162  …c(const SessionPtr &session, const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
  165  std::vector<tensor::TensorPtr> *input_tensors, VectorRef *outputs,
  167  …h(const SessionPtr &session, const GraphId &graph_id, const std::vector<tensor::TensorPtr> &inputs,
|
D | gpu_inference_session.h |
  39   const std::vector<tensor::TensorPtr> &inputs_const) const;
  40   bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,
  42   bool CompareInput(const tensor::TensorPtr &input, const ParameterPtr &parameter) const;
  45   …tsInfo(const std::vector<ParameterPtr> &paras, const std::vector<tensor::TensorPtr> &inputs) const;
|
D | ascend_inference_session.h |
  39   const std::vector<tensor::TensorPtr> &inputs_const) const;
  40   bool CheckModelInputs(uint32_t graph_id, const std::vector<tensor::TensorPtr> &inputs,
  42   bool CompareInput(const tensor::TensorPtr &input, const ParameterPtr &parameter) const;
  45   …tsInfo(const std::vector<ParameterPtr> &paras, const std::vector<tensor::TensorPtr> &inputs) const;
|
D | pynative_task_manager.h |
  34   std::vector<tensor::TensorPtr> input_tensors,  in RunOpContext()
  35   std::map<tensor::TensorPtr, KernelWithIndex> tensor_to_node)  in RunOpContext() argument
  47   const std::vector<tensor::TensorPtr> &input_tensors() const { return input_tensors_; }  in input_tensors()
  48   …const std::map<tensor::TensorPtr, KernelWithIndex> &tensor_to_node() const { return tensor_to_node…  in tensor_to_node()
  55   std::vector<tensor::TensorPtr> input_tensors_;
  56   std::map<tensor::TensorPtr, session::KernelWithIndex> tensor_to_node_;
|
D | kernel_graph.h |
  134  ValueNodePtr NewValueNode(const tensor::TensorPtr &input_tensor);
  160  ValueNodePtr GetValueNodeByTensor(const tensor::TensorPtr &tensor);
  162  void TensorValueNodeMapAdd(const tensor::TensorPtr &tensor, const ValueNodePtr &value_node);
  205  …void set_input_ctrl_tensors(const std::shared_ptr<std::vector<tensor::TensorPtr>> &input_tensors_p…  in set_input_ctrl_tensors()
  209  …std::shared_ptr<std::vector<tensor::TensorPtr>> input_ctrl_tensors() const { return input_ctrl_ten…  in input_ctrl_tensors()
  235  …void AddInternalOutputTensor(const AnfNodePtr &node, size_t output_idx, const tensor::TensorPtr &t…
  236  tensor::TensorPtr GetInternalOutputTensor(const AnfNodePtr &node, size_t output_idx);
  280  …void SetInputTensors(const std::vector<tensor::TensorPtr> &input_tensors) { input_tensors_ = input…  in SetInputTensors()
  281  const std::vector<tensor::TensorPtr> &input_tensors() const { return input_tensors_; }  in input_tensors()
  285  tensor::TensorPtr GetNodeOutputTensor(const session::KernelWithIndex &output_index) const {  in GetNodeOutputTensor()
  [all …]
|
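The kernel_graph.h entries at lines 134, 160, and 162 form a small cache from host tensors to graph ValueNodes. A hedged sketch of how such a cache is typically used; the helper function and the way the graph object is obtained are assumptions beyond the listing.

// Hypothetical helper around the cache declared at kernel_graph.h lines 134/160/162:
// reuse an existing ValueNode for a tensor, or create and register a new one.
#include <memory>
#include "backend/session/kernel_graph.h"

mindspore::ValueNodePtr GetOrCreateValueNode(
    const std::shared_ptr<mindspore::session::KernelGraph> &graph,
    const mindspore::tensor::TensorPtr &tensor) {
  // Fast path: the graph already holds a ValueNode for this tensor (line 160).
  if (auto cached = graph->GetValueNodeByTensor(tensor)) {
    return cached;
  }
  // Slow path: wrap the tensor in a new ValueNode (line 134) and remember the
  // mapping so later lookups take the fast path (line 162).
  auto value_node = graph->NewValueNode(tensor);
  graph->TensorValueNodeMapAdd(tensor, value_node);
  return value_node;
}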
/third_party/mindspore/mindspore/lite/mindir/include/ |
D | mindir_tensor.h |
  20   TensorPtr MindIR_Tensor_Create();
  21   TensorPtr MindIR_Tensor_Create(const std::string &name, DataType data_type, const std::vector<int32…
  25   void MindIR_Tensor_SetName(TensorPtr *tensor, const std::string &name);
  27   void MindIR_Tensor_SetDataType(TensorPtr *tensor, DataType data_type);
  29   void MindIR_Tensor_SetDims(TensorPtr *tensor, const std::vector<int32_t> &dims);
  31   void MindIR_Tensor_SetFormat(TensorPtr *tensor, Format format);
  35   void MindIR_Tensor_SetData(TensorPtr *tensor, const std::vector<uint8_t> &data);
  38   void MindIR_Tensor_SetQuantParams(TensorPtr *tensor, const std::vector<QuantParam> &quant_params);
  40   void MindIR_Tensor_Destroy(TensorPtr *tensor);
|
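The mindir_tensor.h functions above form a create/set/destroy C-style API around the lite MindIR TensorPtr. A hedged usage sketch follows; only the function names and parameter lists come from the header, while the DataType/Format enumerator names (DATA_TYPE_FLOAT32, FORMAT_NCHW), the include path, and the omitted namespace qualification are assumptions.

// Hypothetical round trip through the C-style API above; the setters take a
// TensorPtr* exactly as declared in mindir_tensor.h.
#include <cstdint>
#include <vector>
#include "mindir_tensor.h"

void BuildMindIRTensor() {
  TensorPtr tensor = MindIR_Tensor_Create();              // line 20
  MindIR_Tensor_SetName(&tensor, "conv1.weight");         // line 25
  MindIR_Tensor_SetDataType(&tensor, DATA_TYPE_FLOAT32);  // line 27, enumerator assumed
  MindIR_Tensor_SetDims(&tensor, {1, 3, 224, 224});       // line 29
  MindIR_Tensor_SetFormat(&tensor, FORMAT_NCHW);          // line 31, enumerator assumed
  std::vector<uint8_t> raw(1 * 3 * 224 * 224 * sizeof(float), 0);
  MindIR_Tensor_SetData(&tensor, raw);                    // line 35
  MindIR_Tensor_Destroy(&tensor);                         // line 40
}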
/third_party/mindspore/mindspore/ccsrc/runtime/framework/ |
D | graph_compiler.h |
  41   using tensor::TensorPtr;
  61   const std::vector<std::vector<TensorPtr> *> &input_tensors,  in GraphCompilerInfo()
  82   std::vector<std::vector<TensorPtr> *> input_tensors_;
  104  … const std::vector<int64_t> *tensors_mask, std::vector<TensorPtr> *const input_tensors,
  118  void GetParamAndOutputIndex(const KernelGraphPtr &graph, const std::vector<TensorPtr> &inputs,
  124  …void GetSingleOpInputTensors(const CNodePtr &kernel, const std::map<KernelWithIndex, TensorPtr> &o…
  126  … const std::vector<TensorPtr> &graph_inputs, InputTensorInfo *const input_tensor_info);
  128  …TensorPtr GetSingleOpInputTensorByIndex(const CNodePtr &kernel, const std::map<KernelWithIndex, Te…
  130  const std::vector<TensorPtr> &graph_inputs,
  134  …void GetSingleOpRunInfoAndGraphInfo(const CNodePtr &kernel, const std::vector<TensorPtr> &input_te…
  [all …]
|
D | host_tensor_queue.h |
  27   using mindspore::tensor::TensorPtr;
  36   void Push(const std::vector<TensorPtr> &tensors) { buffers_.push(tensors); }  in Push()
  38   const std::vector<TensorPtr> &Pull() { return buffers_.front(); }  in Pull()
  45   std::queue<std::vector<TensorPtr>> buffers_;
|
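host_tensor_queue.h shows a queue of whole tensor batches: Push enqueues one step's inputs and Pull peeks the oldest batch (buffers_.front(), nothing is popped). A minimal sketch, assuming the class is named HostTensorQueue in namespace mindspore::runtime; only the member signatures at lines 36 and 38 appear in the listing.

// Minimal sketch of the batch-queue semantics above.
#include <vector>
#include "runtime/framework/host_tensor_queue.h"

void FeedOneStep(mindspore::runtime::HostTensorQueue *queue,
                 const std::vector<mindspore::tensor::TensorPtr> &step_inputs) {
  queue->Push(step_inputs);           // enqueue the whole batch (line 36)
  const auto &front = queue->Pull();  // peek the oldest batch; it is not removed (line 38)
  (void)front;                        // a host data-source kernel would read from it here
}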
/third_party/mindspore/mindspore/ccsrc/cxx_api/graph/gpu/ |
D | gpu_graph_impl.h |
  44   Status CheckModelInputs(const std::vector<tensor::TensorPtr> &inputs) const;
  45   std::vector<tensor::TensorPtr> RunGraph(const std::vector<tensor::TensorPtr> &inputs);
  52   std::vector<tensor::TensorPtr> inputs_info_;
  53   std::vector<tensor::TensorPtr> outputs_info_;
  54   std::vector<tensor::TensorPtr> last_inputs_;
  55   std::vector<tensor::TensorPtr> last_outputs_;
|
/third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/ |
D | data_prepare_actor.h |
  55   …void PrepareData(const std::vector<std::vector<TensorPtr>> &input_tensors, OpContext<DeviceTensor>…
  71   void PrepareDataForDeviceTensorStore(const std::vector<std::vector<TensorPtr>> &input_tensors,
  73   void PrepareDataForHostTensorQueue(const std::vector<std::vector<TensorPtr>> &input_tensors,
  75   void PrepareDataForStepMode(const std::vector<std::vector<TensorPtr>> &input_tensors,
  79   …orWeightNode(const AnfNodePtr &backend_node, const AnfNodePtr &front_node, const TensorPtr &tensor,
  90   const std::vector<TensorPtr> &tensors,
  92   void PrepareHostTensorQueueForControlNode(const std::vector<TensorPtr> &tensors,
  93   std::vector<TensorPtr> *const host_tensors,
  97   …const AnfNodePtr &node, const AnfNodePtr &front_node, const TensorPtr &tensor, const DeviceContext…
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/ |
D | cpu_kernel_runtime.h |
  41   …void CreateOutputTensors(session::KernelGraph *kernel_graph, const std::vector<tensor::TensorPtr> …
  42   … VectorRef *outputs, std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node);
  43   …void BindInputOutput(session::KernelGraph *kernel_graph, const std::vector<tensor::TensorPtr> &inp…
  60   …tensor::TensorPtr CreatTensorForOutput(session::KernelGraph *kernel_graph, const CNodePtr &node, s…
  61   … std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node);
  63   … std::map<tensor::TensorPtr, session::KernelWithIndex> *tensor_to_node);
  64   …tTensorAddressPtr(const session::KernelGraph &graph, const std::vector<tensor::TensorPtr> &inputs);
  71   std::map<AnfNodePtr, tensor::TensorPtr> input_param_tensor_map_;
|
/third_party/mindspore/mindspore/lite/minddata/wrapper/ |
D | album_op_android.h |
  96   Status LoadImageTensor(const std::string &image_file, int32_t col_num, TensorPtr *tensor);
  103  Status LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  110  Status LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  117  Status LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  124  Status LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  131  Status LoadFloatTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  138  Status LoadIntTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor);
  144  Status LoadEmptyTensor(int32_t col_num, TensorPtr *tensor);
  151  Status LoadIDTensor(const std::string &file, int32_t col_num, TensorPtr *tensor);
|
D | album_op_android.cc |
  180  Status AlbumOp::LoadImageTensor(const std::string &image_file_path, int32_t col_num, TensorPtr *ten…  in LoadImageTensor()
  181  TensorPtr image;  in LoadImageTensor()
  182  TensorPtr rotate_tensor;  in LoadImageTensor()
  260  Status AlbumOp::LoadStringArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *t…  in LoadStringArrayTensor()
  268  Status AlbumOp::LoadStringTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tensor…  in LoadStringTensor()
  273  TensorPtr label;  in LoadStringTensor()
  278  Status AlbumOp::LoadIntArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *tens…  in LoadIntArrayTensor()
  305  Status AlbumOp::LoadFloatArrayTensor(const nlohmann::json &json_obj, int32_t col_num, TensorPtr *te…  in LoadFloatArrayTensor()
  332  Status AlbumOp::LoadIDTensor(const std::string &file, int32_t col_num, TensorPtr *tensor) {  in LoadIDTensor()
  344  Status AlbumOp::LoadEmptyTensor(int32_t col_num, TensorPtr *tensor) {  in LoadEmptyTensor()
  [all …]
|
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/kernels/image/ |
D | bounding_box.h |
  66   Status WriteToTensor(const TensorPtr &bbox_tensor, dsize_t index_of_bbox = 0);
  74   static Status ReadFromTensor(const TensorPtr &bbox_tensor, dsize_t index_of_bbox,
  88   static Status GetListOfBoundingBoxes(const TensorPtr &bbox_tensor,
  96   TensorPtr *tensor_out);
  105  …static Status PadBBoxes(const TensorPtr *bbox_list, size_t bbox_count, int32_t pad_top, int32_t pa…
  114  …static Status UpdateBBoxesForCrop(TensorPtr *bbox_list, size_t *bbox_count, int32_t CB_Xmin, int32…
  125  …static Status UpdateBBoxesForResize(const TensorPtr &bbox_list, size_t bbox_count, int32_t target_…
|
/third_party/mindspore/mindspore/ccsrc/cxx_api/graph/ascend/ |
D | ascend_graph_impl.h |
  48   Status CheckModelInputs(const std::vector<tensor::TensorPtr> &inputs) const;
  49   std::vector<tensor::TensorPtr> RunGraph(const std::vector<tensor::TensorPtr> &inputs);
  57   std::vector<tensor::TensorPtr> inputs_info_;
  58   std::vector<tensor::TensorPtr> outputs_info_;
  59   std::vector<tensor::TensorPtr> last_inputs_;
  60   std::vector<tensor::TensorPtr> last_outputs_;
|
/third_party/mindspore/mindspore/ccsrc/minddata/dataset/core/ |
D | tensor.h |
  63   using TensorPtr = std::shared_ptr<Tensor>;  variable
  91   static Status CreateEmpty(const TensorShape &shape, const DataType &type, TensorPtr *out);
  100  …CreateFromMemory(const TensorShape &shape, const DataType &type, const uchar *src, TensorPtr *out);
  110  const dsize_t &length, TensorPtr *out);
  116  static Status CreateFromTensor(const TensorPtr &in, TensorPtr *out) {  in CreateFromTensor()
  123  static Status CreateFromMSTensor(const MSTensor &in, TensorPtr *out);
  130  static Status CreateFromNpArray(const py::array &arr, TensorPtr *out);
  139  …ateFromByteList(const dataengine::BytesList &bytes_list, const TensorShape &shape, TensorPtr *out);
  150  const DataType &type, dsize_t pad_size, TensorPtr *out);
  160  …static Status CreateFromVector(const std::vector<T> &items, const TensorShape &shape, TensorPtr *o…  in CreateFromVector()
  [all …]
|
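The dataset tensor.h entry shows the MindData factory convention: every Create* is a static function that returns a Status and writes the new tensor through a TensorPtr out-parameter. A hedged sketch of that pattern using three of the listed factories; the TensorShape/DataType helpers used here are the conventional MindData ones and are assumptions beyond the lines listed above.

// Hedged sketch of the MindData factory pattern: Status result, TensorPtr out-parameter.
#include <vector>
#include "minddata/dataset/core/tensor.h"

namespace mindspore {
namespace dataset {
Status MakeTensors() {
  TensorPtr zeros;
  // 2x3 float32 tensor (CreateEmpty, line 91).
  Status rc = Tensor::CreateEmpty(TensorShape({2, 3}), DataType(DataType::DE_FLOAT32), &zeros);
  if (rc.IsError()) {
    return rc;
  }
  TensorPtr from_vec;
  // Same shape, populated from a host vector (CreateFromVector, line 160).
  rc = Tensor::CreateFromVector(std::vector<float>{0.f, 1.f, 2.f, 3.f, 4.f, 5.f},
                                TensorShape({2, 3}), &from_vec);
  if (rc.IsError()) {
    return rc;
  }
  TensorPtr copy;
  // Build one tensor from another (CreateFromTensor, line 116).
  return Tensor::CreateFromTensor(from_vec, &copy);
}
}  // namespace dataset
}  // namespace mindspore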
/third_party/mindspore/mindspore/ccsrc/vm/ |
D | backend.cc |
  137  void PushInputTensor(const BaseRef &arg, std::vector<tensor::TensorPtr> *inputs) {  in PushInputTensor()
  139  if (utils::isa<tensor::TensorPtr>(arg)) {  in PushInputTensor()
  140  auto value = utils::cast<tensor::TensorPtr>(arg);  in PushInputTensor()
  150  [](const ValuePtr &v) { return v->cast<tensor::TensorPtr>(); });  in PushInputTensor()
  152  tensor::TensorPtr scalar_tensor = ScalarToTensor(value->cast<ScalarPtr>());  in PushInputTensor()
  158  inputs->push_back(value->cast<tensor::TensorPtr>());  in PushInputTensor()
  162  inputs->push_back(py::cast<tensor::TensorPtr>(value));  in PushInputTensor()
  175  std::vector<tensor::TensorPtr> *input_tensor) {  in PushTensor()
  197  TensorPtr CreateOutputTensor(const AnfNodePtr &output_node, size_t output_index) {  in CreateOutputTensor()
  276  std::vector<tensor::TensorPtr> inputs;  in MsRunGraph()
  [all …]
|
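backend.cc's PushInputTensor() dispatches on the dynamic type of a BaseRef argument before pushing a tensor. Below is a condensed restatement of the two branches visible in the listing (a plain TensorPtr, and a scalar Value lifted through ScalarToTensor); the ValueTuple and py::object branches are omitted, and the include paths are approximate.

// Condensed sketch of the dispatch performed by PushInputTensor() above.
#include <vector>
#include "base/base_ref.h"
#include "ir/scalar.h"
#include "utils/convert_utils.h"

void PushInputTensorSketch(const mindspore::BaseRef &arg,
                           std::vector<mindspore::tensor::TensorPtr> *inputs) {
  if (mindspore::utils::isa<mindspore::tensor::TensorPtr>(arg)) {
    // Already a tensor: push it as-is (lines 139-140).
    inputs->push_back(mindspore::utils::cast<mindspore::tensor::TensorPtr>(arg));
  } else if (mindspore::utils::isa<mindspore::ValuePtr>(arg)) {
    auto value = mindspore::utils::cast<mindspore::ValuePtr>(arg);
    if (value->isa<mindspore::Scalar>()) {
      // Scalars go through ScalarToTensor (line 152; declared in convert_utils.h line 63).
      inputs->push_back(mindspore::ScalarToTensor(value->cast<mindspore::ScalarPtr>()));
    }
  }
}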
/third_party/mindspore/mindspore/core/utils/ |
D | tensor_construct_utils.cc |
  22   tensor::TensorPtr TensorConstructUtils::CreateZerosTensor(const TypePtr &type_ptr, const std::vecto…  in CreateZerosTensor()
  25   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);  in CreateZerosTensor()
  35   tensor::TensorPtr TensorConstructUtils::CreateOnesTensor(const TypePtr &type_ptr, const std::vector…  in CreateOnesTensor()
  38   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape);  in CreateOnesTensor()
  76   tensor::TensorPtr TensorConstructUtils::CreateTensor(const TypePtr &type_ptr, const std::vector<int…  in CreateTensor()
  80   tensor::TensorPtr tensor = std::make_shared<tensor::Tensor>(type_id, shape, data, type_id);  in CreateTensor()
|
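tensor_construct_utils.cc provides zero-, one-, and data-initialized tensor constructors keyed by a TypePtr and a shape. A minimal sketch, assuming the truncated shape parameter is std::vector<int64_t> and that the corresponding header lives at utils/tensor_construct_utils.h; kFloat32 is the usual core TypePtr constant.

// Minimal sketch of the constant-tensor constructors above.
#include <vector>
#include "utils/tensor_construct_utils.h"

void MakeConstantTensors() {
  const std::vector<int64_t> shape = {2, 3};
  // 2x3 float32 tensor filled with zeros (CreateZerosTensor, line 22).
  auto zeros = mindspore::TensorConstructUtils::CreateZerosTensor(mindspore::kFloat32, shape);
  // Same shape, filled with ones (CreateOnesTensor, line 35).
  auto ones = mindspore::TensorConstructUtils::CreateOnesTensor(mindspore::kFloat32, shape);
  (void)zeros;
  (void)ones;
}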
/third_party/mindspore/mindspore/core/base/ |
D | base_ref_utils.cc |
  22   void IterateFindTensor(std::vector<tensor::TensorPtr> *msTensors, const VectorRef &ref_list) {  in IterateFindTensor()
  24   if (utils::isa<tensor::TensorPtr>(ref_list[i])) {  in IterateFindTensor()
  37   std::vector<tensor::TensorPtr> TransformVectorRefToMultiTensor(const VectorRef &base_ref) {  in TransformVectorRefToMultiTensor()
  38   std::vector<tensor::TensorPtr> msTensors;  in TransformVectorRefToMultiTensor()
|
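base_ref_utils.cc flattens a nested VectorRef of graph outputs into a plain vector of tensors. Below is a condensed restatement of the recursion IterateFindTensor() implements, covering only the two branches implied by the listing (tensor leaf, nested VectorRef); the function name and include paths here are illustrative.

// Condensed sketch of the VectorRef flattening recursion above.
#include <vector>
#include "base/base_ref.h"
#include "ir/tensor.h"

void CollectTensors(const mindspore::VectorRef &ref_list,
                    std::vector<mindspore::tensor::TensorPtr> *tensors) {
  for (size_t i = 0; i < ref_list.size(); ++i) {
    if (mindspore::utils::isa<mindspore::tensor::TensorPtr>(ref_list[i])) {
      // Leaf case: the element is already a tensor (line 24).
      tensors->push_back(mindspore::utils::cast<mindspore::tensor::TensorPtr>(ref_list[i]));
    } else if (mindspore::utils::isa<mindspore::VectorRef>(ref_list[i])) {
      // Nested case: recurse into the inner VectorRef.
      CollectTensors(mindspore::utils::cast<mindspore::VectorRef>(ref_list[i]), tensors);
    }
  }
}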
/third_party/mindspore/tests/ut/cpp/ir/ |
D | meta_tensor_test.cc |
  188  TensorPtr tensor = TensorPy::MakeTensor(py::array(tuple), kFloat64);  in TEST_F()
  207  TensorPtr tensor_int8 = TensorPy::MakeTensor(py::array(tuple), kInt8);  in TEST_F()
  212  TensorPtr tensor_int16 = TensorPy::MakeTensor(py::array(tuple), kInt16);  in TEST_F()
  215  TensorPtr tensor_int32 = TensorPy::MakeTensor(py::array(tuple), kInt32);  in TEST_F()
  218  TensorPtr tensor_float16 = TensorPy::MakeTensor(py::array(tuple), kFloat16);  in TEST_F()
  221  TensorPtr tensor_float32 = TensorPy::MakeTensor(py::array(tuple), kFloat32);  in TEST_F()
  224  TensorPtr tensor_float64 = TensorPy::MakeTensor(py::array(tuple), kFloat64);  in TEST_F()
  230  TensorPtr t1 = TensorPy::MakeTensor(py::array(tuple), kInt32);  in TEST_F()
  231  TensorPtr t2 = TensorPy::MakeTensor(py::array(tuple), kInt32);  in TEST_F()
  236  TensorPtr t3 = std::make_shared<Tensor>(kInt32->type_id(), shape);  in TEST_F()
  [all …]
|
/third_party/mindspore/mindspore/ccsrc/utils/ |
D | convert_utils.h |
  38   using TensorPtr = std::shared_ptr<Tensor>;  variable
  63   tensor::TensorPtr ScalarToTensor(const ScalarPtr &scalar);
  66   std::vector<T> TensorValueToVector(const tensor::TensorPtr &tensor) {  in TensorValueToVector()
  77   void TensorValueToTensor(const ValuePtr &value, std::vector<tensor::TensorPtr> *tensors);
|
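convert_utils.h rounds out the picture with scalar/tensor conversions. A small hedged sketch of ScalarToTensor and TensorValueToVector; the FP32Imm wrapper used to build the ScalarPtr is the usual core scalar type and, like the include paths, is an assumption beyond this listing.

// Sketch: lift a float into a ScalarPtr, convert it to a tensor, read it back.
#include <memory>
#include <vector>
#include "ir/scalar.h"
#include "utils/convert_utils.h"

void ScalarRoundTrip() {
  mindspore::ScalarPtr scalar = std::make_shared<mindspore::FP32Imm>(1.5f);
  // Tensor holding the single value 1.5 (ScalarToTensor, line 63).
  mindspore::tensor::TensorPtr tensor = mindspore::ScalarToTensor(scalar);
  // Copy the tensor payload back into host memory (TensorValueToVector, line 66).
  std::vector<float> values = mindspore::TensorValueToVector<float>(tensor);
  (void)values;
}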