Home
last modified time | relevance | path

Searched refs:host_shape (Results 1 – 19 of 19) sorted by relevance

/third_party/mindspore/mindspore/ccsrc/runtime/device/ascend/
Dascend_device_address.cc203 std::vector<size_t> host_shape; in SyncDeviceToHost() local
204 (void)std::transform(shape.begin(), shape.end(), std::back_inserter(host_shape), LongToSize); in SyncDeviceToHost()
205 if (host_shape.empty()) { in SyncDeviceToHost()
206 host_shape.emplace_back(1); in SyncDeviceToHost()
215 auto shape_size = abstract::ShapeSize(host_shape); in SyncDeviceToHost()
241 std::vector<size_t> AscendDeviceAddress::GetDeviceShape(std::vector<size_t> *host_shape) const { in GetDeviceShape()
242 MS_EXCEPTION_IF_NULL(host_shape); in GetDeviceShape()
246 …device_shape = trans::TransShapeToDevice(*host_shape, format_, node_index.first, node_index.second… in GetDeviceShape()
249 *host_shape = trans::PaddingShape(*host_shape, format_); in GetDeviceShape()
251 host_shape->clear(); in GetDeviceShape()
[all …]
Dascend_device_address.h54 …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,
58 … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const override;
64 …bool SyncDeviceToHostAndConvertFormatBasedOnTransData(const std::vector<size_t> &host_shape, size_…
67 std::vector<size_t> GetDeviceShape(std::vector<size_t> *host_shape) const;
68 std::shared_ptr<LaunchKernel> CreateLaunchTransData(const std::vector<size_t> &host_shape,
Dascend_launch_transdata.h29 std::vector<size_t> host_shape) in AscendLaunchTransData() argument
37 shape_(host_shape) {} in AscendLaunchTransData()
/third_party/mindspore/mindspore/ccsrc/common/
Dtrans.cc891 std::vector<size_t> host_shape; in GetRuntimePaddingShape() local
902 …(void)std::transform(shape_temp.begin(), shape_temp.end(), std::back_inserter(host_shape), LongToS… in GetRuntimePaddingShape()
903 if (host_shape.empty()) { in GetRuntimePaddingShape()
904 host_shape.push_back(1); in GetRuntimePaddingShape()
907 host_shape = AnfAlgo::GetOutputInferShape(node, index); in GetRuntimePaddingShape()
910 if (trans::IsNeedPadding(format, host_shape.size())) { in GetRuntimePaddingShape()
911  host_shape = trans::PaddingShape(host_shape, format, AnfAlgo::GetOutputReshapeType(node, index)); in GetRuntimePaddingShape()
913 std::transform(host_shape.begin(), host_shape.end(), std::back_inserter(shape), SizeToLong); in GetRuntimePaddingShape()
1065 if (args.host_shape.size() != kNchwDims) { in CheckArgs()
1066 …MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dim… in CheckArgs()
[all …]
Dtrans.h66 std::vector<size_t> host_shape; member
228 std::vector<T> host_shape; variable
233 host_shape = trans::PaddingShapeTo5d(shape, pad_index);
235 host_shape = trans::PaddingShapeTo4d(shape, pad_index);
237 return host_shape;
/third_party/mindspore/tests/ut/cpp/common/
Dtrans_test.cc103 std::vector<size_t> host_shape = {43, 120}; in TEST_F() local
106 auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size); in TEST_F()
115 std::vector<size_t> host_shape = {120}; in TEST_F() local
118 auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size); in TEST_F()
127 std::vector<int64_t> host_shape = {-1, -1}; in TEST_F() local
130 auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size); in TEST_F()
139 std::vector<int64_t> host_shape = {-1}; in TEST_F() local
142 auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size); in TEST_F()
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/
Dgpu_device_address.cc132 const ShapeVector &host_shape, TypeId host_type, size_t slot, in LoadMemToHost() argument
145 mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape); in LoadMemToHost()
147 auto ret_rt_memcpy = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c()); in LoadMemToHost()
Dgpu_device_address.h54 … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const override;
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/cache_embedding/
Dcache_embedding.cc183 auto host_shape = host_tensor->shape_c(); in BindAndInitCacheTensor() local
185 if (host_shape.size() != 2 && host_shape.size() != 2 && host_shape[1] != cache_shape[1]) { in BindAndInitCacheTensor()
187 << "host shape:" << host_shape << ", cache shape:" << cache_shape; in BindAndInitCacheTensor()
194 LongToSize(host_shape[1])); in BindAndInitCacheTensor()
198 LongToSize(host_shape[1])); in BindAndInitCacheTensor()
247 std::vector<int64_t> host_shape{static_cast<int64_t>(hashmap_size), 4}; in InitHashMap() local
248 auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape); in InitHashMap()
266 std::vector<int64_t> host_shape{1}; in InitStep() local
267 auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape); in InitStep()
/third_party/mindspore/mindspore/ccsrc/pybind_api/ir/
Dtensor_py.cc342 auto host_shape = tensor.shape_c(); in FlushFromCache() local
344 if (host_shape.size() != 2 && host_shape.size() != 2 && host_shape[1] != cache_shape[1]) { in FlushFromCache()
346 << "host shape:" << host_shape << ", cache shape:" << cache_shape; in FlushFromCache()
353 … host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]); in FlushFromCache()
356 … host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]); in FlushFromCache()
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/
Dcpu_device_address.cc40 …ess::DumpMemToFile(const std::string &filepath, const std::string &, const ShapeVector &host_shape, in DumpMemToFile() argument
50 ret = DumpJsonParser::DumpToFile(path, ptr_, size_, host_shape, host_type); in DumpMemToFile()
Dcpu_device_address.h42 …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,
/third_party/mindspore/mindspore/ccsrc/runtime/device/
Ddevice_address.h87 …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape, in DumpMemToFile() argument
93 … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const { in LoadMemToHost() argument
/third_party/mindspore/mindspore/ccsrc/debug/
Dtensor_load.h198 … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type, in DumpTensorToFile() argument
219 return DumpJsonParser::DumpToFile(path, node->GetDataPtr(), host_size, host_shape, host_type); in DumpTensorToFile()
Ddebug_services.h407 … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
Ddebug_services.cc1256 … const std::string &host_fmt, const std::vector<int64_t> &host_shape, in DumpTensorToFile() argument
1259 …return tensor_loader_->DumpTensorToFile(tensor_name, trans_flag, filepath, host_fmt, host_shape, h… in DumpTensorToFile()
/third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/
Dswitch_actor.cc292 ShapeVector host_shape; in GetIndex() local
293 if (!device_tensor->SyncDeviceToHost(host_shape, size, type_id, static_cast<void *>(buf))) { in GetIndex()
/third_party/mindspore/mindspore/ccsrc/debug/debugger/
Ddebugger.h106 … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
Ddebugger.cc1154 … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type, in DumpTensorToFile() argument
1156 …rvices_.get()->DumpTensorToFile(tensor_name, trans_flag, filepath, host_fmt, host_shape, host_type, in DumpTensorToFile()