/third_party/mindspore/mindspore/ccsrc/runtime/device/ascend/ |
D | ascend_device_address.cc |
    203  std::vector<size_t> host_shape;  in SyncDeviceToHost() local
    204  (void)std::transform(shape.begin(), shape.end(), std::back_inserter(host_shape), LongToSize);  in SyncDeviceToHost()
    205  if (host_shape.empty()) {  in SyncDeviceToHost()
    206  host_shape.emplace_back(1);  in SyncDeviceToHost()
    215  auto shape_size = abstract::ShapeSize(host_shape);  in SyncDeviceToHost()
    241  std::vector<size_t> AscendDeviceAddress::GetDeviceShape(std::vector<size_t> *host_shape) const {  in GetDeviceShape()
    242  MS_EXCEPTION_IF_NULL(host_shape);  in GetDeviceShape()
    246  …device_shape = trans::TransShapeToDevice(*host_shape, format_, node_index.first, node_index.second…  in GetDeviceShape()
    249  *host_shape = trans::PaddingShape(*host_shape, format_);  in GetDeviceShape()
    251  host_shape->clear();  in GetDeviceShape()
    [all …]
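Lines 203-206 show the host-shape normalization idiom this file relies on before computing element counts: convert the signed shape to size_t, then promote a rank-0 (scalar) result to {1}. A minimal standalone sketch of that idiom, with a plain lambda standing in for LongToSize:

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <iterator>
    #include <vector>

    // Sketch of the normalization in SyncDeviceToHost() (lines 203-206):
    // signed host dims become size_t dims, and a scalar gets shape {1}.
    std::vector<size_t> NormalizeHostShape(const std::vector<int64_t> &shape) {
      std::vector<size_t> host_shape;
      (void)std::transform(shape.begin(), shape.end(), std::back_inserter(host_shape),
                           [](int64_t v) { return static_cast<size_t>(v); });  // stands in for LongToSize
      if (host_shape.empty()) {
        host_shape.emplace_back(1);  // a scalar still occupies one element
      }
      return host_shape;
    }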
|
D | ascend_device_address.h |
    54  …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,
    58  … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const override;
    64  …bool SyncDeviceToHostAndConvertFormatBasedOnTransData(const std::vector<size_t> &host_shape, size_…
    67  std::vector<size_t> GetDeviceShape(std::vector<size_t> *host_shape) const;
    68  std::shared_ptr<LaunchKernel> CreateLaunchTransData(const std::vector<size_t> &host_shape,
|
D | ascend_launch_transdata.h |
    29  std::vector<size_t> host_shape)  in AscendLaunchTransData() argument
    37  shape_(host_shape) {}  in AscendLaunchTransData()
|
/third_party/mindspore/mindspore/ccsrc/common/ |
D | trans.cc |
    891  std::vector<size_t> host_shape;  in GetRuntimePaddingShape() local
    902  …(void)std::transform(shape_temp.begin(), shape_temp.end(), std::back_inserter(host_shape), LongToS…  in GetRuntimePaddingShape()
    903  if (host_shape.empty()) {  in GetRuntimePaddingShape()
    904  host_shape.push_back(1);  in GetRuntimePaddingShape()
    907  host_shape = AnfAlgo::GetOutputInferShape(node, index);  in GetRuntimePaddingShape()
    910  if (trans::IsNeedPadding(format, host_shape.size())) {  in GetRuntimePaddingShape()
    911  … host_shape = trans::PaddingShape(host_shape, format, AnfAlgo::GetOutputReshapeType(node, index));  in GetRuntimePaddingShape()
    913  std::transform(host_shape.begin(), host_shape.end(), std::back_inserter(shape), SizeToLong);  in GetRuntimePaddingShape()
    1065  if (args.host_shape.size() != kNchwDims) {  in CheckArgs()
    1066  …MS_LOG(ERROR) << "Invalid host shape, host shape dims:" << args.host_shape.size() << ", expect dim…  in CheckArgs()
    [all …]
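GetRuntimePaddingShape() (lines 891-913) round-trips through size_t: build host_shape, promote a scalar to {1}, pad when trans::IsNeedPadding() says the device format needs a higher rank, then convert back to int64_t. A hedged sketch of that control flow, with the padding rule reduced to a hypothetical front-fill-to-4D:

    #include <cstdint>
    #include <vector>

    // Sketch of the GetRuntimePaddingShape() flow in trans.cc: pad the host
    // shape up to the rank the device format expects (assumed 4-D here).
    std::vector<int64_t> RuntimePaddingShapeSketch(std::vector<int64_t> shape,
                                                   size_t device_rank = 4) {
      if (shape.empty()) {
        shape.push_back(1);  // scalar -> {1}, mirroring line 904
      }
      if (shape.size() < device_rank) {  // stands in for trans::IsNeedPadding()
        // Front-fill with 1s; stands in for trans::PaddingShape().
        shape.insert(shape.begin(), device_rank - shape.size(), 1);
      }
      return shape;
    }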
|
D | trans.h |
    66  std::vector<size_t> host_shape;  member
    228  std::vector<T> host_shape;  variable
    233  host_shape = trans::PaddingShapeTo5d(shape, pad_index);
    235  host_shape = trans::PaddingShapeTo4d(shape, pad_index);
    237  return host_shape;
|
/third_party/mindspore/tests/ut/cpp/common/ |
D | trans_test.cc |
    103  std::vector<size_t> host_shape = {43, 120};  in TEST_F() local
    106  auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size);  in TEST_F()
    115  std::vector<size_t> host_shape = {120};  in TEST_F() local
    118  auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size);  in TEST_F()
    127  std::vector<int64_t> host_shape = {-1, -1};  in TEST_F() local
    130  auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size);  in TEST_F()
    139  std::vector<int64_t> host_shape = {-1};  in TEST_F() local
    142  auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size);  in TEST_F()
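All four fixtures follow one template: pick a host shape (the int64_t variants use -1 for dynamic dims), a format, and an input_hidden_size, then call trans::TransShapeToDevice and check the result. A hedged reconstruction of one case; the fixture name, format value, hidden size, and assertion are assumptions, since the listing truncates them:

    #include "gtest/gtest.h"
    #include "common/trans.h"

    // Hypothetical reconstruction of one case from trans_test.cc; the fixture
    // class is assumed to be declared earlier in the file, and the real test
    // checks exact device dims that the search listing does not show.
    TEST_F(FormatTransTest, trans_shape_to_device_2d) {
      std::vector<size_t> host_shape = {43, 120};
      std::string format = kOpFormat_FRAC_NZ;         // assumed format
      std::vector<int64_t> input_hidden_size = {16};  // assumed hidden size
      auto trans_shape = trans::TransShapeToDevice(host_shape, format, 1, input_hidden_size);
      EXPECT_FALSE(trans_shape.empty());              // placeholder assertion
    }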
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/ |
D | gpu_device_address.cc |
    132  const ShapeVector &host_shape, TypeId host_type, size_t slot,  in LoadMemToHost() argument
    145  mindspore::tensor::TensorPtr out_tensor = std::make_shared<tensor::Tensor>(host_type, host_shape);  in LoadMemToHost()
    147  auto ret_rt_memcpy = SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c());  in LoadMemToHost()
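LoadMemToHost() (lines 132-147) materializes a host tensor of the requested type and shape, then copies device memory into its buffer via SyncDeviceToHost. A trimmed sketch of that sequence; error handling and the debugger bookkeeping around the copy are omitted, and the free-function wrapper is a stand-in for the member context:

    #include <memory>
    #include "ir/tensor.h"
    #include "runtime/device/device_address.h"

    // Trimmed sketch of the copy sequence in GPUDeviceAddress::LoadMemToHost():
    // allocate a host tensor, then sync device memory into its data buffer.
    bool LoadToHostSketch(const mindspore::device::DeviceAddress &addr,
                          const ShapeVector &host_shape, TypeId host_type, size_t host_size) {
      auto out_tensor = std::make_shared<mindspore::tensor::Tensor>(host_type, host_shape);
      // SyncDeviceToHost fills the tensor's host buffer from device memory.
      return addr.SyncDeviceToHost(host_shape, host_size, host_type, out_tensor->data_c());
    }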
|
D | gpu_device_address.h |
    54  … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const override;
|
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/cache_embedding/ |
D | cache_embedding.cc |
    183  auto host_shape = host_tensor->shape_c();  in BindAndInitCacheTensor() local
    185  if (host_shape.size() != 2 && host_shape.size() != 2 && host_shape[1] != cache_shape[1]) {  in BindAndInitCacheTensor()
    187  << "host shape:" << host_shape << ", cache shape:" << cache_shape;  in BindAndInitCacheTensor()
    194  LongToSize(host_shape[1]));  in BindAndInitCacheTensor()
    198  LongToSize(host_shape[1]));  in BindAndInitCacheTensor()
    247  std::vector<int64_t> host_shape{static_cast<int64_t>(hashmap_size), 4};  in InitHashMap() local
    248  auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape);  in InitHashMap()
    266  std::vector<int64_t> host_shape{1};  in InitStep() local
    267  auto new_tensor = std::make_shared<tensor::Tensor>(type_id, host_shape);  in InitStep()
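One detail worth flagging at line 185: host_shape.size() != 2 is tested twice, cache_shape.size() is never tested, and because the clauses are joined with &&, host_shape[1] is only read after size() != 2 has already passed, so the index can be out of bounds for a 0-D or 1-D tensor; conversely, a 2-D host tensor with a mismatched dim 1 passes silently. The same expression recurs in tensor_py.cc below, which suggests a copied typo. A hedged sketch of what the guard presumably intends:

    #include <cstdint>
    #include <vector>

    // Presumed intent of the guard in BindAndInitCacheTensor(): both tensors
    // are 2-D and agree on the embedding dimension (dim 1). This is a reading
    // of the code, not a confirmed upstream fix.
    bool CacheShapesCompatible(const std::vector<int64_t> &host_shape,
                               const std::vector<int64_t> &cache_shape) {
      return host_shape.size() == 2 && cache_shape.size() == 2 &&
             host_shape[1] == cache_shape[1];
    }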
|
/third_party/mindspore/mindspore/ccsrc/pybind_api/ir/ |
D | tensor_py.cc |
    342  auto host_shape = tensor.shape_c();  in FlushFromCache() local
    344  if (host_shape.size() != 2 && host_shape.size() != 2 && host_shape[1] != cache_shape[1]) {  in FlushFromCache()
    346  << "host shape:" << host_shape << ", cache shape:" << cache_shape;  in FlushFromCache()
    353  … host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]);  in FlushFromCache()
    356  … host_data_max_size, cache_data_max_size, hashmap_size, host_shape[1]);  in FlushFromCache()
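The guard at line 344 is the same doubled host_shape.size() != 2 expression flagged under cache_embedding.cc above; the corrected sketch given there applies here unchanged.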
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/ |
D | cpu_device_address.cc |
    40  …ess::DumpMemToFile(const std::string &filepath, const std::string &, const ShapeVector &host_shape,  in DumpMemToFile() argument
    50  ret = DumpJsonParser::DumpToFile(path, ptr_, size_, host_shape, host_type);  in DumpMemToFile()
|
D | cpu_device_address.h |
    42  …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/ |
D | device_address.h |
    87  …pMemToFile(const std::string &filepath, const std::string &host_fmt, const ShapeVector &host_shape,  in DumpMemToFile() argument
    93  … const ShapeVector &host_shape, TypeId host_type, size_t slot, bool keep_prev) const {  in LoadMemToHost() argument
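device_address.h gives DumpMemToFile and LoadMemToHost inline do-nothing bodies, so only backends with dump support (the Ascend, GPU, and CPU overrides above) provide real implementations. The pattern, reduced to a minimal self-contained form; the parameter lists are deliberately trimmed relative to the real signatures:

    #include <cstdint>
    #include <string>
    #include <vector>

    using ShapeVector = std::vector<int64_t>;

    // Minimal form of the base-class pattern in device_address.h: virtual
    // hooks with do-nothing defaults that each backend may override.
    class DeviceAddressSketch {
     public:
      virtual ~DeviceAddressSketch() = default;
      virtual bool DumpMemToFile(const std::string &filepath, const ShapeVector &host_shape) const {
        return true;  // default: no dump support on this backend
      }
      virtual bool LoadMemToHost(const std::string &tensor_name, const ShapeVector &host_shape) const {
        return true;  // default: nothing to load
      }
    };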
|
/third_party/mindspore/mindspore/ccsrc/debug/ |
D | tensor_load.h |
    198  … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,  in DumpTensorToFile() argument
    219  return DumpJsonParser::DumpToFile(path, node->GetDataPtr(), host_size, host_shape, host_type);  in DumpTensorToFile()
|
D | debug_services.h |
    407  … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
|
D | debug_services.cc |
    1256  … const std::string &host_fmt, const std::vector<int64_t> &host_shape,  in DumpTensorToFile() argument
    1259  …return tensor_loader_->DumpTensorToFile(tensor_name, trans_flag, filepath, host_fmt, host_shape, h…  in DumpTensorToFile()
|
/third_party/mindspore/mindspore/ccsrc/runtime/framework/actor/ |
D | switch_actor.cc |
    292  ShapeVector host_shape;  in GetIndex() local
    293  if (!device_tensor->SyncDeviceToHost(host_shape, size, type_id, static_cast<void *>(buf))) {  in GetIndex()
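SwitchActor::GetIndex() pulls the branch index back from device memory; a scalar needs no dimensions, so an empty host_shape plus a small host buffer suffices. A hedged sketch of that readback, assuming an int64_t index (the listing does not show how size and type_id are derived, nor the real buffer type):

    #include <cstdint>
    #include "runtime/device/device_address.h"

    // Sketch of the scalar readback in SwitchActor::GetIndex(): an empty
    // shape vector and a raw host buffer pull one index off the device.
    int64_t ReadIndexSketch(const mindspore::device::DeviceAddress &device_tensor) {
      int64_t buf = 0;
      ShapeVector host_shape;  // scalar: no dims needed for the copy
      if (!device_tensor.SyncDeviceToHost(host_shape, sizeof(buf), kNumberTypeInt64,
                                          static_cast<void *>(&buf))) {
        return -1;  // the real actor logs an error here instead
      }
      return buf;
    }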
|
/third_party/mindspore/mindspore/ccsrc/debug/debugger/ |
D | debugger.h |
    106  … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,
|
D | debugger.cc |
    1154  … const std::string &host_fmt, const std::vector<int64_t> &host_shape, TypeId host_type,  in DumpTensorToFile() argument
    1156  …rvices_.get()->DumpTensorToFile(tensor_name, trans_flag, filepath, host_fmt, host_shape, host_type,  in DumpTensorToFile()
|