/third_party/mindspore/mindspore/parallel/ |
D | _tensor.py |
     22  def _get_tensor_strategy(dev_mat, tensor_map): argument
     38  tensor_strategy.append(dev_mat[-dim-1])
    172  def _get_slice_index(dev_mat, tensor_map): argument
    184  tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
    185  tensor_slice_index = _get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, rank)
    189  def _load_tensor(tensor, dev_mat, tensor_map): argument
    208  tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
    209  tensor_slice_index = _get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, rank)
    235  dev_mat = layout[0]
    243  tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
    [all …]

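Read together, these hits outline the core lookup in _tensor.py: each entry of tensor_map names an axis of the device matrix, counted from the right, and the slice count for that tensor dimension is read from that axis (line 38). A minimal sketch consistent with the snippets above; the handling of the unsplit -1 map entry is an assumption:

    def _get_tensor_strategy(dev_mat, tensor_map):
        """Sketch: derive per-dimension slice counts from dev_mat and tensor_map."""
        tensor_strategy = []
        for dim in tensor_map:
            if dim == -1:
                # Assumption: -1 marks a dimension that is not split across devices.
                tensor_strategy.append(1)
            else:
                # Line 38 above: tensor_map indexes dev_mat from the right.
                tensor_strategy.append(dev_mat[-dim - 1])
        return tensor_strategy

For example, dev_mat = [2, 3] with tensor_map = [1, -1] yields [2, 1]: dimension 0 is cut into two slices, dimension 1 is replicated.
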
D | _utils.py |
    304  dev_mat = tensor_layout[0][:]
    306  for dim in range(len(dev_mat)):
    308  dev_mat[-1-dim] = 1
    309  new_tensor_layout[0] = dev_mat

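The _utils.py hits show a layout copy whose device matrix is reset to ones; the listing omits line 307, presumably a condition guarding which axes are reset. A hedged reconstruction (the function name is hypothetical):

    def _reset_dev_mat(tensor_layout):
        # Hypothetical name for the _utils.py snippet (lines 304-309).
        # A size-1 device-matrix axis means the corresponding split disappears.
        new_tensor_layout = list(tensor_layout)
        dev_mat = tensor_layout[0][:]      # line 304: defensive copy
        for dim in range(len(dev_mat)):    # line 306; the guard at 307 is elided
            dev_mat[-1 - dim] = 1          # line 308: index from the right
        new_tensor_layout[0] = dev_mat     # line 309
        return new_tensor_layout
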
/third_party/mindspore/tests/ut/python/parallel/ |
D | test_combined_tensor.py |
     21  dev_mat = [2, 2]
     24  tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
     30  tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
     46  dev_mat = [4]
     48  tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)

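The test drives _reshape_param_data, which folds a buffer of gathered slices back into the full parameter. A usage sketch under assumed shapes (the concrete values are not the test's fixtures):

    import numpy as np
    from mindspore import Tensor
    from mindspore.parallel._tensor import _reshape_param_data

    # Assumed: four (1, 2) slices from a 2x2 device matrix, stacked into a
    # (4, 2) buffer, are reassembled into the (2, 4) parameter.
    input_tensor = Tensor(np.arange(8, dtype=np.float32).reshape(4, 2))
    dev_mat = [2, 2]
    tensor_map = [1, 0]
    tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
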
D | test_load_tensor.py |
     24  dev_mat = [2, 3]
     27  tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
     33  tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)

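_load_tensor goes the other way: given the full tensor, it returns the slice owned by the calling rank, which the function resolves itself (per lines 189-209 in _tensor.py above). A sketch with assumed shapes:

    import numpy as np
    from mindspore import Tensor
    from mindspore.parallel._tensor import _load_tensor

    # Assumed: six devices as a 2x3 device matrix; dim 0 is split along the
    # size-2 axis, dim 1 is not split (-1), so each rank gets a (2, 6) slice.
    tensor = Tensor(np.ones((4, 6), dtype=np.float32))
    dev_mat = [2, 3]
    tensor_map = [1, -1]
    tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
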
/third_party/mindspore/tests/ut/cpp/parallel/tensor_layout/ |
D | redistribution_operator_infer_test.cc |
     73  Shape dev_mat;  in InferOperatorCheckAll() local
     74  for (uint32_t i = 0; i < dim_len; i++) dev_mat.push_back(2);  in InferOperatorCheckAll()
     75  Shape tensor_shape = dev_mat;  in InferOperatorCheckAll()
     76  GenerateValidTensorMap(dev_mat, tensor_shape, &tensor_map_list);  in InferOperatorCheckAll()
     80  in_dev_mat.Init(dev_mat);  in InferOperatorCheckAll()

D | construct_operator_test.cc |
     85  Arrangement dev_mat;  in TEST_F() local
     86  dev_mat.Init(device_arrangement);  in TEST_F()
     94  tensor_layout.Init(dev_mat, tensor_map, tensor_shape);  in TEST_F()

/third_party/mindspore/mindspore/train/ |
D | serialization.py |
    634  dev_mat = layout[0]
    666  param_data = _reshape_param_data(param_data, dev_mat, tensor_map)
   1110  dev_mat = list(layout.dev_matrix[0].dim)
   1118  for dim in dev_mat:
   1133  merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)
   1135  merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
   1138  tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
   1151  slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
   1495  dev_mat = list(layout.dev_matrix[0].dim)
   1501  …train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size, shard_stride, shard_s…

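Lines 1138 and 1151 show the merge path rebuilding the strategy from the device matrix and then locating every rank's slice. A hedged helper capturing that loop (the name and the device_num parameter are mine, not serialization.py's):

    from mindspore.parallel._tensor import (_get_tensor_strategy,
                                            _get_tensor_slice_index)

    def slice_index_per_rank(dev_mat, tensor_map, device_num):
        # Hypothetical helper: which slice of a parameter each rank holds,
        # mirroring serialization.py lines 1138 and 1151.
        tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
        return [int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
                for i in range(device_num)]
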
/third_party/mindspore/mindspore/ops/operations/ |
D | comm_ops.py |
    927  def infer_value(self, x, dev_mat, tensor_map): argument
    929  validator.check_value_type("dev_mat", dev_mat, [tuple], self.name)
    931  return Tensor(_load_tensor(x, dev_mat, tensor_map))

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/tensor_layout/ |
D | tensor_layout.cc |
    409  Shape dev_mat(device_arrangement_origin_.array());  in TransferRepeatLayout() local
    413  repeat.InitFromVector(dev_mat, tensor_map, tensor_shape);  in TransferRepeatLayout()

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ |
D | parameter_manager.cc |
    583  auto dev_mat = tensor_layout->device_arrangement().array();  in HandleAdaFactorOpt() local
    601  if (new_tensor_layout.InitFromVector(dev_mat, tensor_map, origin_shape) != SUCCESS) {  in HandleAdaFactorOpt()

/third_party/mindspore/mindspore/ops/_grad/ |
D | grad_comm_ops.py |
    534  def bprop(x, dev_mat, tensor_map, out, dout): argument

/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ops_info/ |
D | operator_info.cc |
    588  Shape dev_mat = {repeated_size, device_num / repeated_size};  in CreateGroupForOptShard() local
    589  DeviceMatrix temp_dev_matrix(rank, stage_device_list_, dev_mat);  in CreateGroupForOptShard()