
Searched refs:dev_mat (Results 1 – 12 of 12) sorted by relevance

/third_party/mindspore/mindspore/parallel/
_tensor.py
22 def _get_tensor_strategy(dev_mat, tensor_map): argument
38 tensor_strategy.append(dev_mat[-dim-1])
172 def _get_slice_index(dev_mat, tensor_map): argument
184 tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
185 tensor_slice_index = _get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, rank)
189 def _load_tensor(tensor, dev_mat, tensor_map): argument
208 tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
209 tensor_slice_index = _get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, rank)
235 dev_mat = layout[0]
243 tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
[all …]
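
The _tensor.py hits above sketch the core mapping from a device matrix to a per-dimension split strategy: line 38 reads dev_mat[-dim-1] for each entry of the tensor map. Below is a minimal Python sketch of that logic, reconstructed only from the lines shown here; the treatment of unmapped dimensions (a tensor_map entry of -1) is an assumption, not taken verbatim from the file.

    def get_tensor_strategy_sketch(dev_mat, tensor_map):
        # A tensor_map entry d means "split this tensor dimension across device-matrix
        # axis d, counted from the right"; -1 is assumed to mean "not split".
        tensor_strategy = []
        for dim in tensor_map:
            if dim == -1:
                tensor_strategy.append(1)
            else:
                tensor_strategy.append(dev_mat[-dim - 1])
        return tensor_strategy

    # e.g. dev_mat = [2, 3], tensor_map = [1, -1]  ->  strategy [2, 1]
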
_utils.py
304 dev_mat = tensor_layout[0][:]
306 for dim in range(len(dev_mat)):
308 dev_mat[-1-dim] = 1
309 new_tensor_layout[0] = dev_mat
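
Lines 304–309 of _utils.py copy the device matrix, overwrite some entries with 1, and store the result in a new layout. A hedged reading: device-matrix axes the tensor map never references only produce repeated slices, so they can be collapsed to 1. The condition guarding line 308 is not visible above, so the check in this sketch is an assumption.

    def collapse_unused_dev_axes_sketch(tensor_layout):
        dev_mat = tensor_layout[0][:]        # copy of the device matrix (line 304)
        tensor_map = tensor_layout[1]
        for dim in range(len(dev_mat)):
            if dim not in tensor_map:        # assumed: axis not used by any tensor dimension
                dev_mat[-1 - dim] = 1        # collapse that axis (line 308)
        new_tensor_layout = list(tensor_layout)
        new_tensor_layout[0] = dev_mat       # line 309
        return new_tensor_layout
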
/third_party/mindspore/tests/ut/python/parallel/
test_combined_tensor.py
21 dev_mat = [2, 2]
24 tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
30 tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
46 dev_mat = [4]
48 tensor = _reshape_param_data(input_tensor, dev_mat, tensor_map)
test_load_tensor.py
24 dev_mat = [2, 3]
27 tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
33 tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)
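
The two unit-test files call the helpers directly. A usage sketch in the same style, with illustrative values (not copied from the tests); _load_tensor is assumed to select the slice belonging to the calling rank, so it needs an initialized communication context.

    import numpy as np
    from mindspore import Tensor
    from mindspore.parallel._tensor import _load_tensor, _reshape_param_data

    tensor = Tensor(np.arange(36).reshape(6, 6).astype(np.float32))
    dev_mat = [2, 3]        # six devices arranged as a 2 x 3 device matrix
    tensor_map = [1, -1]    # split dim 0 across the axis of size 2, replicate dim 1

    # Cut the full tensor down to this rank's slice.
    tensor_slice = _load_tensor(tensor, dev_mat, tensor_map)

    # _reshape_param_data goes the other way: given the gathered slices and the same
    # dev_mat / tensor_map, it rebuilds the full parameter (see test_combined_tensor.py).
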
/third_party/mindspore/tests/ut/cpp/parallel/tensor_layout/
redistribution_operator_infer_test.cc
73 Shape dev_mat; in InferOperatorCheckAll() local
74 for (uint32_t i = 0; i < dim_len; i++) dev_mat.push_back(2); in InferOperatorCheckAll()
75 Shape tensor_shape = dev_mat; in InferOperatorCheckAll()
76 GenerateValidTensorMap(dev_mat, tensor_shape, &tensor_map_list); in InferOperatorCheckAll()
80 in_dev_mat.Init(dev_mat); in InferOperatorCheckAll()
construct_operator_test.cc
85 Arrangement dev_mat; in TEST_F() local
86 dev_mat.Init(device_arrangement); in TEST_F()
94 tensor_layout.Init(dev_mat, tensor_map, tensor_shape); in TEST_F()
/third_party/mindspore/mindspore/train/
serialization.py
634 dev_mat = layout[0]
666 param_data = _reshape_param_data(param_data, dev_mat, tensor_map)
1110 dev_mat = list(layout.dev_matrix[0].dim)
1118 for dim in dev_mat:
1133 merged_tensor = _reshape_param_data_with_weight(all_gather_tensor, dev_mat, field_size)
1135 merged_tensor = _reshape_param_data(all_gather_tensor, dev_mat, tensor_map)
1138 tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
1151 slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
1495 dev_mat = list(layout.dev_matrix[0].dim)
1501 …train_map[param_name] = [dev_mat, tensor_map, param_split_shape, field_size, shard_stride, shard_s…
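
In serialization.py (lines 1138–1151 above) the strategy derived from dev_mat and tensor_map is fed to _get_tensor_slice_index once per rank to locate each device's slice while merging sliced checkpoints. A hedged sketch of just that loop, with made-up shapes and none of the surrounding checkpoint bookkeeping:

    from mindspore.parallel._tensor import _get_tensor_strategy, _get_tensor_slice_index

    dev_mat = [2, 4]        # illustrative 8-device arrangement
    tensor_map = [1, 0]     # both tensor dimensions are split
    device_num = 8

    tensor_strategy = _get_tensor_strategy(dev_mat, tensor_map)
    for i in range(device_num):
        # index of the slice owned by rank i within the gathered data
        slice_index = int(_get_tensor_slice_index(dev_mat, tensor_strategy, tensor_map, i))
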
/third_party/mindspore/mindspore/ops/operations/
comm_ops.py
927 def infer_value(self, x, dev_mat, tensor_map): argument
929 validator.check_value_type("dev_mat", dev_mat, [tuple], self.name)
931 return Tensor(_load_tensor(x, dev_mat, tensor_map))
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/tensor_layout/
tensor_layout.cc
409 Shape dev_mat(device_arrangement_origin_.array()); in TransferRepeatLayout() local
413 repeat.InitFromVector(dev_mat, tensor_map, tensor_shape); in TransferRepeatLayout()
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/
parameter_manager.cc
583 auto dev_mat = tensor_layout->device_arrangement().array(); in HandleAdaFactorOpt() local
601 if (new_tensor_layout.InitFromVector(dev_mat, tensor_map, origin_shape) != SUCCESS) { in HandleAdaFactorOpt()
/third_party/mindspore/mindspore/ops/_grad/
grad_comm_ops.py
534 def bprop(x, dev_mat, tensor_map, out, dout): argument
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ops_info/
operator_info.cc
588 Shape dev_mat = {repeated_size, device_num / repeated_size}; in CreateGroupForOptShard() local
589 DeviceMatrix temp_dev_matrix(rank, stage_device_list_, dev_mat); in CreateGroupForOptShard()