/third_party/mindspore/mindspore/ccsrc/frontend/parallel/ops_info/ |
D | get_next_info.cc |
      33  auto slice_dim_iter = std::find(dev_matrix_shape_.begin(), dev_matrix_shape_.end(), shard_num_);  in InferTensorMap()
      34  if (slice_dim_iter == dev_matrix_shape_.end()) {  in InferTensorMap()
      38  size_t slice_dim = size_t(slice_dim_iter - dev_matrix_shape_.begin());  in InferTensorMap()
      45  tensor_map_index.push_back(dev_matrix_shape_.size() - 1 - slice_dim);  in InferTensorMap()
      63  …if (output_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[i], outputs_shape_[i]) != …  in InferTensorLayout()
      92  dev_matrix_shape_ = *dev_matrix_iter;  in InferDevMatrixShape()
      94  auto shard_num_iter = std::max_element(dev_matrix_shape_.begin(), dev_matrix_shape_.end());  in InferDevMatrixShape()
      95  if (shard_num_iter != dev_matrix_shape_.end()) {  in InferDevMatrixShape()
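The InferTensorMap() hits above rely on the convention that a tensor-map entry names a device-matrix dimension counted from the right, which is why line 45 pushes dev_matrix_shape_.size() - 1 - slice_dim. A minimal standalone sketch of that indexing, using plain std::vector<int64_t> instead of the framework's Shape/TensorMap types and made-up values:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> dev_matrix_shape = {2, 4, 1};  // hypothetical device matrix
      int64_t shard_num = 4;                              // data-parallel shard count

      auto slice_dim_iter = std::find(dev_matrix_shape.begin(), dev_matrix_shape.end(), shard_num);
      if (slice_dim_iter == dev_matrix_shape.end()) {
        std::cerr << "shard number not found in the device matrix\n";
        return 1;
      }
      size_t slice_dim = static_cast<size_t>(slice_dim_iter - dev_matrix_shape.begin());

      // Tensor-map entries index device-matrix dimensions from the right, so
      // dimension 1 of {2, 4, 1} is written as 3 - 1 - 1 = 1 in the tensor map.
      int64_t tensor_map_entry =
          static_cast<int64_t>(dev_matrix_shape.size()) - 1 - static_cast<int64_t>(slice_dim);
      std::cout << "slice_dim=" << slice_dim << " tensor_map_entry=" << tensor_map_entry << "\n";
      return 0;
    }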
|
D | gather_v2_info.cc |
     109  dev_matrix_shape_ = stra.at(0);  in InferDevMatrixShape()
     188  …if ((input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_.at(0), input_shape) …  in InferTensorInfo()
     189  …(input_index_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_.at(1), input_index_shape)…  in InferTensorInfo()
     190  …(output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_.at(0), output_shape) !…  in InferTensorInfo()
     228  for (size_t i = LongToSize(axis_) + 1; i < dev_matrix_shape_.size(); i++) {  in InferTensorSubOps()
     229  mod_n *= dev_matrix_shape_.at(i);  in InferTensorSubOps()
     231  if ((axis_ >= SizeToLong(dev_matrix_shape_.size())) || axis_ < 0) {  in InferTensorSubOps()
     232  MS_LOG(ERROR) << "Axis is " << axis_ << ", not in [0, " << dev_matrix_shape_.size() << ").";  in InferTensorSubOps()
     234  int64_t mod_p = mod_n * dev_matrix_shape_.at(LongToSize(axis_));  in InferTensorSubOps()
     246  …int64_t sub_value = inputs_shape_[0][LongToSize(axis_)] / dev_matrix_shape_[LongToSize(axis_)] * m…  in InferTensorSubOps()
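Lines 228-234 build stride-like products over the device matrix: mod_n multiplies the dimensions strictly to the right of axis_, and mod_p additionally multiplies in the dimension at axis_ itself. The sketch below reproduces that arithmetic with plain values; the final slice-index step is an assumption, since the line that consumes sub_value is truncated above:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> dev_matrix_shape = {2, 4, 2};  // hypothetical strategy-derived shape
      int64_t axis = 1;                                   // split axis of the parameter
      int64_t rank = 5;                                   // hypothetical global rank

      // mod_n: product of the device-matrix dims strictly to the right of axis (lines 228-229).
      int64_t mod_n = 1;
      for (size_t i = static_cast<size_t>(axis) + 1; i < dev_matrix_shape.size(); ++i) {
        mod_n *= dev_matrix_shape[i];
      }
      // mod_p: mod_n times the dim at axis itself (line 234).
      int64_t mod_p = mod_n * dev_matrix_shape[static_cast<size_t>(axis)];

      // Assumption (the original line is truncated): the rank's slice index along
      // axis is recovered as (rank % mod_p) / mod_n, and the index offset of that
      // slice is dim_size / shards * slice_index.
      int64_t slice_index = (rank % mod_p) / mod_n;
      int64_t dim_size = 16;  // hypothetical inputs_shape_[0][axis]
      int64_t sub_value = dim_size / dev_matrix_shape[static_cast<size_t>(axis)] * slice_index;
      std::cout << "mod_n=" << mod_n << " mod_p=" << mod_p << " sub_value=" << sub_value << "\n";
      return 0;
    }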
|
D | virtual_dataset_info.cc |
      88  dev_matrix_shape_ = stra[max_size_strategy_dim_];  in InferDevMatrixShape()
      97  auto slice_dim_iter = std::find(dev_matrix_shape_.begin(), dev_matrix_shape_.end(), shard_num_);  in InferTensorMap()
      98  if (slice_dim_iter == dev_matrix_shape_.end()) {  in InferTensorMap()
     102  size_t slice_dim = size_t(slice_dim_iter - dev_matrix_shape_.begin());  in InferTensorMap()
     110  tensor_map_index.push_back(dev_matrix_shape_.size() - 1 - slice_dim);  in InferTensorMap()
|
D | matmul_info.cc |
     211  SetDevMatrixShape(mat_a_strategy, mat_b_strategy, transpose_b_, &dev_matrix_shape_);  in InferDevMatrixShape()
     212  origin_dev_matrix_shape_ = dev_matrix_shape_;  in InferDevMatrixShape()
     249  size_t size = dev_matrix_shape_.size();  in InferTensorMap()
     252  size = dev_matrix_shape_.size() - 1;  in InferTensorMap()
     292  if (dev_matrix_shape_.size() != 3) {  in InferTensorMap()
     297  } else if (outputs_shape_[0][0] % (dev_matrix_shape_[0] * dev_matrix_shape_[1]) != 0) {  in InferTensorMap()
     317  if (dev_matrix_shape_.size() != 3) {  in InferTensorLayout()
     321  output_dev_matrix_shape = {dev_matrix_shape_[0] * dev_matrix_shape_[1], dev_matrix_shape_[2]};  in InferTensorLayout()
     323  output_dev_matrix_shape = dev_matrix_shape_;  in InferTensorLayout()
     327  …if ((mat_a_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], inputs_shape_[0]) != SU…  in InferTensorLayout()
     [all …]
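Lines 317-323 suggest that for a 3-D device matrix the output layout fuses the two leftmost dimensions, {a, b, c} becoming {a*b, c}, and otherwise reuses the matrix as-is. A hedged sketch of just that fusion; the surrounding checks (for example the divisibility test on line 297) are deliberately omitted and the exact branch structure is an assumption:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Fuse the two leftmost device-matrix dims for the output layout when the
    // matrix is 3-D (line 321); otherwise keep it unchanged (line 323).
    std::vector<int64_t> OutputDevMatrix(const std::vector<int64_t> &dev_matrix_shape) {
      if (dev_matrix_shape.size() != 3) {
        return dev_matrix_shape;
      }
      return {dev_matrix_shape[0] * dev_matrix_shape[1], dev_matrix_shape[2]};
    }

    int main() {
      std::vector<int64_t> dev = {2, 2, 4};  // hypothetical
      for (int64_t d : OutputDevMatrix(dev)) {
        std::cout << d << ' ';
      }
      std::cout << '\n';  // prints: 4 4
      return 0;
    }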
|
D | tensordot_info.cc |
     156  dev_matrix_shape_ = input_a_strategy;  in InferDevMatrixShape()
     158  dev_matrix_shape_.push_back(input_b_strategy[i]);  in InferDevMatrixShape()
     161  dev_matrix_shape_ = input_a_strategy;  in InferDevMatrixShape()
     172  dev_matrix_shape_.push_back(input_b_strategy[i]);  in InferDevMatrixShape()
     180  MS_LOG(INFO) << name_ << ": The dev matrix is " << ShapeToString(dev_matrix_shape_);  in InferDevMatrixShape()
     193  forward_group_map.push_back(dev_matrix_shape_.size() - 1);  in InferForwardCommunication()
     278  size_t size = dev_matrix_shape_.size();  in InferTensorMap()
     281  size = dev_matrix_shape_.size() - 1;  in InferTensorMap()
|
D | reduce_method_info.cc |
      38  dev_matrix_shape_ = input_strategy;  in InferDevMatrixShape()
     164  if ((dev_matrix_shape_.size() > size) && !repeated_num_in_dev_matrix_right_) {  in InferForwardCommunication()
     165  group_creat_map.push_back(SizeToInt(dev_matrix_shape_.size() - size_t(1)));  in InferForwardCommunication()
     240  if ((dev_matrix_shape_.size() > size) && !repeated_num_in_dev_matrix_right_) {  in InferForwardCommunication()
     241  group_creat_map.push_back(SizeToInt(dev_matrix_shape_.size() - size_t(1)));  in InferForwardCommunication()
     365  …if ((input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_shape) != …  in InferTensorInfo()
     366  …(output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != S…  in InferTensorInfo()
     494  …if ((input_tensor_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[0], input_shape) != …  in InferTensorInfo()
     495  …(output_tensor_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[0], output_shape) != S…  in InferTensorInfo()
     523  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     [all …]
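Line 523, like the InferAsLossDivisor() hits in several entries below, derives as_loss_divisor_ from the device matrix and the output tensor map. The sketch below is a stand-in, not MindSpore's ComputeRepeatDeviceNumByTensorMap: it assumes the divisor is the product of the device-matrix dimensions the tensor map does not reference, i.e. the dimensions across which the output is merely replicated:

    #include <algorithm>
    #include <cstdint>
    #include <iostream>
    #include <vector>

    // Illustrative stand-in: multiply the device-matrix dims whose right-indexed
    // position is absent from the tensor map. Assumption: this matches the
    // semantics implied by the helper's name; -1 in a tensor map means the
    // tensor dimension is not sharded along any device dimension.
    int64_t RepeatDeviceNum(const std::vector<int64_t> &dev_matrix_shape,
                            const std::vector<int64_t> &tensor_map) {
      int64_t repeat = 1;
      for (size_t i = 0; i < dev_matrix_shape.size(); ++i) {
        int64_t dim_from_right = static_cast<int64_t>(dev_matrix_shape.size() - 1 - i);
        bool used = std::find(tensor_map.begin(), tensor_map.end(), dim_from_right) != tensor_map.end();
        if (!used) {
          repeat *= dev_matrix_shape[i];
        }
      }
      return repeat;
    }

    int main() {
      // Hypothetical: device matrix {2, 4}, output sharded only along the dim of size 4.
      std::vector<int64_t> dev_matrix_shape = {2, 4};
      std::vector<int64_t> output_tensor_map = {0, -1};
      std::cout << "loss divisor = " << RepeatDeviceNum(dev_matrix_shape, output_tensor_map) << "\n";  // 2
      return 0;
    }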
|
D | gather_v2_p_info.cc |
     407  dev_matrix_shape_.clear();  in InferDevMatrixShape()
     414  dev_matrix_shape_ = param_strategy;  in InferDevMatrixShape()
     415  out_dev_matrix_shape_ = dev_matrix_shape_;  in InferDevMatrixShape()
     420  dev_matrix_shape_ = {index_strategy[0], param_strategy[0]};  in InferDevMatrixShape()
     422  out_dev_matrix_shape_ = dev_matrix_shape_;  in InferDevMatrixShape()
     423  MS_LOG(INFO) << name_ << ": Sharding batch and axis, the dev matrix is " << dev_matrix_shape_  in InferDevMatrixShape()
     428  dev_matrix_shape_ = param_strategy;  in InferDevMatrixShape()
     432  dev_matrix_shape_.insert(dev_matrix_shape_.end(), index_strategy.begin(), index_strategy.end());  in InferDevMatrixShape()
     447  out_dev_matrix_shape_ = dev_matrix_shape_;  in InferDevMatrixShape()
     572  input_shape[0] = param_split_shapes_[LongToSize(rank / dev_matrix_shape_[1])];  in InferTensorInfo()
     [all …]
|
D | reluv2_info.cc |
      71  dev_matrix_shape_ = input_strategy;  in InferDevMatrixShape()
     117  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     118  MS_LOG(INFO) << name_ << ": the dev matrix shape is " << ShapeToString(dev_matrix_shape_)  in InferAsLossDivisor()
|
D | matmul_dds_info.cc |
     116  dev_matrix_shape_ = input_strategy;  in InferDevMatrixShape()
     117  dev_matrix_shape_.push_back(1);  in InferDevMatrixShape()
     118  dev_matrix_shape_.push_back(1);  in InferDevMatrixShape()
     119  dev_matrix_shape_.push_back(1);  in InferDevMatrixShape()
     120  dev_matrix_shape_origin_ = dev_matrix_shape_;  in InferDevMatrixShape()
|
D | loss_info.cc |
      75  dev_matrix_shape_ = input_strategy;  in InferDevMatrixShape()
     101  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[1]);  in InferAsLossDivisor()
     102  MS_LOG(INFO) << name_ << " : The dev matrix shape is " << ShapeToString(dev_matrix_shape_)  in InferAsLossDivisor()
|
D | topk_info.cc |
      60  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
      98  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     100  std::string dev_matrix_shape_str = ShapeToString(dev_matrix_shape_);  in InferAsLossDivisor()
|
D | onehot_info.cc |
      79  dev_matrix_shape_.push_back(input_strategy[1]);  // the depth is un-splittable  in InferDevMatrixShape()
      80  dev_matrix_shape_.push_back(input_strategy[0]);  // the features is splittable  in InferDevMatrixShape()
      83  dev_matrix_shape_.push_back(input_stra);  in InferDevMatrixShape()
      86  old_dev_matrix_back_ = dev_matrix_shape_.back();  in InferDevMatrixShape()
|
D | split_info.cc |
     111  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     193  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     194  MS_LOG(INFO) << name_ << ": the dev matrix shape is " << ShapeToString(dev_matrix_shape_)  in InferAsLossDivisor()
|
D | batchnorm_info.cc |
     106  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     243  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     244  MS_LOG(INFO) << name_ << " : The dev matrix shape is " << ShapeToString(dev_matrix_shape_)  in InferAsLossDivisor()
|
D | gatherd_info.cc |
      95  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     120  …if (input_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[i], inputs_shape_[i]) != SUC…  in InferTensorInfo()
     133  …if (output_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[i], outputs_shape_[i]) != …  in InferTensorInfo()
|
D | layer_norm_info.cc |
     119  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     163  as_loss_divisor_ = ComputeRepeatDeviceNumByTensorMap(dev_matrix_shape_, outputs_tensor_map_[0]);  in InferAsLossDivisor()
     164  MS_LOG(INFO) << name_ << " : The dev matrix shape is " << ShapeToString(dev_matrix_shape_)  in InferAsLossDivisor()
|
D | operator_info.cc |
     141  dev_matrix_shape_.clear();  in ResetQueueMember()
     210  …if (input_layout.InitFromVector(dev_matrix_shape_, inputs_tensor_map_[i], inputs_shape_[i]) != SUC…  in InferTensorInfo()
     220  …if (output_layout.InitFromVector(dev_matrix_shape_, outputs_tensor_map_[i], outputs_shape_[i]) != …  in InferTensorInfo()
     234  …std::accumulate(dev_matrix_shape_.begin(), dev_matrix_shape_.end(), 1, std::multiplies<int64_t>());  in InferRepeatedCalcInfo()
     262  dev_matrix_shape_.push_back(repeated_calc_num_);  in SetRepeatedCalcDevMatrix()
     264  (void)dev_matrix_shape_.insert(dev_matrix_shape_.begin(), repeated_calc_num_);  in SetRepeatedCalcDevMatrix()
     531  DeviceMatrix dev_matrix(rank, stage_device_list_, dev_matrix_shape_);  in CreateGroupByTensorMap()
     554  DeviceMatrix dev_matrix(rank, stage_device_list_, dev_matrix_shape_);  in CreateGroupForOptShard()
     625  DeviceMatrix dev_matrix(rank, stage_device_list_, dev_matrix_shape_);  in CreateGroupByDim()
     747  …((int64_t)(std::accumulate(dev_matrix_shape_.begin(), dev_matrix_shape_.end(), 1, std::multiplies<…  in InitForCostModelWithAutoRepeatCalc()
     [all …]
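Lines 234, 262, 264 and 747 show a repeated-calculation factor being computed around the product of dev_matrix_shape_ and then folded into the matrix at one end or the other. The sketch below reconstructs that flow under stated assumptions: the exact formula for repeated_calc_num_ and which branch corresponds to repeated_num_in_dev_matrix_right_ are inferred from the names, not visible above.

    #include <cstdint>
    #include <functional>
    #include <iostream>
    #include <numeric>
    #include <vector>

    int main() {
      int64_t stage_device_num = 16;                   // hypothetical device count of this stage
      std::vector<int64_t> dev_matrix_shape = {2, 4};  // the strategy only uses 8 of them

      // Assumption: the repeated-calculation factor is the leftover device count,
      // i.e. stage devices divided by the product of the strategy's device matrix
      // (the accumulate seen on lines 234 and 747).
      int64_t used = std::accumulate(dev_matrix_shape.begin(), dev_matrix_shape.end(), int64_t{1},
                                     std::multiplies<int64_t>());
      int64_t repeated_calc_num = stage_device_num / used;  // 2

      // Fold the factor into the device matrix, either at the right (push_back,
      // line 262) or at the left (insert at begin, line 264). Assumption: the
      // placement follows the repeated_num_in_dev_matrix_right_ flag's name.
      bool repeated_num_in_dev_matrix_right = false;
      if (repeated_calc_num > 1) {
        if (repeated_num_in_dev_matrix_right) {
          dev_matrix_shape.push_back(repeated_calc_num);  // {2, 4, 2}
        } else {
          dev_matrix_shape.insert(dev_matrix_shape.begin(), repeated_calc_num);  // {2, 2, 4}
        }
      }
      for (int64_t d : dev_matrix_shape) {
        std::cout << d << ' ';
      }
      std::cout << '\n';
      return 0;
    }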
|
D | reshape_info.cc |
      42  dev_matrix_shape_ = stra.at(0);  in InferDevMatrixShape()
     198  Status status = dev_matrix.Init(dev_matrix_shape_);  in InferTensorLayout()
     348  dev_matrix_shape_ = input_layout_.device_arrangement().array();  in Init()
     354  dev_matrix_shape_ = output_layout_.device_arrangement().array();  in Init()
|
D | tile_info.cc |
      96  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     103  slice_multiples_[i] = slice_multiples_[i] / dev_matrix_shape_[i];  in InferDevMatrixShape()
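Line 103 divides each tile multiple by the shard count of the corresponding device-matrix dimension, so every slice is tiled proportionally less. A tiny standalone sketch with hypothetical numbers:

    #include <cstdint>
    #include <iostream>
    #include <vector>

    int main() {
      std::vector<int64_t> slice_multiples = {8, 4};   // hypothetical Tile multiples
      std::vector<int64_t> dev_matrix_shape = {2, 4};  // sharding along the same dims
      for (size_t i = 0; i < slice_multiples.size(); ++i) {
        slice_multiples[i] = slice_multiples[i] / dev_matrix_shape[i];
      }
      std::cout << slice_multiples[0] << " " << slice_multiples[1] << "\n";  // prints: 4 1
      return 0;
    }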
|
D | tmp_identity_info.cc | 34 dev_matrix_shape_ = input_strategy; in InferDevMatrixShape()
|
D | conv2d_info.cc |
     348  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     349  dev_matrix_shape_.push_back(stra[1][0]);  in InferDevMatrixShape()
     377  DeviceMatrix dev_matrix(rank, stage_device_list_, dev_matrix_shape_);  in InferRankBias()
     508  if (dev_matrix_shape_[relevant_dim_index] == MIN_SLICE_NUM) {  in InferForwardCommunication()
     965  dev_matrix_shape_ = stra[0];  in InferDevMatrixShape()
     966  dev_matrix_shape_.push_back(stra[1][1]);  in InferDevMatrixShape()
|
D | range_info.cc | 69 dev_matrix_shape_ = stra[0]; in InferDevMatrixShape()
|
D | prelu_info.cc | 60 dev_matrix_shape_ = input_strategy; in InferDevMatrixShape()
|
D | resizebilinear_info.cc | 88 dev_matrix_shape_ = stra[0]; in InferDevMatrixShape()
|
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/tensor_layout/ |
D | construct_operator.h | 53 Shape dev_matrix_shape_; variable
|