/third_party/mindspore/mindspore/ccsrc/fl/server/ |
D | collective_ops_impl.cc |
    40  uint32_t rank_size = server_num_;    in RingAllReduce() local
    41  size_t chunk_size = count / rank_size;    in RingAllReduce()
    42  size_t remainder_size = count % rank_size;    in RingAllReduce()
    43  std::vector<size_t> chunk_sizes(rank_size, chunk_size);    in RingAllReduce()
    50  for (size_t i = 0; i < rank_size; i++) {    in RingAllReduce()
    57  uint32_t send_to_rank = (local_rank_ + 1) % rank_size;    in RingAllReduce()
    58  uint32_t recv_from_rank = (local_rank_ - 1 + rank_size) % rank_size;    in RingAllReduce()
    59  …MS_LOG(DEBUG) << "AllReduce count:" << count << ", rank_size:" << rank_size << ", local_rank_:" <<…    in RingAllReduce()
    68  for (size_t i = 0; i < rank_size - 1; i++) {    in RingAllReduce()
    70  size_t send_chunk_index = (local_rank_ - i + rank_size) % rank_size;    in RingAllReduce()
    [all …]
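These matches are the index arithmetic of a ring all-reduce: the buffer is cut into rank_size chunks, and each rank only ever talks to its two ring neighbors. A minimal Python sketch of the same plan, using the names from the snippet; the transport in collective_ops_impl.cc is omitted, and spreading the count % rank_size remainder over the first chunks is the usual convention (the loop body at line 50 is truncated above):

    def ring_allreduce_plan(count, rank_size, local_rank):
        # Split count elements into rank_size chunks; the first
        # (count % rank_size) chunks absorb one extra element each.
        chunk_size, remainder = divmod(count, rank_size)
        chunk_sizes = [chunk_size + (1 if i < remainder else 0) for i in range(rank_size)]
        # Ring neighbors: send to the next rank, receive from the previous.
        send_to_rank = (local_rank + 1) % rank_size
        recv_from_rank = (local_rank - 1 + rank_size) % rank_size
        # In step i of the scatter-reduce phase this rank sends chunk
        # (local_rank - i) mod rank_size, as in RingAllReduce().
        send_chunk_order = [(local_rank - i) % rank_size for i in range(rank_size - 1)]
        return chunk_sizes, send_to_rank, recv_from_rank, send_chunk_order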
|
/third_party/mindspore/tests/st/model_zoo_tests/wide_and_deep/python_file_for_ci/ |
D | datasets.py |
    34  line_per_sample=1000, rank_size=None, rank_id=None):    argument
    49  if rank_size is not None and rank_id is not None:
    52  num_shards=rank_size, shard_id=rank_id, shard_equal_rows=True)
    70  line_per_sample=1000, rank_size=None, rank_id=None):    argument
    90  if rank_size is not None and rank_id is not None:
    93  num_shards=rank_size, shard_id=rank_id, shuffle=shuffle,
    111  data_type=DataType.TFRECORD, line_per_sample=1000, rank_size=None, rank_id=None):    argument
    117  line_per_sample, rank_size=rank_size, rank_id=rank_id)
    120  rank_size, rank_id)
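In these helpers rank_size/rank_id are simply forwarded to the dataset constructor as num_shards/shard_id, so each rank reads a disjoint 1/rank_size shard of the files. A hedged sketch of the pattern (the function name and file-list parameter are placeholders, not the real signature from datasets.py):

    import mindspore.dataset as ds

    def create_sharded_dataset(data_files, rank_size=None, rank_id=None):
        # Shard only when both values are provided, as in datasets.py;
        # shard_equal_rows keeps the per-rank row counts identical.
        if rank_size is not None and rank_id is not None:
            return ds.TFRecordDataset(data_files, num_shards=rank_size,
                                      shard_id=rank_id, shard_equal_rows=True)
        return ds.TFRecordDataset(data_files)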
|
/third_party/mindspore/tests/ut/cpp/device/ |
D | hccl_adapter_test.cc |
    95  uint32_t rank_size = 2;    in TEST_F() local
    101  AllToAllvCalcParam calc(alltoall, rank_size);    in TEST_F()
    118  uint32_t rank_size = 2;    in TEST_F() local
    124  AllToAllvCalcParam calc(alltoall, rank_size);    in TEST_F()
    141  uint32_t rank_size = 4;    in TEST_F() local
    148  AllToAllvCalcParam calc(alltoall, rank_size);    in TEST_F()
    165  uint32_t rank_size = 4;    in TEST_F() local
    172  AllToAllvCalcParam calc(alltoall, rank_size);    in TEST_F()
    189  uint32_t rank_size = 2;    in TEST_F() local
    195  AllToAllvCalcParam calc(alltoall, rank_size);    in TEST_F()
    [all …]
|
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/enhancer/ |
D | split_inputs_for_reduce_scatter.cc |
    24  int64_t rank_size) const {    in InsertSplitForInput()
    28  size_t rank_size_t = LongToSize(rank_size);    in InsertSplitForInput()
    34  std::vector<TypeId> dtypes(rank_size, AnfAlgo::GetPrevNodeOutputInferDataType(node, i));    in InsertSplitForInput()
    45  AnfAlgo::SetNodeAttr("num_split", MakeValue(rank_size), split);    in InsertSplitForInput()
    60  int64_t rank_size) const {    in RearrangeInputsForReduceScatter()
    65  size_t rank_size_t = LongToSize(rank_size);    in RearrangeInputsForReduceScatter()
    108  auto rank_size = AnfAlgo::GetNodeAttr<int64_t>(node, kAttrRankSize);    in Process() local
    109  std::vector<AnfNodePtr> split_outputs = InsertSplitForInput(func_graph, cnode, rank_size);    in Process()
    110  return RearrangeInputsForReduceScatter(func_graph, node, split_outputs, rank_size);    in Process()
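This pass reads rank_size from the node's kAttrRankSize attribute, inserts a Split with num_split = rank_size in front of each ReduceScatter input, and then rearranges the slices. A data-level sketch of what that Split does, with NumPy arrays standing in for the inserted graph nodes:

    import numpy as np

    def split_inputs_for_reduce_scatter(inputs, rank_size):
        # Cut every input into rank_size equal slices along dim 0,
        # mirroring the Split node (num_split = rank_size) the pass inserts.
        split_outputs = []
        for x in inputs:
            split_outputs.extend(np.split(x, rank_size, axis=0))
        return split_outputs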
|
D | concat_outputs_for_all_gather.cc |
    92  … const std::vector<AnfNodePtr> &new_tuple_getitems, int64_t rank_size) {    in InsertConcatForOutput() argument
    98  for (size_t j = 0, idx = i; j < LongToSize(rank_size); ++j, idx += inputs_size) {    in InsertConcatForOutput()
    107  shapes[0][0] *= LongToSize(rank_size);    in InsertConcatForOutput()
    110  AnfAlgo::SetNodeAttr(kAttrInputNums, MakeValue(rank_size), concat);    in InsertConcatForOutput()
    111  std::vector<int64_t> dyn_input_size{rank_size};    in InsertConcatForOutput()
    150  auto rank_size = AnfAlgo::GetNodeAttr<int64_t>(node, kAttrRankSize);    in Process() local
    167  return InsertConcatForOutput(func_graph, node, output_info, new_outputs, rank_size);    in Process()
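The idx walk at line 98 is the interesting part: for a fused AllGather with inputs_size inputs, the slice of input i produced by rank j lands at position i + j * inputs_size, and each input then gets one Concat over its rank_size slices (hence shape[0] is multiplied by rank_size at line 107). A sketch of that regrouping, with plain lists standing in for the tuple-getitem nodes:

    def regroup_allgather_outputs(new_outputs, inputs_size, rank_size):
        # new_outputs holds inputs_size * rank_size slices; collect, for each
        # input i, its rank_size slices at indices i, i + inputs_size, ...
        groups = []
        for i in range(inputs_size):
            groups.append([new_outputs[i + j * inputs_size] for j in range(rank_size)])
        return groups  # each group feeds one Concat along dim 0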
|
D | split_inputs_for_reduce_scatter.h |
    37  … const std::vector<AnfNodePtr> &inputs, int64_t rank_size) const;
    39  int64_t rank_size) const;
|
/third_party/mindspore/mindspore/ccsrc/utils/ |
D | comm_manager.cc |
    71  auto rank_size = rank_id_list.size();    in CreateGroupSync() local
    82  hccl::HcclAdapter::GetInstance().HcclCreateGroup(group, UlongToUint(rank_size),    in CreateGroupSync()
    104  bool CommManager::GetRankSize(const string &group, unsigned int *rank_size) const {    in GetRankSize()
    110  *rank_size = static_cast<unsigned int>(HcclCollectiveGroup::instance().GetRankSize(group));    in GetRankSize()
    113  hccl::HcclAdapter::GetInstance().HcclGetRankSize(group, rank_size));    in GetRankSize()
    116  …CHECK(string("get rank size"), group, hccl::HcclAdapter::GetInstance().HcclGetRankSize(rank_size));    in GetRankSize()
    164  bool CommManager::GetRankSize(const string &group, unsigned int *rank_size) const {
    173  *rank_size = static_cast<unsigned int>(size);
    174  MS_LOG(INFO) << "Group " << group << " size is " << *rank_size;
    204  bool CommManager::GetRankSize(const string &group, unsigned int *rank_size) const {
    [all …]
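comm_manager.cc provides several backend-specific overloads of GetRankSize; from Python the same number is reached through the communication helpers. A usage sketch, assuming a distributed job has been launched so init() can succeed:

    from mindspore.communication.management import init, get_group_size

    # get_group_size() is the Python-level counterpart of
    # CommManager::GetRankSize(): after init() it reports how many
    # devices belong to the (by default, world) group.
    init()
    rank_size = get_group_size()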
|
/third_party/mindspore/tests/st/model_zoo_tests/DeepFM/src/ |
D | dataset.py |
    181  line_per_sample=1000, rank_size=None, rank_id=None):    argument
    201  if rank_size is not None and rank_id is not None:
    204  num_shards=rank_size, shard_id=rank_id, shuffle=shuffle,
    222  line_per_sample=1000, rank_size=None, rank_id=None):    argument
    249  if rank_size is not None and rank_id is not None:
    252  num_shards=rank_size, shard_id=rank_id,
    271  rank_size=None, rank_id=None):    argument
    291  rank_size, rank_id)
    294  line_per_sample, rank_size=rank_size, rank_id=rank_id)
    296  if rank_size is not None and rank_size > 1:
|
/third_party/mindspore/mindspore/ops/operations/ |
D | comm_ops.py |
    227  self.rank_size = get_group_size(_get_group(group))
    228  validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
    229  self.add_prim_attr('rank_size', self.rank_size)
    238  x_shape[0] = x_shape[0] * self.rank_size
    264  self.rank_size = get_group_size(_get_group(group))
    265  validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
    266  self.add_prim_attr('rank_size', self.rank_size)
    275  x_shape[0] = x_shape[0] * self.rank_size
    296  self.rank_size = get_group_size(_get_group(group))
    297  validator.check('rank', self.rank, 'rank_size', self.rank_size, Rel.LT, self.name)
    [all …]
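These are the AllGather-style primitives: at construction they check rank < rank_size, stash rank_size as a primitive attribute, and at shape inference multiply the leading dimension by rank_size. A sketch of just the shape rule, detached from the Primitive and validator machinery:

    def allgather_infer_shape(x_shape, rank, rank_size):
        # Mirrors comm_ops.py: the calling rank must be a member of the
        # group, and the gathered output stacks rank_size slices on dim 0.
        if not rank < rank_size:
            raise ValueError(f"rank {rank} must be < rank_size {rank_size}")
        out_shape = list(x_shape)
        out_shape[0] = out_shape[0] * rank_size
        return out_shape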
|
/third_party/mindspore/tests/ut/python/hccl_test/manage/ |
D | api.py |
    49  def rank_size(self):    member in Hccl
    52  @rank_size.setter
    53  def rank_size(self, size):    member in Hccl
    85  def create_group(group, rank_size, rank_ids):    argument
|
/third_party/mindspore/mindspore/ccsrc/runtime/hccl_adapter/ |
D | all_to_all_v_calc_param.cc |
    37  AllToAllvCalcParam::AllToAllvCalcParam(const CNodeWeakPtr &cnode, uint32_t rank_size)    in AllToAllvCalcParam() argument
    39  rank_size_(rank_size),    in AllToAllvCalcParam()
    40  send_counts_(rank_size, 0),    in AllToAllvCalcParam()
    41  sdispls_(rank_size, 0),    in AllToAllvCalcParam()
    42  recv_counts_(rank_size, 0),    in AllToAllvCalcParam()
    43  rdispls_(rank_size, 0) {}    in AllToAllvCalcParam()
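The constructor only zero-initializes one counts vector and one displacements vector per direction, each of length rank_size. How they are filled is not among these matches, but in the usual AllToAllv layout each displacement is the exclusive prefix sum of the counts before it; a sketch under that assumption:

    def alltoallv_displacements(counts):
        # sdispls[i] (resp. rdispls[i]) is the buffer offset of the block
        # exchanged with rank i: an exclusive prefix sum of the counts.
        displs, offset = [], 0
        for c in counts:
            displs.append(offset)
            offset += c
        return displs

    # e.g. counts [3, 0, 5, 2] -> displacements [0, 3, 3, 8]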
|
/third_party/mindspore/tests/ut/cpp/utils/ |
D | comm_manager_test.cc |
    32  unsigned int rank_size = 0;    in TEST_F() local
    33  ASSERT_TRUE(CommManager::GetInstance().GetRankSize(string("1-2-3"), &rank_size));    in TEST_F()
    34  ASSERT_TRUE(CommManager::GetInstance().GetRankSize(string("hccl_world_group"), &rank_size));    in TEST_F()
|
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/gpu/ |
D | concat_outputs_for_all_gather.cc |
    90  … const std::vector<AnfNodePtr> &new_tuple_getitems, int64_t rank_size) {    in InsertConcatForOutput() argument
    96  for (size_t j = 0, idx = i; j < LongToSize(rank_size); ++j, idx += inputs_size) {    in InsertConcatForOutput()
    105  shapes[0][0] *= rank_size;    in InsertConcatForOutput()
    108  AnfAlgo::SetNodeAttr(kAttrInputNums, MakeValue(rank_size), concat);    in InsertConcatForOutput()
    109  std::vector<int64_t> dyn_input_size{rank_size};    in InsertConcatForOutput()
    143  auto rank_size = AnfAlgo::GetNodeAttr<int64_t>(node, kAttrRankSize);    in Process() local
    160  return InsertConcatForOutput(func_graph, node, output_info, new_outputs, rank_size);    in Process()
|
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/ascend/mindir/ |
D | all_to_all_unify_mindir.cc |
    49  uint32_t rank_size;    in GetRankSize() local
    50  auto hccl_ret = hccl::HcclAdapter::GetInstance().HcclGetRankSize(group, &rank_size);    in GetRankSize()
    54  return rank_size;    in GetRankSize()
    113  uint32_t rank_size = GetRankSize(group);    in CreateAllToAllvNode() local
    114  std::vector<int64_t> rank_ids(rank_size, 0);    in CreateAllToAllvNode()
    115  for (uint32_t i = 0; i < rank_size; ++i) {    in CreateAllToAllvNode()
    122  …OG(INFO) << "Create AllToAllv success, split count " << split_count << ", rank size " << rank_size;    in CreateAllToAllvNode()
|
/third_party/mindspore/tests/st/mix_precision/ |
D | utils.py |
    76  self.rank_size = 1
    86  self.rank_size = get_group_size()
    89  self.total_batch_size = self.rank_batch_size * self.rank_size
    93  self.total_batch_data_size = (self.rank_size, self.rank_batch_size) + image_size
    130  target = np.random.randint(0, self.num_classes, size=(self.rank_size, self.rank_batch_size))
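This synthetic dataset sizes everything per rank: the global batch is rank_batch_size * rank_size, and the data and labels are generated with a leading (rank_size, rank_batch_size) axis pair so each rank can pick its own slab. A standalone sketch of that geometry (the function name is a placeholder):

    import numpy as np

    def fake_batch(rank_batch_size, image_size, num_classes, rank_size=1):
        # Global batch = per-rank batch * number of ranks, as in utils.py.
        total_batch_size = rank_batch_size * rank_size
        data_shape = (rank_size, rank_batch_size) + image_size
        data = np.random.randn(*data_shape).astype(np.float32)
        target = np.random.randint(0, num_classes, size=(rank_size, rank_batch_size))
        return total_batch_size, data, target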
|
/third_party/mindspore/tests/st/model_zoo_tests/DeepFM/ |
D | test_deepfm.py |
    39  rank_size = None
    49  rank_size=rank_size,
|
/third_party/mindspore/tests/st/tbe_networks/ |
D | test_resnet_cifar_8p.py |
    45  def create_dataset(repeat_num=1, training=True, batch_size=32, rank_id=0, rank_size=1,    argument
    54  rank_size = rank_size
    56  data_dir, num_shards=rank_size, shard_id=rank_id)
    166  batch_size=batch_size, rank_id=device_id, rank_size=device_num,
|
/third_party/mindspore/tests/st/hcom/ |
D | hcom_sparsetensor.py |
    45  self.rank_size = 1
    58  self.rank_size = get_group_size()
    60  self.total_batch_size = self.rank_batch_size * self.rank_size
    62  self.total_batch_data_size = (self.rank_size, self.rank_batch_size) + image_size
    98  target = np.random.randint(0, self.num_class, size=(self.rank_size, self.rank_batch_size))
|
/third_party/mindspore/mindspore/dataset/core/ |
D | config.py |
    72  rank_size = 0
    76  rank_size = int(env_rank_size.strip())
    80  if rank_size > 1:
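Here the dataset config derives rank_size from an environment variable and only changes behavior in the multi-device case. A sketch of that probe; the variable name RANK_SIZE is an assumption based on MindSpore's launch convention, since the getenv call itself is not among the matches:

    import os

    # RANK_SIZE is assumed; config.py only shows env_rank_size being parsed.
    rank_size = 0
    env_rank_size = os.getenv("RANK_SIZE", "")
    if env_rank_size.strip():
        rank_size = int(env_rank_size.strip())
    multi_device = rank_size > 1  # line 80 branches on this condition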
|
/third_party/mindspore/mindspore/communication/ |
D | _comm_helper.py |
    392  rank_size = len(rank_ids)
    393  if rank_size < 1:
    394  raise ValueError("Rank_ids size {} should be large than 0".format(rank_size))
    397  hccl.create_group(group, rank_size, rank_ids)
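Group creation validates the rank list before touching HCCL: rank_size is simply len(rank_ids), and an empty list is rejected. A sketch with the HCCL binding passed in explicitly (the real helper uses its module-level hccl import):

    def create_group_checked(hccl, group, rank_ids):
        # Mirrors _comm_helper.py: refuse empty groups, then delegate.
        rank_size = len(rank_ids)
        if rank_size < 1:
            raise ValueError("rank_ids size {} should be larger than 0".format(rank_size))
        hccl.create_group(group, rank_size, rank_ids)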
|
/third_party/mindspore/mindspore/nn/layer/ |
D | normalization.py |
    98  self.rank_size = get_group_size()
    99  self.device_list = [i for i in range(0, self.rank_size)]
    106  self.rank_size = get_group_size()
    109  self._check_rank_ids(self.process_groups, self.rank_size)
    111  elif self.rank_size > 1:
    113  self.group_device_num = self.rank_size
    114  self.device_list = [i for i in range(0, self.rank_size)]
    179  def _check_rank_ids(self, process_groups, rank_size):    argument
    182  …validator.check_int_range(rid, 0, rank_size, Rel.INC_LEFT, "rank id in process_groups", self.cls_n…
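SyncBatchNorm either synchronizes across all rank_size devices or across user-supplied process_groups, and _check_rank_ids enforces that every listed rank id lies in [0, rank_size) (Rel.INC_LEFT: only the left bound is inclusive). A sketch of that check without the validator machinery:

    def check_rank_ids(process_groups, rank_size):
        # Every rank id in process_groups must fall in [0, rank_size),
        # matching the check_int_range(..., Rel.INC_LEFT) call above.
        for group in process_groups:
            for rid in group:
                if not 0 <= rid < rank_size:
                    raise ValueError(
                        f"rank id {rid} in process_groups is out of range [0, {rank_size})")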
|
/third_party/mindspore/mindspore/ccsrc/transform/graph_ir/op_declare/ |
D | hcom_ops_declare.cc |
    40  {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
    48  {"rank_size", ATTR_DESC(rank_size, AnyTraits<int64_t>())}};
|
/third_party/mindspore/mindspore/ccsrc/utils/context/ |
D | context_extends.cc |
    68  uint32_t rank_size = 1;    in OpenTsd() local
    74  rank_size = 1;    in OpenTsd()
    80  rank_size = IntToUint(rank_env);    in OpenTsd()
    88  MS_LOG(INFO) << "Device id = " << device_id << ", rank size = " << rank_size << ".";    in OpenTsd()
|
/third_party/mindspore/mindspore/ccsrc/backend/optimizer/pass/ |
D | communication_op_fusion.cc |
    50  int64_t rank_size = 1;    in GenerateKernelBuildInfo() local
    52  rank_size = AnfAlgo::GetNodeAttr<int64_t>(cnode, kAttrRankSize);    in GenerateKernelBuildInfo()
    54  size_t rank_size_t = LongToSize(rank_size);    in GenerateKernelBuildInfo()
    337  int64_t rank_size = 1;    in CreateFusedCommunicationOp() local
    339  rank_size = AnfAlgo::GetNodeAttr<int64_t>(final_node, kAttrRankSize);    in CreateFusedCommunicationOp()
    341  size_t rank_size_t = LongToSize(rank_size);    in CreateFusedCommunicationOp()
|
/third_party/mindspore/tests/st/auto_parallel/ |
D | optimizer_parallel.py |
    94  self.rank_size = 1
    104  self.rank_size = get_group_size()
    107  self.total_batch_size = self.rank_batch_size * self.rank_size
    111  self.total_batch_data_size = (self.rank_size, self.rank_batch_size) + image_size
    148  target = np.random.randint(0, self.num_classes, size=(self.rank_size, self.rank_batch_size))
|