Searched refs:allgather (Results 1 – 4 of 4) sorted by relevance
/third_party/mindspore/mindspore/nn/wrap/grad_reducer.py
     83  def _tensors_allreduce(degree, mean, allgather, allreduce, allreduce_filter, grad):  [argument]
    130  def _tensors_allreduce_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, ps_parameter):  [argument]
    158  def _tensors_allreduce_with_sparse(degree, mean, allgather, allreduce, allreduce_filter, grad):  [argument]
    175  indices = allgather(grad.indices)
    176  dout = allgather(grad.values)
    184  def _tensors_allreduce_with_sparse_ps(degree, mean, allgather, allreduce, allreduce_filter, grad, p…  [argument]
    205  indices = allgather(grad.indices)
    206  dout = allgather(grad.values)
    390  self.allgather = AllGather(group)
    414  new_grad = self.map_(F.partial(reduce_opt, self.degree, self.mean, self.allgather), [all …]
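
The sparse hits above (158–206) show the pattern grad_reducer.py uses for sparse gradients: each rank's indices and values are gathered with AllGather before the gradient is combined. The following is a minimal sketch of that pattern, not the library source; the function name and the mean handling are illustrative assumptions.

    from mindspore.ops import operations as P

    def allreduce_sparse_grad_sketch(degree, mean, allgather, indices, values):
        # allgather is assumed to be a P.AllGather() instance created for the
        # reduction group; degree is the number of ranks in that group.
        gathered_indices = allgather(indices)   # concatenate indices from every rank
        gathered_values = allgather(values)     # concatenate values from every rank
        if mean:
            # Average instead of sum by scaling with the group size (assumption;
            # the real code routes this through its own division/cast helpers).
            gathered_values = gathered_values / degree
        return gathered_indices, gathered_values
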
/third_party/mindspore/mindspore/parallel/_cell_wrapper.py
     32  self.allgather = AllGather(group)
     35  x = self.allgather(x)
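
The _cell_wrapper.py hits show the usual wrapping pattern: build AllGather(group) once in __init__ and apply it in construct. A small sketch of that pattern, assuming the communication group has already been initialized (the class name here is illustrative):

    import mindspore.nn as nn
    from mindspore.ops import operations as P

    class AllGatherWrapper(nn.Cell):
        # Concatenates every rank's input tensor along axis 0.
        def __init__(self, group):
            super(AllGatherWrapper, self).__init__()
            self.allgather = P.AllGather(group)

        def construct(self, x):
            return self.allgather(x)
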
/third_party/mindspore/tests/ut/python/communication/test_comm.py
     81  self.allgather = AllGather(group=HCCL_WORLD_COMM_GROUP)
     83  self.allgather = AllGather(group=NCCL_WORLD_COMM_GROUP)
     85  self.allgather = AllGather()
     91  x = self.allgather(x)
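
test_comm.py picks the world-group constant by backend (HCCL on Ascend, NCCL on GPU) and otherwise falls back to the default group. A hedged sketch of that selection, assuming the context has already been configured and communication has been initialized with init():

    import mindspore.nn as nn
    from mindspore import context
    from mindspore.communication.management import HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP
    from mindspore.ops import operations as P

    class AllGatherNet(nn.Cell):
        def __init__(self):
            super(AllGatherNet, self).__init__()
            if context.get_context("device_target") == "Ascend":
                self.allgather = P.AllGather(group=HCCL_WORLD_COMM_GROUP)  # HCCL world group
            elif context.get_context("device_target") == "GPU":
                self.allgather = P.AllGather(group=NCCL_WORLD_COMM_GROUP)  # NCCL world group
            else:
                self.allgather = P.AllGather()  # default world group

        def construct(self, x):
            return self.allgather(x)
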
/third_party/mindspore/mindspore/ccsrc/frontend/parallel/step_parallel.cc
   1530  CNodePtr allgather;  [local, in InsertAllGatherOp()]
   1541  …allgather = ReplaceNode(op, cast_node, graph, PARALLEL_OPTIMIZER_ALLGATHER_NOT_COMPUTE, param_name…  [in InsertAllGatherOp()]
   1546  allgather = cnode->input(IntToSize(res.second))->cast<CNodePtr>();  [in InsertAllGatherOp()]
   1550  AddCommOpFusionType(allgather, node);  [in InsertAllGatherOp()]
   1552  AddCommOpMeanFlag(allgather);  [in InsertAllGatherOp()]
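
The step_parallel.cc hits sit inside InsertAllGatherOp(), which (judging by the PARALLEL_OPTIMIZER_ALLGATHER_NOT_COMPUTE constant) inserts the AllGather nodes used by the parallel optimizer and tags them with a fusion type and mean flag. A usage-side sketch of enabling that pass from Python; the device_num value is an illustrative assumption:

    from mindspore import context
    from mindspore.context import ParallelMode

    # With the parallel optimizer on, parameters are sharded across ranks and the
    # frontend pass inserts AllGather ops to reassemble them before use.
    context.set_auto_parallel_context(parallel_mode=ParallelMode.SEMI_AUTO_PARALLEL,
                                      enable_parallel_optimizer=True,
                                      device_num=8)
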