/third_party/mindspore/mindspore/parallel/ |
D | _cell_wrapper.py | 18 from mindspore.ops.operations.comm_ops import AllGather
D | _cell_wrapper.py | 32 self.allgather = AllGather(group)
D | _cell_wrapper.py | 51 self.allgather1 = AllGather(group)
D | _cell_wrapper.py | 52 self.allgather2 = AllGather()
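The `_cell_wrapper.py` hits above show the AllGather primitive being held inside a Cell. A minimal sketch of that wrapping pattern (illustrative only, not the actual `_cell_wrapper.py` source; the class name and docstring are assumptions):

```python
# Minimal sketch: wrap AllGather in a Cell so it can be called like any network.
# Illustrative only; not copied from _cell_wrapper.py.
import mindspore.nn as nn
from mindspore.ops.operations.comm_ops import AllGather


class AllGatherCell(nn.Cell):
    """Gather the input tensor from every device in `group` along axis 0."""

    def __init__(self, group):
        super(AllGatherCell, self).__init__()
        self.allgather = AllGather(group)

    def construct(self, x):
        return self.allgather(x)
```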
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nccl/ |
D | nccl_collective_gpu_kernel.cc | 32 …AllGather, KernelAttr().AddAllSameAttr(true).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumbe…
D | nccl_collective_gpu_kernel.cc | 35 …AllGather, KernelAttr().AddAllSameAttr(true).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumbe…
D | nccl_collective_gpu_kernel.cc | 37 MS_REG_GPU_KERNEL_ONE(AllGather,
|
D | nccl_gpu_kernel.h | 36 typedef ncclResult_t (*AllGather)(const void *, void *, size_t, ncclDataType_t, cudaStream_t, const… typedef
|
D | nccl_collective_gpu_kernel.h | 174 …auto all_gather_funcptr = reinterpret_cast<AllGather>(dlsym(const_cast<void *>(collective_handle_)… in LaunchAllGather()
|
D | sync_batch_norm_gpu_kernel.h | 211 …auto all_gather_funcptr = reinterpret_cast<AllGather>(dlsym(const_cast<void *>(collective_handle_)… in LaunchAllGather()
|
/third_party/mindspore/tests/ut/python/communication/ |
D | test_comm.py | 28 from mindspore.ops.operations.comm_ops import AllReduce, AllGather, AlltoAll, ReduceOp, ReduceScatt…
D | test_comm.py | 81 self.allgather = AllGather(group=HCCL_WORLD_COMM_GROUP)
D | test_comm.py | 83 self.allgather = AllGather(group=NCCL_WORLD_COMM_GROUP)
D | test_comm.py | 85 self.allgather = AllGather()
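The `test_comm.py` hits construct the op against the Ascend (HCCL) or GPU (NCCL) world group, or with the default group. A hedged illustration of that selection; the backend check via `get_context` and the import path for the group constants are assumptions added for this sketch, not the file's own logic:

```python
# Illustrative only: pick the world communication group matching the backend.
from mindspore import context
from mindspore.communication.management import HCCL_WORLD_COMM_GROUP, NCCL_WORLD_COMM_GROUP
from mindspore.ops.operations.comm_ops import AllGather

device_target = context.get_context("device_target")
if device_target == "Ascend":
    allgather = AllGather(group=HCCL_WORLD_COMM_GROUP)  # HCCL world group
elif device_target == "GPU":
    allgather = AllGather(group=NCCL_WORLD_COMM_GROUP)  # NCCL world group
else:
    allgather = AllGather()                              # default world group
```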
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/distribution/ |
D | collective_wrapper.cc | 42 ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t data… in AllGather() function
D | collective_wrapper.cc | 44 …return NCCLWrapper::instance().AllGather(input_addr, output_addr, count, data_type, stream, group); in AllGather()
|
D | nccl_wrapper.h | 40 …ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t dat…
|
D | collective_wrapper.h | 42 extern "C" EXPORT_WRAPPER ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t …
|
D | nccl_wrapper.cc | 57 ncclResult_t NCCLWrapper::AllGather(const void *input_addr, void *output_addr, size_t count, ncclDa… in AllGather() function in mindspore::device::gpu::NCCLWrapper
|
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/hccl/ |
D | hcom_all_gather.h | 37 MS_HCCL_REG_KERNEL(AllGather, HcomAllGatherKernel);
|
/third_party/mindspore/mindspore/ops/_grad/ |
D | grad_comm_ops.py | 25 from ..operations.comm_ops import (AllGather, _MiniStepAllGather, _HostAllGather, AllReduce, Neighb…
D | grad_comm_ops.py | 38 all_gather = AllGather(group=self.group)
D | grad_comm_ops.py | 213 @bprop_getters.register(AllGather)
D | grad_comm_ops.py | 312 reduce_scatter_grad = AllGather(self.group)
D | grad_comm_ops.py | 403 all_gather = AllGather(group=group)
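Line 213 above registers a backward rule for AllGather. Under the usual collective semantics, the gradient of an all-gather is a reduce-scatter of the incoming gradient back to each rank's slice. A hedged sketch of how such a bprop getter could look; the actual body in `grad_comm_ops.py` may differ:

```python
# Hedged sketch of a bprop getter for AllGather: reduce-scatter the upstream
# gradient so each rank receives the gradient of its own slice.
# Not the verbatim grad_comm_ops.py implementation.
from mindspore.ops._grad.grad_base import bprop_getters
from mindspore.ops.operations.comm_ops import AllGather, ReduceScatter, ReduceOp


@bprop_getters.register(AllGather)
def get_bprop_all_gather(self):
    """Generate the bprop function for AllGather."""
    reduce_scatter = ReduceScatter(ReduceOp.SUM, group=self.group)

    def bprop(x, out, dout):
        dx = reduce_scatter(dout)
        return (dx,)

    return bprop
```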
|
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/mpi/ |
D | mpi_adapter.h | 42 …FUNC_EXPORT bool AllGather(const float *input, float *output, const std::vector<int> &ranks_group,…
|
D | mpi_export.cc | 60 return inst->AllGather(input, output, ranks_group, data_num); in MPIAllGather()
|
D | mpi_adapter.cc | 250 bool MPIAdapter::AllGather(const float *input, float *output, const std::vector<int> &ranks_group, … in AllGather() function in mindspore::device::cpu::MPIAdapter
|
/third_party/mindspore/tests/st/nccl/ |
D | test_nccl_all_gather_op.py | 36 self.all_gather = P.AllGather(group=NCCL_WORLD_COMM_GROUP)
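The single hit in `test_nccl_all_gather_op.py` builds the op against the NCCL world group. A compact, illustrative version of such a GPU test; the network name, input shape, and assertion are assumptions rather than the file's actual contents:

```python
# Illustrative GPU test in the spirit of test_nccl_all_gather_op.py.
import numpy as np
import mindspore.nn as nn
import mindspore.ops.operations as P
from mindspore import Tensor, context
from mindspore.communication.management import init, get_group_size, NCCL_WORLD_COMM_GROUP


class Net(nn.Cell):
    def __init__(self):
        super(Net, self).__init__()
        self.all_gather = P.AllGather(group=NCCL_WORLD_COMM_GROUP)

    def construct(self, x):
        return self.all_gather(x)


def test_all_gather():
    context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
    init("nccl")
    x = Tensor(np.ones([3, 4]).astype(np.float32))
    out = Net()(x)
    # With N participating ranks, the gathered result stacks along axis 0.
    assert out.shape == (3 * get_group_size(), 4)
```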
|
/third_party/mindspore/mindspore/nn/wrap/ |
D | grad_reducer.py | 21 from mindspore.ops.operations.comm_ops import AllReduce, AllGather
D | grad_reducer.py | 390 self.allgather = AllGather(group)
|
/third_party/mindspore/mindspore/ops/operations/ |
D | __init__.py | 38 from .comm_ops import (AllGather, AllReduce, NeighborExchange, AlltoAll, AllSwap, ReduceScatter, Br…
|
D | comm_ops.py | 169 class AllGather(PrimitiveWithInfer): class
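`comm_ops.py` line 169 defines AllGather as a PrimitiveWithInfer; its inferred output stacks each rank's tensor along the first axis. A small sketch of that shape rule (illustrative, not the actual infer method from `comm_ops.py`):

```python
# Sketch of the AllGather output-shape rule: dim 0 is multiplied by the group
# size, all other dims are unchanged. Not the actual comm_ops.py infer code.
def all_gather_infer_shape(x_shape, rank_size):
    out_shape = list(x_shape)
    out_shape[0] = out_shape[0] * rank_size
    return out_shape


# A [2, 8] tensor gathered across a 4-device group yields shape [8, 8].
assert all_gather_infer_shape([2, 8], 4) == [8, 8]
```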
|
/third_party/mindspore/ |
D | RELEASE.md | 431 - Fix AllGather op select problem when the shape is not divisible by 16. ([!18878](https://gitee.co…
D | RELEASE.md | 712 - [STABLE] Support AllGather and ReduceScatter fusion.(Ascend)
D | RELEASE.md | 3082 - Support AllReduce, AllGather, and BroadCast collective communication.
|