Home
last modified time | relevance | path

Searched refs:AllGather (Results 1 – 20 of 20) sorted by relevance

/third_party/mindspore/mindspore/parallel/
D_cell_wrapper.py18 from mindspore.ops.operations.comm_ops import AllGather
32 self.allgather = AllGather(group)
51 self.allgather1 = AllGather(group)
52 self.allgather2 = AllGather()
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/gpu/nccl/
Dnccl_collective_gpu_kernel.cc32 AllGather, KernelAttr().AddAllSameAttr(true).AddInputAttr(kNumberTypeFloat32).AddOutputAttr(kNumbe…
35 AllGather, KernelAttr().AddAllSameAttr(true).AddInputAttr(kNumberTypeFloat16).AddOutputAttr(kNumbe…
37 MS_REG_GPU_KERNEL_ONE(AllGather,
Dnccl_gpu_kernel.h36 typedef ncclResult_t (*AllGather)(const void *, void *, size_t, ncclDataType_t, cudaStream_t, const… typedef
Dnccl_collective_gpu_kernel.h174 …auto all_gather_funcptr = reinterpret_cast<AllGather>(dlsym(const_cast<void *>(collective_handle_)… in LaunchAllGather()
Dsync_batch_norm_gpu_kernel.h211 …auto all_gather_funcptr = reinterpret_cast<AllGather>(dlsym(const_cast<void *>(collective_handle_)… in LaunchAllGather()
/third_party/mindspore/tests/ut/python/communication/
Dtest_comm.py28 from mindspore.ops.operations.comm_ops import AllReduce, AllGather, AlltoAll, ReduceOp, ReduceScatt…
81 self.allgather = AllGather(group=HCCL_WORLD_COMM_GROUP)
83 self.allgather = AllGather(group=NCCL_WORLD_COMM_GROUP)
85 self.allgather = AllGather()
/third_party/mindspore/mindspore/ccsrc/runtime/device/gpu/distribution/
Dcollective_wrapper.cc42 ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t data… in AllGather() function
44 …return NCCLWrapper::instance().AllGather(input_addr, output_addr, count, data_type, stream, group); in AllGather()
Dnccl_wrapper.h40 …ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t count, ncclDataType_t dat…
Dcollective_wrapper.h42 extern "C" EXPORT_WRAPPER ncclResult_t AllGather(const void *input_addr, void *output_addr, size_t …
Dnccl_wrapper.cc57 ncclResult_t NCCLWrapper::AllGather(const void *input_addr, void *output_addr, size_t count, ncclDa… in AllGather() function in mindspore::device::gpu::NCCLWrapper
/third_party/mindspore/mindspore/ccsrc/backend/kernel_compiler/hccl/
Dhcom_all_gather.h37 MS_HCCL_REG_KERNEL(AllGather, HcomAllGatherKernel);
/third_party/mindspore/mindspore/ops/_grad/
Dgrad_comm_ops.py25 from ..operations.comm_ops import (AllGather, _MiniStepAllGather, _HostAllGather, AllReduce, Neighb…
38 all_gather = AllGather(group=self.group)
213 @bprop_getters.register(AllGather)
312 reduce_scatter_grad = AllGather(self.group)
403 all_gather = AllGather(group=group)
/third_party/mindspore/mindspore/ccsrc/runtime/device/cpu/mpi/
Dmpi_adapter.h42 …FUNC_EXPORT bool AllGather(const float *input, float *output, const std::vector<int> &ranks_group,…
Dmpi_export.cc60 return inst->AllGather(input, output, ranks_group, data_num); in MPIAllGather()
Dmpi_adapter.cc250 bool MPIAdapter::AllGather(const float *input, float *output, const std::vector<int> &ranks_group, … in AllGather() function in mindspore::device::cpu::MPIAdapter
/third_party/mindspore/tests/st/nccl/
Dtest_nccl_all_gather_op.py36 self.all_gather = P.AllGather(group=NCCL_WORLD_COMM_GROUP)
/third_party/mindspore/mindspore/nn/wrap/
Dgrad_reducer.py21 from mindspore.ops.operations.comm_ops import AllReduce, AllGather
390 self.allgather = AllGather(group)
/third_party/mindspore/mindspore/ops/operations/
D__init__.py38 from .comm_ops import (AllGather, AllReduce, NeighborExchange, AlltoAll, AllSwap, ReduceScatter, Br…
Dcomm_ops.py169 class AllGather(PrimitiveWithInfer): class
/third_party/mindspore/
DRELEASE.md431 - Fix AllGather op select problem when the shape is not divisible by 16. ([!18878](https://gitee.co…
712 - [STABLE] Support AllGather and ReduceScatter fusion.(Ascend)
3082 - Support AllReduce, AllGather, and BroadCast collective communication.