/**
 * Copyright 2019-2020 Huawei Technologies Co., Ltd
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NCCL_GPU_KERNEL_H_
#define MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NCCL_GPU_KERNEL_H_

#include <nccl.h>
#include <map>
#include <string>
#include <vector>
#include "backend/kernel_compiler/gpu/gpu_kernel.h"
#include "backend/kernel_compiler/gpu/gpu_kernel_factory.h"
#include "backend/kernel_compiler/gpu/kernel_constants.h"
#include "runtime/device/gpu/distribution/collective_init.h"

namespace mindspore {
namespace kernel {
static std::map<std::string, ncclDataType_t> kNcclDtypeMap = {
  {"kNumberTypeFloat32", ncclFloat}, {"kNumberTypeFloat16", ncclHalf}, {"kNumberTypeInt32", ncclInt}};

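// Function-pointer types for the communication entry points used by the NCCL GPU kernels.
// The collective and point-to-point wrappers mirror their NCCL counterparts but take the
// communication group name as an additional final argument.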
typedef ncclResult_t (*AllReduce)(const void *, void *, size_t, ncclDataType_t, ncclRedOp_t, cudaStream_t,
                                  const std::string &);
typedef ncclResult_t (*AllGather)(const void *, void *, size_t, ncclDataType_t, cudaStream_t, const std::string &);
typedef ncclResult_t (*ReduceScatter)(const void *, void *, size_t, ncclDataType_t, ncclRedOp_t, cudaStream_t,
                                      const std::string &);
typedef ncclResult_t (*Broadcast)(const void *, void *, size_t, ncclDataType_t, int, cudaStream_t, const std::string &);
typedef ncclResult_t (*Send)(const void *, size_t, ncclDataType_t, int, cudaStream_t, const std::string &);
typedef ncclResult_t (*Recv)(void *, size_t, ncclDataType_t, int, cudaStream_t, const std::string &);
typedef ncclResult_t (*GroupStart)();
typedef ncclResult_t (*GroupEnd)();
typedef std::vector<int> (*GetGroupRanks)(const std::string &);

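// Base class shared by the GPU NCCL kernels: it stores the communication group name and
// converts MindSpore type ids to NCCL data types via kNcclDtypeMap.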
class NcclGpuKernel : public GpuKernel {
 public:
  NcclGpuKernel() : group_name_(""), nccl_data_type_(ncclHalf) {}
  ~NcclGpuKernel() override = default;

 protected:
  ncclDataType_t nccl_dtype(const TypeId &type_id) { return kNcclDtypeMap[TypeIdLabel(type_id)]; }

  std::string group_name_;
  ncclDataType_t nccl_data_type_;
};
}  // namespace kernel
}  // namespace mindspore

#endif  // MINDSPORE_CCSRC_BACKEND_KERNEL_COMPILER_GPU_NCCL_GPU_KERNEL_H_
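For context, a minimal sketch of how a function pointer with one of the signatures declared above is typically consumed: the wrapper is resolved from a shared library at runtime and cast to the declared type before being invoked. The library name, symbol name, and group name below are illustrative assumptions, not taken from this file.

// illustration.cc -- hypothetical sketch, not part of the header above.
#include <dlfcn.h>
#include <nccl.h>
#include <iostream>
#include <string>

// Mirrors the AllReduce typedef declared in the header.
typedef ncclResult_t (*AllReduce)(const void *, void *, size_t, ncclDataType_t, ncclRedOp_t, cudaStream_t,
                                  const std::string &);

int main() {
  void *handle = dlopen("libgpu_collective.so", RTLD_LAZY);  // assumed library name
  if (handle == nullptr) {
    std::cerr << "dlopen failed: " << dlerror() << std::endl;
    return 1;
  }
  // Resolve the wrapper (symbol name assumed) and cast it to the function-pointer type.
  auto all_reduce = reinterpret_cast<AllReduce>(dlsym(handle, "AllReduce"));
  if (all_reduce == nullptr) {
    std::cerr << "dlsym failed: " << dlerror() << std::endl;
    dlclose(handle);
    return 1;
  }
  // With device buffers, an element count, and a CUDA stream prepared elsewhere,
  // an invocation would look like:
  //   ncclResult_t ret = all_reduce(send_buf, recv_buf, count, ncclFloat, ncclSum, stream, "example_group");
  dlclose(handle);
  return 0;
}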