Searched refs:nccl (Results 1 – 25 of 27) sorted by relevance

/external/tensorflow/third_party/nccl/
system.BUILD.tpl
7 name = "nccl",
9 hdrs = ["nccl.h"],
10 include_prefix = "third_party/nccl",
18 name = "nccl-files",
21 "nccl.h",
24 cp "%{nccl_header_dir}/nccl.h" "$(@D)/nccl.h" &&
archive.patch
29 diff --git a/src/nccl.h.in b/src/nccl.h
31 rename from src/nccl.h.in
32 rename to src/nccl.h
34 --- a/src/nccl.h.in
35 +++ b/src/nccl.h
40 -#define NCCL_MAJOR ${nccl:Major}
41 -#define NCCL_MINOR ${nccl:Minor}
42 -#define NCCL_PATCH ${nccl:Patch}
43 -#define NCCL_SUFFIX "${nccl:Suffix}"
49 -#define NCCL_VERSION_CODE ${nccl:Version}
archive.BUILD
19 "src/nccl.h",
62 "src/nccl.h",
78 name = "nccl",
90 # Files in src/ which #include "nccl.h" load it from there rather than
93 "src/nccl.h",
95 hdrs = ["src/nccl.h"],
96 include_prefix = "third_party/nccl",
nccl_configure.bzl
44 name = "nccl",
57 name = "nccl",
58 actual = "@nccl_archive//:nccl",
64 return Label("//third_party/nccl:{}".format(file))
91 config = find_cuda_config(repository_ctx, find_cuda_config_path, ["nccl"])
build_defs.bzl.tpl
45 …# https://github.com/NVIDIA/nccl/blob/f93fe9bfd94884cec2ba711897222e0df5569a53/makefiles/common.mk…
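
Both system.BUILD.tpl and archive.BUILD above publish nccl.h with include_prefix = "third_party/nccl", so C++ code that depends on "@local_config_nccl//:nccl" includes the header under that prefix. A minimal sketch of such a consumer (the program itself is illustrative and not part of TensorFlow; ncclGetVersion is the public NCCL API call):

    // Illustrative consumer of the wrapped header; assumes a dependency on
    // "@local_config_nccl//:nccl" so the include_prefix above applies.
    #include <cstdio>

    #include "third_party/nccl/nccl.h"

    int main() {
      int version = 0;
      // ncclGetVersion() reports the version of the linked NCCL library.
      if (ncclGetVersion(&version) == ncclSuccess) {
        std::printf("NCCL version code: %d\n", version);
      }
      return 0;
    }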
/external/tensorflow/tensorflow/core/nccl/
BUILD
2 # Wrap NVIDIA (https://github.com/NVIDIA/nccl) NCCL with tensorflow ops.
39 "@local_config_nccl//:nccl",
78 "@local_config_nccl//:nccl",
/external/tensorflow/tensorflow/core/distributed_runtime/
collective_param_resolver_distributed_test.cc
139 const string& device_type, bool nccl) { in DefineWorkers() argument
142 DefineWorker(name, device_type, num_devices, nccl); in DefineWorkers()
147 int num_devices, bool nccl) { in DefineWorker() argument
151 config.mutable_experimental()->set_collective_nccl(nccl); in DefineWorker()
290 const string& device_type, bool nccl) { in RestartWorker() argument
293 DefineWorker(worker_name, device_type, num_devices, nccl); in RestartWorker()
BUILD
525 "//tensorflow/core/nccl:collective_communicator",
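
The test matches above switch NCCL-backed collectives on per worker through ConfigProto's experimental options. A minimal sketch of the same setting (the helper name is hypothetical; the set_collective_nccl field comes from the matched lines):

    #include "tensorflow/core/protobuf/config.pb.h"

    // Hypothetical helper mirroring DefineWorker() above: build a session
    // config with NCCL collectives enabled or disabled.
    tensorflow::ConfigProto MakeCollectiveConfig(bool nccl) {
      tensorflow::ConfigProto config;
      config.mutable_experimental()->set_collective_nccl(nccl);
      return config;
    }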
/external/tensorflow/tensorflow/tools/ci_build/
Dockerfile.gpu
35 ln -s /usr/include/nccl.h /usr/local/cuda/include/nccl.h
/external/tensorflow/tensorflow/tools/ci_build/linux/rocm/
run_gpu_multi.sh
53 //tensorflow/core/nccl:nccl_manager_test
/external/tensorflow/tensorflow/
opensource_only.files
154 tensorflow/third_party/nccl/BUILD
155 tensorflow/third_party/nccl/LICENSE
156 tensorflow/third_party/nccl/archive.BUILD
157 tensorflow/third_party/nccl/archive.patch
158 tensorflow/third_party/nccl/build_defs.bzl.tpl
159 tensorflow/third_party/nccl/nccl_configure.bzl
160 tensorflow/third_party/nccl/system.BUILD.tpl
workspace.bzl
6 load("//third_party/nccl:nccl_configure.bzl", "nccl_configure")
789 build_file = clean_dep("//third_party:nccl/archive.BUILD"),
790 patch_file = clean_dep("//third_party/nccl:archive.patch"),
792 strip_prefix = "nccl-2.8.3-1",
794 …"https://storage.googleapis.com/mirror.tensorflow.org/github.com/nvidia/nccl/archive/v2.8.3-1.tar.…
795 "https://github.com/nvidia/nccl/archive/v2.8.3-1.tar.gz",
/external/tensorflow/
CODEOWNERS
8 /tensorflow/core/nccl/ @azaks2 @chsigg
.bazelrc
62 # nonccl: Disable nccl support.
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_device.cc
258 group->nccl = GetStream(executor, priority); in GetOrCreate()
259 group->nccl->Init(); in GetOrCreate()
261 << "] = " << group->nccl; in GetOrCreate()
264 group->compute->ThenWaitFor(group->nccl); in GetOrCreate()
265 group->nccl->ThenWaitFor(group->compute); in GetOrCreate()
315 if (stream.nccl) { in TestOnlyReset()
316 delete stream.nccl; in TestOnlyReset()
317 stream.nccl = nullptr; in TestOnlyReset()
439 stream_->nccl, in Init()
gpu_device.h
134 se::Stream* nccl = nullptr; member
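
gpu_device.cc keeps a dedicated se::Stream for NCCL in each stream group (the gpu_device.h member above) and orders it against the compute stream in both directions, so collectives and compute kernels on the same device do not race. A rough sketch of that wiring, assuming the se::Stream Init()/ThenWaitFor() API of this TensorFlow revision (the helper name and its arguments are illustrative):

    #include "tensorflow/stream_executor/stream.h"
    #include "tensorflow/stream_executor/stream_executor.h"

    namespace se = stream_executor;

    // Illustrative: mirrors the pattern in GetOrCreate() in gpu_device.cc.
    se::Stream* CreateNcclStream(se::StreamExecutor* executor, se::Stream* compute) {
      se::Stream* nccl = new se::Stream(executor);
      nccl->Init();
      // Cross-wait so NCCL work sees prior compute results and later compute
      // sees collective results (lines 264-265 above).
      compute->ThenWaitFor(nccl);
      nccl->ThenWaitFor(compute);
      return nccl;
    }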
/external/tensorflow/tensorflow/core/common_runtime/
collective_param_resolver_local.cc
59 const char* GetCollectiveName(const CollectiveParams* cp, bool nccl) { in GetCollectiveName() argument
62 return nccl ? "NcclBroadcast" : "HierarchicalTreeBroadcast"; in GetCollectiveName()
65 return nccl ? "NcclReduce" : "RingReduce"; in GetCollectiveName()
68 return nccl ? "NcclGather" : "RingGather"; in GetCollectiveName()
BUILD
1734 "//tensorflow/core/nccl:collective_communicator",
1864 "//tensorflow/core/nccl:collective_communicator",
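
collective_param_resolver_local.cc chooses a collective implementation by registered name, with the nccl flag selecting the NCCL variant of each collective. Reassembled from the matched lines as a sketch (the switch over cp->instance.type, the enum values, and the default branch are assumptions beyond what the matches show):

    // Sketch of GetCollectiveName(); only the three return statements are
    // copied from the matches above, the rest is assumed structure.
    const char* GetCollectiveName(const CollectiveParams* cp, bool nccl) {
      switch (cp->instance.type) {
        case BROADCAST_COLLECTIVE:
          return nccl ? "NcclBroadcast" : "HierarchicalTreeBroadcast";
        case REDUCTION_COLLECTIVE:
          return nccl ? "NcclReduce" : "RingReduce";
        case GATHER_COLLECTIVE:
          return nccl ? "NcclGather" : "RingGather";
        default:
          return "undef";
      }
    }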
/external/tensorflow/tensorflow/compiler/xla/pjrt/
BUILD
324 deps = if_cuda(["@local_config_nccl//:nccl"]),
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
BUILD
320 "//tensorflow/core/nccl:collective_communicator",
/external/tensorflow/tensorflow/core/common_runtime/eager/
BUILD
100 "//tensorflow/core/nccl:collective_communicator",
/external/tensorflow/tensorflow/core/kernels/
BUILD
240 "//tensorflow/core/nccl:collective_communicator",
263 "//tensorflow/core/nccl:collective_communicator",
395 "@local_config_nccl//:nccl",
399 "//tensorflow/core/nccl:nccl_lib",
/external/tensorflow/third_party/gpus/
cuda_configure.bzl
1088 # copy files mentioned in third_party/nccl/build_defs.bzl.tpl
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
BUILD
428 actual = if_cuda("@local_config_nccl//:nccl", ":empty"),
/external/tensorflow/tensorflow/core/
BUILD
887 "//tensorflow/core/nccl:mobile_srcs",
