Searched refs:CUDA (Results 1 – 25 of 289) sorted by relevance


/external/tensorflow/tensorflow/tools/dockerfiles/partials/ubuntu/
devel-nvidia.partial.Dockerfile
2 ARG CUDA=11.0
3 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
4 # ARCH and CUDA are specified again because the FROM directive resets ARGs
7 ARG CUDA
18 cuda-command-line-tools-${CUDA/./-} \
19 libcublas-${CUDA/./-} \
20 libcublas-dev-${CUDA/./-} \
21 cuda-nvprune-${CUDA/./-} \
22 cuda-nvrtc-${CUDA/./-} \
23 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
nvidia.partial.Dockerfile
2 ARG CUDA=11.0
3 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
4 # ARCH and CUDA are specified again because the FROM directive resets ARGs
7 ARG CUDA
19 cuda-command-line-tools-${CUDA/./-} \
20 libcublas-${CUDA/./-} \
21 cuda-nvrtc-${CUDA/./-} \
22 libcufft-${CUDA/./-} \
23 libcurand-${CUDA/./-} \
24 libcusolver-${CUDA/./-} \
[all …]
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/
devel-gpu-ppc64le.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
devel-gpu-ppc64le-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
gpu-ppc64le.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
42 cuda-command-line-tools-${CUDA/./-} \
43 libcublas-${CUDA/./-} \
44 cuda-nvrtc-${CUDA/./-} \
45 libcufft-${CUDA/./-} \
46 libcurand-${CUDA/./-} \
47 libcusolver-${CUDA/./-} \
[all …]
gpu-ppc64le-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
42 cuda-command-line-tools-${CUDA/./-} \
43 libcublas-${CUDA/./-} \
44 cuda-nvrtc-${CUDA/./-} \
45 libcufft-${CUDA/./-} \
46 libcurand-${CUDA/./-} \
47 libcusolver-${CUDA/./-} \
[all …]
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/
devel-gpu.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
devel-gpu-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
41 cuda-command-line-tools-${CUDA/./-} \
42 libcublas-${CUDA/./-} \
43 libcublas-dev-${CUDA/./-} \
44 cuda-nvprune-${CUDA/./-} \
45 cuda-nvrtc-${CUDA/./-} \
46 cuda-nvrtc-dev-${CUDA/./-} \
[all …]
gpu.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
42 cuda-command-line-tools-${CUDA/./-} \
43 libcublas-${CUDA/./-} \
44 cuda-nvrtc-${CUDA/./-} \
45 libcufft-${CUDA/./-} \
46 libcurand-${CUDA/./-} \
47 libcusolver-${CUDA/./-} \
[all …]
gpu-jupyter.Dockerfile
25 ARG CUDA=11.0
26 FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
27 # ARCH and CUDA are specified again because the FROM directive resets ARGs
30 ARG CUDA
42 cuda-command-line-tools-${CUDA/./-} \
43 libcublas-${CUDA/./-} \
44 cuda-nvrtc-${CUDA/./-} \
45 libcufft-${CUDA/./-} \
46 libcurand-${CUDA/./-} \
47 libcusolver-${CUDA/./-} \
[all …]
/external/llvm-project/openmp/libomptarget/plugins/cuda/
CMakeLists.txt
9 # Build a plugin for a CUDA machine if available.
13 …libomptarget_say("Not building CUDA offloading plugin: only support CUDA in Linux x86_64, ppc64le,…
16 libomptarget_say("Not building CUDA offloading plugin: libelf dependency not found.")
19 libomptarget_say("Not building CUDA offloading plugin: CUDA not found in system.")
22 libomptarget_say("Not building CUDA offloading plugin: CUDA Driver API not found in system.")
26 libomptarget_say("Building CUDA offloading plugin.")
29 add_definitions(-DTARGET_NAME=CUDA)
45 # Report to the parent scope that we are building a plugin for CUDA.
/external/eigen/doc/
UsingNVCC.dox
4 /** \page TopicCUDA Using Eigen in CUDA kernels
8 Starting from CUDA 5.0, the CUDA compiler, \c nvcc, is able to properly parse %Eigen's code (almost).
9 A few adaptations of %Eigen's code already allow using some parts of %Eigen in your own CUDA
10 To this end you need the devel branch of %Eigen, CUDA 5.0 or greater with GCC.
27 …- On 64bits system Eigen uses \c long \c int as the default type for indexes and sizes. On CUDA de…
28 …CUDA code compatible, this cannot be done automatically by %Eigen, and the user is thus required t…
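
The UsingNVCC.dox lines above describe calling into %Eigen from device code and the 64-bit index-type caveat. As a rough, hypothetical sketch of what that enables (not taken from the Eigen docs; the kernel, sizes, and launch configuration are invented for illustration):

    // Hypothetical sketch: fixed-size Eigen types inside a CUDA kernel.
    // Assumes an Eigen version whose headers compile under a CUDA compiler,
    // as the UsingNVCC.dox excerpt above describes.
    #include <Eigen/Dense>
    #include <cuda_runtime.h>
    #include <cstdio>

    __global__ void scale_points(Eigen::Vector3f* pts, float s, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) pts[i] *= s;  // fixed-size Eigen operations work in device code
    }

    int main() {
      const int n = 256;
      Eigen::Vector3f* d_pts = nullptr;
      cudaMalloc(&d_pts, n * sizeof(Eigen::Vector3f));
      cudaMemset(d_pts, 0, n * sizeof(Eigen::Vector3f));
      scale_points<<<(n + 127) / 128, 128>>>(d_pts, 2.0f, n);
      cudaDeviceSynchronize();
      cudaFree(d_pts);
      std::printf("done\n");
      return 0;
    }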
/external/llvm/docs/
CompileCudaWithLLVM.rst
2 Compiling CUDA C/C++ with LLVM
11 This document contains the user guides and the internals of compiling CUDA
12 C/C++ with LLVM. It is aimed at both users who want to compile CUDA with LLVM
14 familiarity with CUDA. Information about CUDA programming can be found in the
15 `CUDA programming guide
18 How to Build LLVM with CUDA Support
21 CUDA support is still in development and works the best in the trunk version
52 How to Compile CUDA C/C++ with LLVM
55 We assume you have installed the CUDA driver and runtime. Consult the `NVIDIA
56 CUDA installation guide
[all …]
/external/tensorflow/third_party/gpus/cuda/
build_defs.bzl.tpl
1 # Macros for building CUDA code.
3 """Shorthand for select()'ing on whether we're building with CUDA.
6 with CUDA enabled. Otherwise, the select statement evaluates to if_false.
42 """Default options for all CUDA compilations."""
49 # Some important CUDA optimizations are only enabled at O3.
58 """Tests if the CUDA was enabled during the configure process.
61 --config=cuda. Used to allow non-CUDA code to depend on CUDA libraries.
97 """Wrapper over cc_library which adds default CUDA options."""
/external/llvm-project/openmp/libomptarget/deviceRTLs/nvptx/
CMakeLists.txt
9 # Build the NVPTX (CUDA) Device RTL if the CUDA tools are available
19 …libomptarget_say("Not building CUDA offloading device RTL: invalid NVPTX alternate host compiler.")
31 …libomptarget_say("Not building CUDA offloading device RTL: clang is not supported as NVCC host com…
47 libomptarget_say("Building CUDA offloading device RTL.")
83 "List of CUDA Compute Capabilities to be used to compile the NVPTX device RTL.")
103 # yet supported by the CUDA toolchain on the device.
119 # a Clang compiler capable of compiling our CUDA files to LLVM bitcode and
122 "Location of a CUDA compiler capable of emitting LLVM bitcode.")
133 "Enable CUDA LLVM bitcode offloading device RTL.")
136 libomptarget_error_say("Cannot build CUDA LLVM bitcode offloading device RTL!")
[all …]
/external/llvm-project/llvm/docs/
CompileCudaWithLLVM.rst
2 Compiling CUDA with clang
11 This document describes how to compile CUDA code with clang, and gives some
12 details about LLVM and clang's CUDA implementations.
14 This document assumes a basic familiarity with CUDA. Information about CUDA
16 `CUDA programming guide
19 Compiling CUDA Code
25 CUDA is supported since llvm 3.9. Clang currently supports CUDA 7.0 through
26 10.1. If clang detects a newer CUDA version, it will issue a warning and will
27 attempt to use the detected CUDA SDK as if it were CUDA-10.1.
29 Before you build CUDA code, you'll need to have installed the CUDA SDK. See
[all …]
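
To make the CompileCudaWithLLVM.rst lines above concrete, a translation unit of the kind that guide compiles could look like the sketch below. The file name, kernel, GPU architecture, and the clang command in the comment are illustrative assumptions, not quotes from the document; adjust paths for your CUDA install.

    // axpy.cu -- illustrative sketch; a typical clang invocation (assumed) is:
    //   clang++ axpy.cu -o axpy --cuda-gpu-arch=sm_50 \
    //       -L/usr/local/cuda/lib64 -lcudart_static -ldl -lrt -pthread
    #include <cuda_runtime.h>
    #include <cstdio>

    __global__ void axpy(float a, const float* x, float* y, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) y[i] = a * x[i] + y[i];
    }

    int main() {
      const int n = 1024;
      float *x = nullptr, *y = nullptr;
      cudaMallocManaged(&x, n * sizeof(float));
      cudaMallocManaged(&y, n * sizeof(float));
      for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }
      axpy<<<(n + 255) / 256, 256>>>(3.0f, x, y, n);
      cudaDeviceSynchronize();
      std::printf("y[0] = %f\n", y[0]);  // expect 5.0
      cudaFree(x); cudaFree(y);
      return 0;
    }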
NVPTXUsage.rst
21 This document assumes a basic familiarity with CUDA and the PTX
22 assembly language. Information about the CUDA Driver API and the PTX assembly
23 language can be found in the `CUDA documentation
100 copy data to it by name with the CUDA Driver API.
117 generated PTX compatible with the CUDA Driver API.
119 Example: 32-bit PTX for CUDA Driver API: ``nvptx-nvidia-cuda``
121 Example: 64-bit PTX for CUDA Driver API: ``nvptx64-nvidia-cuda``
223 map in the following way to CUDA builtins:
226 CUDA Builtin PTX Special Register Intrinsic
252 instruction, equivalent to the ``__syncthreads()`` call in CUDA.
[all …]
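
The NVPTXUsage.rst lines above relate CUDA builtins to PTX special-register intrinsics and equate the bar.sync instruction with __syncthreads(). A small kernel exercising exactly those builtins, written as an illustration rather than taken from the document (the block size and the reduction itself are assumptions):

    // Uses threadIdx/blockIdx/blockDim (which lower to %tid/%ctaid/%ntid reads
    // in PTX) and __syncthreads() (which lowers to bar.sync).
    // Assumes blockDim.x is a power of two and at most 256.
    __global__ void block_sum(const float* in, float* out) {
      __shared__ float buf[256];
      int tid = threadIdx.x;
      buf[tid] = in[blockIdx.x * blockDim.x + tid];
      __syncthreads();
      for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) buf[tid] += buf[tid + stride];
        __syncthreads();
      }
      if (tid == 0) out[blockIdx.x] = buf[0];
    }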
/external/fmtlib/test/cuda-test/
CMakeLists.txt
2 # `enable_language(CUDA)` instead of `find_package(CUDA)` and let the CMake
8 # of the CUDA projects are using those.
10 # This test relies on `find_package(CUDA)` in the parent CMake config.
17 # In this test, we assume that the user is going to compile CUDA source code
21 # by providing another (non-CUDA) C++ source code.
35 # This part is for (non-CUDA) C++ code. MSVC can define incorrect
44 # now using a "new" way of handling CUDA
/external/tensorflow/tensorflow/core/grappler/optimizers/
auto_mixed_precision.h
25 enum class AutoMixedPrecisionMode { CUDA, MKL }; enumerator
35 AutoMixedPrecisionMode mode = AutoMixedPrecisionMode::CUDA)
41 return mode_ == AutoMixedPrecisionMode::CUDA ? "auto_mixed_precision_cuda" in name()
/external/llvm-project/parallel-libs/acxxel/examples/
simple_example.cu
31 acxxel::Platform *CUDA = acxxel::getCUDAPlatform().getValue(); in saxpy() local
32 acxxel::Stream Stream = CUDA->createStream().takeValue(); in saxpy()
35 auto DeviceX = CUDA->mallocD<float>(N).takeValue(); in saxpy()
36 auto DeviceY = CUDA->mallocD<float>(N).takeValue(); in saxpy()
/external/llvm-project/mlir/tools/mlir-cuda-runner/
CMakeLists.txt
16 # Configure CUDA runner support. Using check_language first allows us to give
19 check_language(CUDA)
21 enable_language(CUDA)
24 "Building the mlir cuda runner requires a working CUDA install")
/external/llvm-project/openmp/libomptarget/cmake/Modules/
LibomptargetGetDependencies.cmake
17 # CUDA : required to control offloading to NVIDIA GPUs.
113 # Looking for CUDA...
118 find_package(CUDA QUIET)
140 # Looking for CUDA Driver API... (needed for CUDA plugin)
224 # Looking for CUDA libdevice subdirectory
/external/llvm-project/parallel-libs/acxxel/
CMakeLists.txt
7 option(ACXXEL_ENABLE_CUDA "enable CUDA for acxxel" ON)
13 find_package(CUDA REQUIRED)
17 message(FATAL_ERROR "could not find libcuda, is the CUDA driver installed on your system?")
/external/llvm-project/polly/lib/External/ppcg/
README
49 Using PPCG to generate CUDA or OpenCL code
51 To convert a fragment of a C program to CUDA, insert a line containing
59 after the fragment. To generate CUDA code run
115 Compiling the generated CUDA code with nvcc
141 corresponding CUDA code runs fine.
157 in the CUDA or OpenCL code, but for now it is left to the user
195 the PPCG generated code using nvcc since CUDA does not support VLAs.
198 CUDA and function overloading
200 While CUDA supports function overloading based on the arguments types,
207 In the transformed (CUDA) code, however, overloading will cause the
[all …]
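
The README match about "CUDA and function overloading" is truncated here, but the underlying issue can be illustrated on its own: in CUDA C++, math calls resolve by argument type, whereas C code being translated may have been written against C's double-only functions. A self-contained sketch of that difference (not from the README, and not necessarily its exact example):

    // In CUDA C++, sqrt(float) and sqrt(double) are distinct overloads;
    // C source fed to a translator may have assumed the double-only C behaviour.
    #include <cuda_runtime.h>
    #include <cmath>
    #include <cstdio>

    __global__ void overload_demo(float* out) {
      float x = 2.0f;
      out[0] = sqrt(x);                       // float overload in CUDA C++
      out[1] = sqrt(static_cast<double>(x));  // double overload, narrowed on store
    }

    int main() {
      float* d = nullptr;
      cudaMalloc(&d, 2 * sizeof(float));
      overload_demo<<<1, 1>>>(d);
      float h[2];
      cudaMemcpy(h, d, sizeof(h), cudaMemcpyDeviceToHost);
      std::printf("%f %f\n", h[0], h[1]);
      cudaFree(d);
      return 0;
    }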
/external/llvm-project/clang/lib/Frontend/
FrontendOptions.cpp
22 .Case("cui", InputKind(Language::CUDA).getPreprocessed()) in getInputKindForExtension()
32 .Cases("cu", "cuh", Language::CUDA) in getInputKindForExtension()
