/external/tensorflow/tensorflow/tools/dockerfiles/partials/ubuntu/ |
D | devel-nvidia.partial.Dockerfile |
    2   ARG CUDA=10.0
    3   FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    4   # ARCH and CUDA are specified again because the FROM directive resets ARGs
    7   ARG CUDA
    16  cuda-command-line-tools-${CUDA/./-} \
    17  cuda-cublas-dev-${CUDA/./-} \
    18  cuda-cudart-dev-${CUDA/./-} \
    19  cuda-cufft-dev-${CUDA/./-} \
    20  cuda-curand-dev-${CUDA/./-} \
    21  cuda-cusolver-dev-${CUDA/./-} \
    [all …]
|
D | nvidia.partial.Dockerfile |
    2   ARG CUDA=10.0
    3   FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    4   # ARCH and CUDA are specified again because the FROM directive resets ARGs
    7   ARG CUDA
    15  cuda-command-line-tools-${CUDA/./-} \
    16  cuda-cublas-${CUDA/./-} \
    17  cuda-cufft-${CUDA/./-} \
    18  cuda-curand-${CUDA/./-} \
    19  cuda-cusolver-${CUDA/./-} \
    20  cuda-cusparse-${CUDA/./-} \
    [all …]
|
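These partials re-declare ARG CUDA after FROM because a FROM directive resets build arguments (the comment on line 4 of each file says exactly this), and the ${CUDA/./-} substitution rewrites a version such as 10.0 into 10-0 so it matches the apt package names (cuda-cudart-dev-10-0 and so on). The same pattern recurs in every GPU Dockerfile listed below. As a hedged illustration of what the resulting devel images are used for, here is a minimal CUDA sanity check one could compile inside such an image with nvcc; the file name and kernel are made up for this sketch and are not part of the Dockerfiles.

    // check_cuda.cu -- hypothetical sanity check, not part of the TensorFlow Dockerfiles.
    // Inside a devel image built from these files: nvcc check_cuda.cu -o check_cuda
    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void fill(int *out, int value) {
      out[threadIdx.x] = value;                  // each thread writes one element
    }

    int main() {
      int devices = 0;
      cudaError_t err = cudaGetDeviceCount(&devices);
      if (err != cudaSuccess || devices == 0) {
        std::printf("no CUDA device visible: %s\n", cudaGetErrorString(err));
        return 1;
      }

      int *d_buf = nullptr;
      cudaMalloc(&d_buf, 32 * sizeof(int));
      fill<<<1, 32>>>(d_buf, 42);                // one block of 32 threads
      cudaDeviceSynchronize();                   // wait for the kernel before copying back

      int h_buf[32] = {0};
      cudaMemcpy(h_buf, d_buf, sizeof(h_buf), cudaMemcpyDeviceToHost);
      cudaFree(d_buf);

      std::printf("devices: %d, h_buf[0]: %d\n", devices, h_buf[0]);
      return h_buf[0] == 42 ? 0 : 1;
    }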
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/ppc64le/ |
D | devel-gpu-ppc64le.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    39  cuda-command-line-tools-${CUDA/./-} \
    40  cuda-cublas-dev-${CUDA/./-} \
    41  cuda-cudart-dev-${CUDA/./-} \
    42  cuda-cufft-dev-${CUDA/./-} \
    43  cuda-curand-dev-${CUDA/./-} \
    44  cuda-cusolver-dev-${CUDA/./-} \
    [all …]
|
D | devel-gpu-ppc64le-jupyter.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    39  cuda-command-line-tools-${CUDA/./-} \
    40  cuda-cublas-dev-${CUDA/./-} \
    41  cuda-cudart-dev-${CUDA/./-} \
    42  cuda-cufft-dev-${CUDA/./-} \
    43  cuda-curand-dev-${CUDA/./-} \
    44  cuda-cusolver-dev-${CUDA/./-} \
    [all …]
|
D | gpu-ppc64le.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    38  cuda-command-line-tools-${CUDA/./-} \
    39  cuda-cublas-${CUDA/./-} \
    40  cuda-cufft-${CUDA/./-} \
    41  cuda-curand-${CUDA/./-} \
    42  cuda-cusolver-${CUDA/./-} \
    43  cuda-cusparse-${CUDA/./-} \
    [all …]
|
D | gpu-ppc64le-jupyter.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    38  cuda-command-line-tools-${CUDA/./-} \
    39  cuda-cublas-${CUDA/./-} \
    40  cuda-cufft-${CUDA/./-} \
    41  cuda-curand-${CUDA/./-} \
    42  cuda-cusolver-${CUDA/./-} \
    43  cuda-cusparse-${CUDA/./-} \
    [all …]
|
/external/tensorflow/tensorflow/tools/dockerfiles/dockerfiles/ |
D | devel-gpu.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    39  cuda-command-line-tools-${CUDA/./-} \
    40  cuda-cublas-dev-${CUDA/./-} \
    41  cuda-cudart-dev-${CUDA/./-} \
    42  cuda-cufft-dev-${CUDA/./-} \
    43  cuda-curand-dev-${CUDA/./-} \
    44  cuda-cusolver-dev-${CUDA/./-} \
    [all …]
|
D | gpu.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    38  cuda-command-line-tools-${CUDA/./-} \
    39  cuda-cublas-${CUDA/./-} \
    40  cuda-cufft-${CUDA/./-} \
    41  cuda-curand-${CUDA/./-} \
    42  cuda-cusolver-${CUDA/./-} \
    43  cuda-cusparse-${CUDA/./-} \
    [all …]
|
D | devel-gpu-jupyter.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    39  cuda-command-line-tools-${CUDA/./-} \
    40  cuda-cublas-dev-${CUDA/./-} \
    41  cuda-cudart-dev-${CUDA/./-} \
    42  cuda-cufft-dev-${CUDA/./-} \
    43  cuda-curand-dev-${CUDA/./-} \
    44  cuda-cusolver-dev-${CUDA/./-} \
    [all …]
|
D | gpu-jupyter.Dockerfile |
    25  ARG CUDA=10.0
    26  FROM nvidia/cuda${ARCH:+-$ARCH}:${CUDA}-base-ubuntu${UBUNTU_VERSION} as base
    27  # ARCH and CUDA are specified again because the FROM directive resets ARGs
    30  ARG CUDA
    38  cuda-command-line-tools-${CUDA/./-} \
    39  cuda-cublas-${CUDA/./-} \
    40  cuda-cufft-${CUDA/./-} \
    41  cuda-curand-${CUDA/./-} \
    42  cuda-cusolver-${CUDA/./-} \
    43  cuda-cusparse-${CUDA/./-} \
    [all …]
|
/external/eigen/doc/ |
D | UsingNVCC.dox |
    4   /** \page TopicCUDA Using Eigen in CUDA kernels
    8   Staring from CUDA 5.0, the CUDA compiler, \c nvcc, is able to properly parse %Eigen's code (almost).
    9   A few adaptations of the %Eigen's code already allows to use some parts of %Eigen in your own CUDA …
    10  To this end you need the devel branch of %Eigen, CUDA 5.0 or greater with GCC.
    27  …- On 64bits system Eigen uses \c long \c int as the default type for indexes and sizes. On CUDA de…
    28  …CUDA code compatible, this cannot be done automatically by %Eigen, and the user is thus required t…
|
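UsingNVCC.dox describes calling parts of Eigen from CUDA kernels and advises 32-bit indices on the device (lines 27-28). The kernel below is a sketch under those assumptions, not an excerpt from the Eigen docs: it assumes an Eigen checkout with CUDA support on the include path and uses only fixed-size Eigen types, which do not allocate.

    // eigen_rotate.cu -- illustrative sketch; assumes an Eigen version with CUDA support.
    // Define a 32-bit index type before including Eigen, as the doc above recommends.
    #define EIGEN_DEFAULT_DENSE_INDEX_TYPE int
    #include <Eigen/Dense>
    #include <cuda_runtime.h>

    // Rotate n packed 2-D points by 'angle'; fixed-size Eigen types are safe in device code.
    __global__ void rotate2d(const float *in, float *out, int n, float angle) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i >= n) return;

      Eigen::Matrix2f R;
      R(0, 0) = cosf(angle); R(0, 1) = -sinf(angle);
      R(1, 0) = sinf(angle); R(1, 1) =  cosf(angle);

      Eigen::Map<const Eigen::Vector2f> v(in + 2 * i);   // view two packed floats as a vector
      Eigen::Map<Eigen::Vector2f> w(out + 2 * i);
      w = R * v;                                         // small matrix-vector product per thread
    }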
/external/llvm/docs/ |
D | CompileCudaWithLLVM.rst |
    2   Compiling CUDA C/C++ with LLVM
    11  This document contains the user guides and the internals of compiling CUDA
    12  C/C++ with LLVM. It is aimed at both users who want to compile CUDA with LLVM
    14  familiarity with CUDA. Information about CUDA programming can be found in the
    15  `CUDA programming guide
    18  How to Build LLVM with CUDA Support
    21  CUDA support is still in development and works the best in the trunk version
    52  How to Compile CUDA C/C++ with LLVM
    55  We assume you have installed the CUDA driver and runtime. Consult the `NVIDIA
    56  CUDA installation guide
    [all …]
|
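CompileCudaWithLLVM.rst (both this copy and the newer one under swiftshader's llvm-7.0 below) walks through building CUDA sources with clang. The translation unit below is only a minimal stand-in for the kind of file the guide compiles; the invocation in the comment is a sketch, and the CUDA library path and sm_35 architecture are placeholders for whatever is installed locally.

    // saxpy.cu -- minimal CUDA translation unit; not taken from the LLVM docs.
    // A clang invocation along these lines builds it (paths and GPU arch are placeholders):
    //   clang++ saxpy.cu -o saxpy --cuda-gpu-arch=sm_35 \
    //       -L/usr/local/cuda/lib64 -lcudart -ldl -lrt -pthread
    #include <cstdio>
    #include <cuda_runtime.h>

    __global__ void saxpy(int n, float a, const float *x, float *y) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) y[i] = a * x[i] + y[i];         // y <- a*x + y, one element per thread
    }

    int main() {
      const int n = 1 << 20;
      float *x = nullptr, *y = nullptr;
      cudaMallocManaged(&x, n * sizeof(float));  // unified memory keeps the sketch short
      cudaMallocManaged(&y, n * sizeof(float));
      for (int i = 0; i < n; ++i) { x[i] = 1.0f; y[i] = 2.0f; }

      saxpy<<<(n + 255) / 256, 256>>>(n, 3.0f, x, y);
      cudaDeviceSynchronize();

      std::printf("y[0] = %f (expected 5.0)\n", y[0]);
      cudaFree(x);
      cudaFree(y);
      return 0;
    }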
D | NVPTXUsage.rst |
    21   This document assumes a basic familiarity with CUDA and the PTX
    22   assembly language. Information about the CUDA Driver API and the PTX assembly
    23   language can be found in the `CUDA documentation
    100  copy data to it by name with the CUDA Driver API.
    117  generated PTX compatible with the CUDA Driver API.
    119  Example: 32-bit PTX for CUDA Driver API: ``nvptx-nvidia-cuda``
    121  Example: 64-bit PTX for CUDA Driver API: ``nvptx64-nvidia-cuda``
    223  map in the following way to CUDA builtins:
    226  CUDA Builtin PTX Special Register Intrinsic
    252  instruction, equivalent to the ``__syncthreads()`` call in CUDA.
    [all …]
|
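NVPTXUsage.rst documents how CUDA builtins map to PTX special-register intrinsics (the table at line 226) and notes that the barrier intrinsic is equivalent to __syncthreads() (line 252). The kernel below is a generic illustration of those builtins in source form, not an example from the doc; it assumes a launch configuration of 256 threads per block.

    // block_sum.cu -- illustrative kernel using the builtins the NVPTX doc maps to PTX:
    // threadIdx/blockIdx/blockDim become special-register reads, __syncthreads() a barrier.
    #include <cuda_runtime.h>

    __global__ void block_sum(const float *in, float *out) {
      __shared__ float tile[256];                 // assumes blockDim.x == 256
      int tid = threadIdx.x;
      tile[tid] = in[blockIdx.x * blockDim.x + tid];
      __syncthreads();                            // all loads done before the tree reduction

      for (int stride = blockDim.x / 2; stride > 0; stride >>= 1) {
        if (tid < stride) tile[tid] += tile[tid + stride];
        __syncthreads();                          // every reduction step needs a barrier
      }
      if (tid == 0) out[blockIdx.x] = tile[0];    // thread 0 publishes the block's sum
    }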
/external/swiftshader/third_party/llvm-7.0/llvm/docs/ |
D | CompileCudaWithLLVM.rst |
    2   Compiling CUDA with clang
    11  This document describes how to compile CUDA code with clang, and gives some
    12  details about LLVM and clang's CUDA implementations.
    14  This document assumes a basic familiarity with CUDA. Information about CUDA
    16  `CUDA programming guide
    19  Compiling CUDA Code
    25  CUDA is supported in llvm 3.9, but it's still in active development, so we
    29  Before you build CUDA code, you'll need to have installed the appropriate
    30  driver for your nvidia GPU and the CUDA SDK. See `NVIDIA's CUDA installation
    33  <https://llvm.org/bugs/show_bug.cgi?id=26966>`_ the CUDA toolkit as installed
    [all …]
|
D | NVPTXUsage.rst |
    21   This document assumes a basic familiarity with CUDA and the PTX
    22   assembly language. Information about the CUDA Driver API and the PTX assembly
    23   language can be found in the `CUDA documentation
    100  copy data to it by name with the CUDA Driver API.
    117  generated PTX compatible with the CUDA Driver API.
    119  Example: 32-bit PTX for CUDA Driver API: ``nvptx-nvidia-cuda``
    121  Example: 64-bit PTX for CUDA Driver API: ``nvptx64-nvidia-cuda``
    223  map in the following way to CUDA builtins:
    226  CUDA Builtin PTX Special Register Intrinsic
    252  instruction, equivalent to the ``__syncthreads()`` call in CUDA.
    [all …]
|
/external/skqp/src/compute/hs/ |
D | README.md |
    5    for Vulkan, CUDA and OpenCL compute APIs.
    24   Here is a throughput plot for HotSort on Vulkan and CUDA sorting
    50   There are HotSort implementations for Vulkan, CUDA and OpenCL.
    55   [Vulkan](vk/bench/main.c), [CUDA](cuda/bench/main.c) and
    122  ## CUDA section in Usage
    132  `.cu` CUDA source and `.h` header file:
    139  Usage on CUDA is very simple.
    160  HotSort on CUDA requires two auxilary streams in order to maximize concurrency.
|
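Line 160 of the HotSort README (repeated in the skia copy below) says the CUDA backend needs two auxiliary streams to maximize concurrency. The HotSort API itself is not reproduced here; the sketch below only shows the general CUDA pattern that auxiliary streams rely on, namely that independent work queued on separate streams may overlap on the device.

    // streams_sketch.cu -- generic stream-concurrency sketch, not the HotSort API.
    #include <cuda_runtime.h>

    __global__ void scale(float *data, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) data[i] = data[i] * 0.5f + 1.0f;
    }

    int main() {
      const int n = 1 << 20;
      float *a = nullptr, *b = nullptr;
      cudaMalloc(&a, n * sizeof(float));
      cudaMalloc(&b, n * sizeof(float));

      cudaStream_t s0, s1;                             // two auxiliary streams
      cudaStreamCreate(&s0);
      cudaStreamCreate(&s1);

      scale<<<(n + 255) / 256, 256, 0, s0>>>(a, n);    // work queued on stream 0
      scale<<<(n + 255) / 256, 256, 0, s1>>>(b, n);    // independent work on stream 1

      cudaStreamSynchronize(s0);
      cudaStreamSynchronize(s1);
      cudaStreamDestroy(s0);
      cudaStreamDestroy(s1);
      cudaFree(a);
      cudaFree(b);
      return 0;
    }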
/external/skia/src/compute/hs/ |
D | README.md |
    5    for Vulkan, CUDA and OpenCL compute APIs.
    24   Here is a throughput plot for HotSort on Vulkan and CUDA sorting
    50   There are HotSort implementations for Vulkan, CUDA and OpenCL.
    55   [Vulkan](vk/bench/main.c), [CUDA](cuda/bench/main.c) and
    122  ## CUDA section in Usage
    132  `.cu` CUDA source and `.h` header file:
    139  Usage on CUDA is very simple.
    160  HotSort on CUDA requires two auxilary streams in order to maximize concurrency.
|
/external/eigen/Eigen/ |
D | Core |
    17   // Handle NVCC/CUDA/SYCL
    19   // Do not try asserts on CUDA and SYCL!
    32   // All functions callable from CUDA code must be qualified with __device__
    34   // Do not try to vectorize on CUDA and SYCL!
    52   // When compiling CUDA device code with NVCC, pull in math functions from the
    390  #include "src/Core/arch/CUDA/Half.h"
    391  #include "src/Core/arch/CUDA/PacketMathHalf.h"
    392  #include "src/Core/arch/CUDA/TypeCasting.h"
    395  #include "src/Core/arch/CUDA/PacketMath.h"
    396  #include "src/Core/arch/CUDA/MathFunctions.h"
    [all …]
|
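The comment at line 32 of Eigen/Core states that everything callable from CUDA code must carry a __device__ qualifier; Eigen applies this through its EIGEN_DEVICE_FUNC macro. The sketch below shows the same pattern on a user-defined helper; MY_DEVICE_FUNC is an invented stand-in for illustration, not an Eigen macro.

    // device_func_sketch.cu -- host/device qualification pattern; MY_DEVICE_FUNC is illustrative.
    #if defined(__CUDACC__)
    #define MY_DEVICE_FUNC __host__ __device__   // callable from host code and from kernels
    #else
    #define MY_DEVICE_FUNC                       // plain host build: the qualifier compiles away
    #endif

    MY_DEVICE_FUNC inline float squared_norm2(float x, float y) {
      return x * x + y * y;                      // usable on the CPU and inside kernels alike
    }

    __global__ void norms(const float *xs, const float *ys, float *out, int n) {
      int i = blockIdx.x * blockDim.x + threadIdx.x;
      if (i < n) out[i] = squared_norm2(xs[i], ys[i]);
    }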
/external/tensorflow/tensorflow/contrib/cmake/ |
D | CMakeLists.txt |
    66   # GPU, CUDA and cuDNN options
    81   # Options for linking CUDA/CUDNN libraries
    103  …option(tensorflow_CUDA_LIBRARY_PATH "Designate the default CUDA library paths" /usr/local/cuda/lib…
    396  …# In some Linux distros, find_package(CUDA) seems to require CMAKE_LIBRARY_PATH to include cuda-li…
    402  find_package(CUDA 9.0 REQUIRED)
    404  message(FATAL_ERROR "CUDA not found.")
    412  message(STATUS "Using CUDA arch flags: ${NVCC_ARCH_FLAGS_readable}")
    498  # Remove "." from CUDA version variable.
    501  # List of enumerated CUDA caps
    554  # NOTE(mrry): Update these flags when the version of CUDA or cuDNN used
|
/external/llvm/lib/Target/NVPTX/ |
D | NVPTXLowerKernelArgs.cpp |
    200  if (TM && TM->getDrvInterface() == NVPTX::CUDA) {  in runOnFunction()
    224  else if (TM && TM->getDrvInterface() == NVPTX::CUDA)  in runOnFunction()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/NVPTX/ |
D | NVPTXLowerArgs.cpp |
    208  if (TM && TM->getDrvInterface() == NVPTX::CUDA) {  in runOnKernelFunction()
    232  else if (TM && TM->getDrvInterface() == NVPTX::CUDA)  in runOnKernelFunction()
|
/external/clang/test/Driver/Inputs/CUDA_80/usr/local/cuda/ |
D | version.txt | 1 CUDA Version 8.0.42
|
/external/clang/test/Frontend/ |
D | stdlang.c | 16 #if defined(CUDA)
|
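The frontend test above keys off a CUDA define to check language selection. In ordinary CUDA sources, the macros that usually matter are __CUDACC__, defined whenever a CUDA compiler processes the file, and __CUDA_ARCH__, defined only during device-side compilation. A small sketch of the usual guards:

    // arch_guards.cu -- typical __CUDACC__ / __CUDA_ARCH__ guards in mixed host/device code.
    #include <cstdio>
    #include <cuda_runtime.h>

    __host__ __device__ void where_am_i() {
    #if defined(__CUDA_ARCH__)
      printf("device pass, __CUDA_ARCH__ = %d\n", __CUDA_ARCH__);   // e.g. 700 for sm_70
    #else
      printf("host pass\n");
    #endif
    }

    __global__ void probe() { where_am_i(); }

    int main() {
      where_am_i();              // compiled for the host
      probe<<<1, 1>>>();         // compiled for the device
      cudaDeviceSynchronize();
      return 0;
    }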
/external/clang/include/clang/Basic/ |
D | DiagnosticDriverKinds.td |
    25  def err_drv_cuda_bad_gpu_arch : Error<"Unsupported CUDA gpu architecture: %0">;
    27  "cannot find CUDA installation. Provide its path via --cuda-path, or pass "
    28  "-nocudainc to build without CUDA includes.">;
    30  "GPU arch %1 requires CUDA version at least %3, but installation at %0 is %2. "
    31  "Use --cuda-path to specify a different CUDA install, or pass "
|
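These driver diagnostics fire when clang cannot find a CUDA installation or when the requested GPU architecture needs a newer toolkit than the one found. The flags they mention (--cuda-path and -nocudainc) are real clang options; the file below is only a trivial target for trying them, with placeholder paths and architectures in the comments.

    // cuda_path_demo.cu -- trivial compile target for the driver flags named in the diagnostics.
    //
    //   clang++ cuda_path_demo.cu -c --cuda-path=/usr/local/cuda --cuda-gpu-arch=sm_70
    //       point the driver at a specific CUDA install and pick a GPU architecture
    //   clang++ cuda_path_demo.cu -c -nocudainc
    //       build without the CUDA includes, as the "cannot find CUDA installation" text suggests
    //
    __global__ void nop() {}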
/external/eigen/test/ |
D | CMakeLists.txt |
    352  # CUDA unit tests
    353  option(EIGEN_TEST_CUDA "Enable CUDA support in unit tests" OFF)
    354  option(EIGEN_TEST_CUDA_CLANG "Use clang instead of nvcc to compile the CUDA tests" OFF)
    362  find_package(CUDA 5.0)
|