Searched refs:GpuDeviceHandle (Results 1 – 9 of 9) sorted by relevance
/external/tensorflow/tensorflow/stream_executor/gpu/
D | gpu_driver.h
    68    static port::StatusOr<GpuDeviceHandle> DeviceFromContext(GpuContext* context);
    167   GpuDeviceHandle device);
    191   const std::vector<GpuDeviceHandle>& device_handles);
    206   static port::Status GetDevice(int device_ordinal, GpuDeviceHandle* device);
    210   static port::Status GetDeviceName(GpuDeviceHandle device,
    220   static port::Status CreateContext(int device_ordinal, GpuDeviceHandle device,
    417   static bool CanEnablePeerAccess(GpuDeviceHandle from, GpuDeviceHandle to);
    448   static port::StatusOr<GpuDeviceHandle> GetPointerDevice(GpuDevicePtr pointer);
    465   GpuDeviceHandle device);
    469   static port::Status GetGpuISAVersion(int* version, GpuDeviceHandle device);
    [all …]
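The gpu_driver.h hits above are the static GpuDriver wrapper API that takes GpuDeviceHandle. A minimal usage sketch built only from the declarations visible in these matches; the std::string out-parameter for GetDeviceName is an assumption, since the match at line 210 is truncated, and DescribeDevicePair is a hypothetical caller:

    #include <string>
    #include "tensorflow/stream_executor/gpu/gpu_driver.h"

    namespace se = stream_executor;
    using se::gpu::GpuDeviceHandle;
    using se::gpu::GpuDriver;

    // Sketch only: resolve two device ordinals to handles, then query them
    // through the static wrappers declared in gpu_driver.h.
    se::port::Status DescribeDevicePair() {
      GpuDeviceHandle dev0, dev1;
      auto s = GpuDriver::GetDevice(0, &dev0);   // declared at line 206 above
      if (!s.ok()) return s;
      s = GpuDriver::GetDevice(1, &dev1);
      if (!s.ok()) return s;

      std::string name;  // assumed out-parameter type; line 210 is truncated
      s = GpuDriver::GetDeviceName(dev0, &name);
      if (!s.ok()) return s;

      // Peer-access query declared at line 417 above.
      bool peer_ok = GpuDriver::CanEnablePeerAccess(dev0, dev1);
      (void)peer_ok;
      return se::port::Status::OK();
    }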
D | gpu_types.h
    51    using GpuDeviceHandle = hipDevice_t; variable
    69    using GpuDeviceHandle = CUdevice;
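These two gpu_types.h hits show that GpuDeviceHandle is a platform alias: hipDevice_t in the ROCm build and CUdevice in the CUDA build. A sketch of the selection, where the guard macro and omitted headers are assumptions; only the two using-declarations come from the hits themselves:

    // Sketch of the platform alias in gpu_types.h (guard macro assumed).
    #if TENSORFLOW_USE_ROCM
    using GpuDeviceHandle = hipDevice_t;  // ROCm build, line 51 above
    #else
    using GpuDeviceHandle = CUdevice;     // CUDA build, line 69 above
    #endif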
D | gpu_executor.h
    319   GpuDeviceHandle device_;
D | asm_compiler.cc
    144   gpu::GpuDeviceHandle handle; in CompileGpuAsm()
/external/tensorflow/tensorflow/core/common_runtime/gpu/
D | gpu_virtual_mem_allocator.cc
    28    using ::stream_executor::gpu::GpuDeviceHandle;
    41    StatusOr<bool> SupportsVirtualAddressManagement(GpuDeviceHandle device) { in SupportsVirtualAddressManagement()
    46    Status CheckVirtualAddressManagementSupport(GpuDeviceHandle device, in CheckVirtualAddressManagementSupport()
    67    std::vector<GpuDeviceHandle> access_gpu_handles; in Create()
    70    GpuDeviceHandle gpu_handle; in Create()
    76    GpuDeviceHandle peer_handle; in Create()
    114   PlatformGpuId gpu_id, const std::vector<GpuDeviceHandle> access_gpu_handles, in GpuVirtualMemAllocator()
D | gpu_virtual_mem_allocator.h
    78    std::vector<stream_executor::gpu::GpuDeviceHandle> access_device_handles,
    88    const std::vector<stream_executor::gpu::GpuDeviceHandle> access_gpu_handles_;
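The allocator hits show the pattern inside GpuVirtualMemAllocator::Create(): the allocating device's handle plus the handles of peer GPUs are gathered into an access_gpu_handles vector before the allocator is constructed. A rough sketch of that gathering step, assuming GpuDriver::GetDevice maps ordinals to handles; CollectAccessHandles and peer_ordinals are hypothetical names, and the real Create() propagates a Status rather than returning an empty vector:

    #include <vector>
    #include "tensorflow/stream_executor/gpu/gpu_driver.h"

    using ::stream_executor::gpu::GpuDeviceHandle;
    using ::stream_executor::gpu::GpuDriver;

    // Sketch: collect the handle of the allocating device plus any reachable
    // peers, mirroring the access_gpu_handles vector built in Create().
    std::vector<GpuDeviceHandle> CollectAccessHandles(
        int device_ordinal, const std::vector<int>& peer_ordinals) {
      std::vector<GpuDeviceHandle> access_gpu_handles;

      GpuDeviceHandle gpu_handle;
      if (!GpuDriver::GetDevice(device_ordinal, &gpu_handle).ok()) {
        return access_gpu_handles;  // real code propagates the Status instead
      }
      access_gpu_handles.push_back(gpu_handle);

      for (int peer : peer_ordinals) {
        GpuDeviceHandle peer_handle;
        if (GpuDriver::GetDevice(peer, &peer_handle).ok() &&
            GpuDriver::CanEnablePeerAccess(gpu_handle, peer_handle)) {
          access_gpu_handles.push_back(peer_handle);
        }
      }
      return access_gpu_handles;
    }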
/external/tensorflow/tensorflow/stream_executor/cuda/
D | cuda_driver.cc
    919    GpuDeviceHandle device) { in GetMinAllocationGranularity()
    973    const std::vector<GpuDeviceHandle>& device_handles) { in MapMemory()
    1592   /* static */ bool GpuDriver::CanEnablePeerAccess(GpuDeviceHandle from, in CanEnablePeerAccess()
    1593   GpuDeviceHandle to) { in CanEnablePeerAccess()
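The cuda_driver.cc hits at 1592–1593 are the CUDA definition of CanEnablePeerAccess, which here receives two GpuDeviceHandle (CUdevice) values. A plausible shape for such a check written directly against the CUDA driver API's cuDeviceCanAccessPeer, as a standalone sketch rather than a copy of the TensorFlow implementation; it assumes cuInit has already been called:

    #include <cuda.h>

    // Sketch of a device-to-device peer-access query on raw CUdevice handles.
    // GpuDeviceHandle is CUdevice in the CUDA build (see gpu_types.h above).
    bool CanEnablePeerAccessSketch(CUdevice from, CUdevice to) {
      int can_access = 0;
      CUresult res = cuDeviceCanAccessPeer(&can_access, from, to);
      if (res != CUDA_SUCCESS) {
        return false;  // treat a failed query as "no peer access"
      }
      return can_access != 0;
    }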
D | cuda_gpu_executor.cc
    848   bool FillBlockDimLimit(GpuDeviceHandle device, BlockDim* block_dim_limit) { in FillBlockDimLimit()
    963   GpuDeviceHandle device; in CreateDeviceDescription()
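FillBlockDimLimit at line 848 fills a BlockDim limit from a GpuDeviceHandle; the ROCm executor below has a matching helper. A hedged sketch of how such a query can be made with the raw CUDA driver API (the actual helper goes through stream_executor's own attribute wrappers, and BlockDimLimit here is a stand-in struct for BlockDim); cuInit is assumed to have been called:

    #include <cuda.h>

    // Stand-in for stream_executor's BlockDim.
    struct BlockDimLimit { int x, y, z; };

    // Sketch: query the per-block dimension limits of a CUdevice.
    bool FillBlockDimLimitSketch(CUdevice device, BlockDimLimit* limit) {
      int x = 0, y = 0, z = 0;
      if (cuDeviceGetAttribute(&x, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X, device) != CUDA_SUCCESS ||
          cuDeviceGetAttribute(&y, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y, device) != CUDA_SUCCESS ||
          cuDeviceGetAttribute(&z, CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z, device) != CUDA_SUCCESS) {
        return false;
      }
      limit->x = x;
      limit->y = y;
      limit->z = z;
      return true;
    }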
/external/tensorflow/tensorflow/stream_executor/rocm/
D | rocm_gpu_executor.cc
    753   bool FillBlockDimLimit(GpuDeviceHandle device, BlockDim* block_dim_limit) { in FillBlockDimLimit()
    811   GpuDeviceHandle device; in CreateDeviceDescription()