/external/tensorflow/tensorflow/stream_executor/
event.cc
    25  : stream_exec_(stream_exec),  in Event()
    27  stream_exec_->implementation()->CreateEventImplementation()) {}  in Event()
    31  if (stream_exec_ && implementation_) {  in ~Event()
    32  auto status = stream_exec_->DeallocateEvent(this);  in ~Event()
    40  auto status = stream_exec_->AllocateEvent(this);  in Init()
    50  return stream_exec_->PollForEventStatus(this);  in PollForStatus()
stream_executor_pimpl.cc
   105  : stream_exec_(stream_exec),  in ScopedTracer()
   108  if (stream_exec_->tracing_enabled_) {  in ScopedTracer()
   116  if (stream_exec_->tracing_enabled_) {  in ~ScopedTracer()
   126  tf_shared_lock lock{stream_exec_->mu_};  in Trace()
   127  for (TraceListener *listener : stream_exec_->listeners_) {  in Trace()
   134  StreamExecutor *stream_exec_;  member in stream_executor::ScopedTracer
event.h
    72  StreamExecutor* stream_exec_;  variable
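Read together, the stream_executor/ matches show one ownership pattern: Event keeps the StreamExecutor as a raw, non-owned pointer and routes every platform call through it (AllocateEvent from Init, PollForEventStatus from PollForStatus, DeallocateEvent from the destructor). The sketch below is illustrative only, not code from the tree; the helper name is invented, and only the se::Event / se::StreamExecutor calls visible in the matches above are assumed.

// Illustrative sketch of the Event lifecycle implemented above; not the
// real event.cc.
#include "tensorflow/stream_executor/event.h"
#include "tensorflow/stream_executor/stream_executor.h"

namespace se = stream_executor;

// Hypothetical helper: create an event on `stream_exec`, poll it once, and
// let ~Event() hand the resource back through DeallocateEvent().
se::Event::Status PollNewEventOnce(se::StreamExecutor* stream_exec) {
  se::Event event(stream_exec);  // stores the executor, cf. event.cc:25
  if (!event.Init()) {           // calls stream_exec_->AllocateEvent(this), cf. event.cc:40
    return se::Event::Status::kError;
  }
  return event.PollForStatus();  // calls stream_exec_->PollForEventStatus(this), cf. event.cc:50
}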
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_bfc_allocator.h
    45  stream_exec_(stream_exec),  in GPUMemAllocator()
    48  CHECK(stream_exec_ != nullptr);  in GPUMemAllocator()
    56  ptr = stream_exec_->UnifiedMemoryAllocate(num_bytes);  in Alloc()
    58  ptr = stream_exec_->AllocateArray<char>(num_bytes).opaque();  in Alloc()
    69  stream_exec_->UnifiedMemoryDeallocate(ptr);  in Free()
    72  stream_exec_->Deallocate(&gpu_ptr);  in Free()
    78  se::StreamExecutor* stream_exec_;  // not owned, non-null
gpu_host_allocator.h
    33  stream_exec_(stream_exec),  in GpuHostAllocator()
    35  CHECK(stream_exec_ != nullptr);  in GpuHostAllocator()
    42  ptr = stream_exec_->HostMemoryAllocate(num_bytes);  in Alloc()
    56  stream_exec_->HostMemoryDeallocate(ptr);  in Free()
    61  se::StreamExecutor* stream_exec_;  // not owned, non-null
gpu_debug_allocator.cc
    81  stream_exec_ =  in GPUDebugAllocator()
    96  InitMask(stream_exec_, allocated_ptr, before_mask);  in AllocateRaw()
   100  InitMask(stream_exec_,  in AllocateRaw()
   142  return CheckMask(stream_exec_, static_cast<char*>(ptr) - MASK_BYTES,  in CheckHeader()
   149  return CheckMask(stream_exec_, original_ptr + req_size - MASK_BYTES,  in CheckFooter()
   159  stream_exec_ =  in GPUNanResetAllocator()
   177  stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);  in AllocateRaw()
   193  stream_exec_->SynchronousMemcpyH2D(&nans[0], req_size, &nan_ptr);  in DeallocateRaw()
gpu_cudamalloc_allocator.cc
    33  stream_exec_ =  in GPUcudaMallocAllocator()
    42  se::cuda::ScopedActivateExecutorContext scoped_activation{stream_exec_};  in AllocateRaw()
gpu_debug_allocator.h
    56  se::StreamExecutor* stream_exec_;  // Not owned.  variable
    80  se::StreamExecutor* stream_exec_;  // Not owned.  variable
gpu_cudamalloc_allocator.h
    45  se::StreamExecutor* stream_exec_;  // Not owned.  variable
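The common_runtime/gpu/ allocators above all follow one convention: stream_exec_ is a raw, non-owned, non-null pointer checked in the constructor, device memory is carved out with AllocateArray<char>() and handed back through Deallocate() on a DeviceMemoryBase, and pinned host memory goes through HostMemoryAllocate()/HostMemoryDeallocate(). The condensed sketch below illustrates that pattern; the class is invented for illustration and is not GPUMemAllocator or GpuHostAllocator, and only the StreamExecutor calls that appear in the matches are assumed.

// Illustrative sketch only -- not the actual TensorFlow allocators. It keeps
// the same non-owned StreamExecutor* and uses only the calls visible above.
#include <cstddef>

#include "tensorflow/core/platform/logging.h"
#include "tensorflow/stream_executor/stream_executor.h"

namespace se = stream_executor;

class SketchGpuAllocator {
 public:
  explicit SketchGpuAllocator(se::StreamExecutor* stream_exec)
      : stream_exec_(stream_exec) {
    CHECK(stream_exec_ != nullptr);  // cf. gpu_bfc_allocator.h:48
  }

  // Device memory, cf. gpu_bfc_allocator.h Alloc()/Free().
  void* AllocDevice(size_t num_bytes) {
    if (num_bytes == 0) return nullptr;
    return stream_exec_->AllocateArray<char>(num_bytes).opaque();
  }
  void FreeDevice(void* ptr, size_t num_bytes) {
    if (ptr == nullptr) return;
    se::DeviceMemoryBase gpu_ptr(ptr, num_bytes);
    stream_exec_->Deallocate(&gpu_ptr);
  }

  // Pinned host memory, cf. gpu_host_allocator.h Alloc()/Free().
  void* AllocHost(size_t num_bytes) {
    return num_bytes != 0 ? stream_exec_->HostMemoryAllocate(num_bytes) : nullptr;
  }
  void FreeHost(void* ptr) {
    if (ptr != nullptr) stream_exec_->HostMemoryDeallocate(ptr);
  }

 private:
  se::StreamExecutor* stream_exec_;  // not owned, non-null
};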
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
buffer_comparator_test.cc
    31  stream_exec_(backend_->default_stream_executor()),  in BufferComparatorTest()
    32  allocator_(stream_exec_->platform(), {stream_exec_}),  in BufferComparatorTest()
    33  compiler_(Compiler::GetForPlatform(stream_exec_->platform())
    41  se::Stream stream(stream_exec_);  in CompareEqualFloatBuffers()
    46  .Allocate(stream_exec_->device_ordinal(), lhs.size() * sizeof(half))  in CompareEqualFloatBuffers()
    51  .Allocate(stream_exec_->device_ordinal(), rhs.size() * sizeof(half))  in CompareEqualFloatBuffers()
    72  se::StreamExecutor* stream_exec_;  member in xla::gpu::__anon7e51ca1b0111::BufferComparatorTest
cudnn_conv_algorithm_picker.cc
   150  tensorflow::mutex_lock lock = LockGpu(stream_exec_);  in PickBestAlgorithm()
   154  if (!stream_exec_->SynchronizeAllActivity()) {  in PickBestAlgorithm()
   159  se::Stream stream{stream_exec_};  in PickBestAlgorithm()
   161  const auto device_ordinal = stream_exec_->device_ordinal();  in PickBestAlgorithm()
   170  se_allocator.emplace(stream_exec_->platform(),  in PickBestAlgorithm()
   171  absl::Span<se::StreamExecutor* const>({stream_exec_}));  in PickBestAlgorithm()
   235  for (const AlgorithmDesc& alg : GetAlgorithms(kind, stream_exec_)) {  in PickBestAlgorithm()
   324  *log.mutable_compute_capability() = GetComputeCapability(stream_exec_);  in PickBestAlgorithm()
   325  *log.mutable_cudnn_version() = GetCudnnVersion(stream_exec_);  in PickBestAlgorithm()
cusolver_rewriter.cc
   172  se::Stream stream{stream_exec_};  in RunOnComputation()
   174  const auto device_ordinal = stream_exec_->device_ordinal();  in RunOnComputation()
   183  se_allocator.emplace(stream_exec_->platform(),  in RunOnComputation()
   184  absl::Span<se::StreamExecutor* const>({stream_exec_}));  in RunOnComputation()
   204  : stream_exec_(stream_exec), allocator_(allocator) {}  in CusolverRewriter()
cudnn_conv_algorithm_picker.h
    42  : stream_exec_(stream_exec), allocator_(allocator), compiler_(compiler) {}  in CudnnConvAlgorithmPicker()
    56  se::StreamExecutor* stream_exec_;  // never null  variable
cusolver_rewriter.h
    41  se::StreamExecutor* stream_exec_;  // never null  variable
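In the XLA GPU passes above, CudnnConvAlgorithmPicker and CusolverRewriter use their never-null stream_exec_ the same way: wrap it in a temporary se::Stream, read its device_ordinal() and platform(), and fall back to a StreamExecutorMemoryAllocator built over that executor when no allocator was injected. The sketch below reconstructs only that shared setup; the function name is invented, the header paths and the xla:: allocator types are assumptions about this snapshot of the tree, and the se:: calls are the ones visible in the matches.

// Illustrative reconstruction of the setup shared by
// cudnn_conv_algorithm_picker.cc and cusolver_rewriter.cc above; not actual
// tree code.
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "tensorflow/compiler/xla/service/device_memory_allocator.h"
#include "tensorflow/stream_executor/stream_executor.h"

namespace se = stream_executor;

void RunOnDeviceWithScratch(se::StreamExecutor* stream_exec,
                            xla::DeviceMemoryAllocator* allocator /* may be null */) {
  // A temporary stream bound to the stored executor,
  // cf. cusolver_rewriter.cc:172 and cudnn_conv_algorithm_picker.cc:159.
  se::Stream stream{stream_exec};
  stream.Init();

  // Scratch allocations are made against this device ordinal,
  // cf. cudnn_conv_algorithm_picker.cc:161.
  const int device_ordinal = stream_exec->device_ordinal();

  // If the caller did not inject an allocator, build one over the executor,
  // cf. cudnn_conv_algorithm_picker.cc:170-171.
  absl::optional<xla::StreamExecutorMemoryAllocator> se_allocator;
  if (allocator == nullptr) {
    se_allocator.emplace(stream_exec->platform(),
                         absl::Span<se::StreamExecutor* const>({stream_exec}));
    allocator = &se_allocator.value();
  }

  // ... allocate scratch through `allocator` for `device_ordinal` and run the
  // candidate work on `stream` ...
  (void)device_ordinal;
  (void)allocator;
}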