Searched refs:DMAHelper (Results 1 – 25 of 31) sorted by relevance

/external/tensorflow/tensorflow/core/common_runtime/
collective_rma_local_test.cc
82 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
101 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
125 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
146 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
base_collective_executor.cc
92 data_start_(reinterpret_cast<T*>(DMAHelper::base(&output_))), in CollectiveAdapterImpl()
109 DMAHelper::UnsafeSetShape(&output_, new_shape); in Flatten()
115 DMAHelper::UnsafeSetShape(&output_, old_shape_); in ConsumeFinalValue()
148 "base addr ", reinterpret_cast<int64>(DMAHelper::base(&output_)), in DebugString()
155 int64 base_addr = reinterpret_cast<int64>(DMAHelper::base(&t)); in TBounds()
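The Flatten()/ConsumeFinalValue() pair above reshapes the output tensor in place: UnsafeSetShape rewrites only the shape metadata, never the allocation. A minimal hedged sketch of that pattern (function name hypothetical):

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {
    // Temporarily view `output` as a rank-1 tensor for the collective,
    // then restore the caller-visible shape. The element count is
    // unchanged, so no data moves and DMAHelper::base(output) stays valid.
    void RunFlattened(Tensor* output, const TensorShape& old_shape) {
      TensorShape flat_shape({old_shape.num_elements()});
      DMAHelper::UnsafeSetShape(output, flat_shape);
      // ... run the collective against the flat view ...
      DMAHelper::UnsafeSetShape(output, old_shape);
    }
    }  // namespace tensorflow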
collective_rma_local.cc
124 memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes); in MemCpyAsync()
copy_tensor.cc
74 if (!DMAHelper::CanUseDMA(&from)) { in CopyHostToDevice()
139 if (!DMAHelper::CanUseDMA(&from)) { in CopyDeviceToHost()
203 if (!DMAHelper::CanUseDMA(&from)) { in CopyDeviceToDevice()
369 } else if (DMAHelper::CanUseDMA(&from)) { in WrappedTensorDeviceCopy()
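copy_tensor.cc repeats the same precondition in every direction: only tensors whose payload is one contiguous buffer may be moved with a raw copy. A hedged sketch of that guard (function name and error text hypothetical):

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/lib/core/errors.h"

    namespace tensorflow {
    // DT_STRING, DT_RESOURCE, etc. have no flat byte buffer, so
    // DMAHelper::CanUseDMA() rejects them before any memcpy/DMA path.
    Status CheckDMAable(const Tensor& from) {
      if (!DMAHelper::CanUseDMA(&from)) {
        return errors::InvalidArgument("Type ", DataTypeString(from.dtype()),
                                       " is not DMA-copyable");
      }
      return Status::OK();
    }
    }  // namespace tensorflow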
dma_helper.h
24 class DMAHelper {
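Every hit in this listing goes through the accessor class declared here. A reconstruction of the interface implied by these call sites (the real header is tensorflow/core/common_runtime/dma_helper.h; exact bodies may differ):

    // Grants the runtime access to a Tensor's raw storage, which the
    // public Tensor API deliberately hides.
    class DMAHelper {
     public:
      static bool CanUseDMA(const Tensor* t) { return t->CanUseDMA(); }
      static const void* base(const Tensor* t) { return t->base<const void>(); }
      static void* base(Tensor* t) { return t->base<void>(); }
      static TensorBuffer* buffer(Tensor* t) { return t->buf_; }
      static const TensorBuffer* buffer(const Tensor* t) { return t->buf_; }
      // Rewrites shape metadata without reallocating; callers such as
      // base_collective_executor.cc must keep the element count constant.
      static void UnsafeSetShape(Tensor* t, const TensorShape& s) {
        t->set_shape(s);
      }
    };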
ring_reducer.cc
85 (DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) { in Run()
hierarchical_tree_broadcaster.cc
363 (DMAHelper::base(col_ctx_->input) != in RunTree()
364 DMAHelper::base(col_ctx_->output))) { in RunTree()
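ring_reducer.cc and hierarchical_tree_broadcaster.cc make the same decision: if input and output already share storage, the initial copy into the output can be skipped. A sketch of that test (helper name hypothetical):

    #include "tensorflow/core/common_runtime/dma_helper.h"

    namespace tensorflow {
    // Comparing base addresses detects aliasing; when true, the
    // collective runs fully in place with no input-to-output copy.
    inline bool RunsInPlace(const Tensor* input, const Tensor* output) {
      return DMAHelper::base(input) == DMAHelper::base(output);
    }
    }  // namespace tensorflow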
scoped_allocator_mgr_test.cc
159 static_cast<const char*>(DMAHelper::base(&backing_tensor_)); in TEST_F()
/external/tensorflow/tensorflow/core/kernels/
scoped_allocator_ops.cc
63 << num_elements_ << " buffer " << DMAHelper::buffer(backing_tensor) in Compute()
64 << " base addr " << DMAHelper::base(backing_tensor); in Compute()
126 const TensorBuffer* backing_buf = DMAHelper::buffer(&output); in Compute()
132 const TensorBuffer* input_buf = DMAHelper::buffer(&context->input(i)); in Compute()
191 const TensorBuffer* backing_buf = DMAHelper::buffer(&backing_copy); in Compute()
198 << DMAHelper::base(&context->input(i)); in Compute()
206 const TensorBuffer* input_buf = DMAHelper::buffer(&copy); in Compute()
scoped_allocator_ops_test.cc
184 CHECK_EQ(DMAHelper::base(&input), DMAHelper::base(&output)); in ExecOp()
299 const void* lower_limit = DMAHelper::base(&input); in ExecOp()
310 CHECK_EQ(expected_base, DMAHelper::base(&output)); in ExecOp()
/external/tensorflow/tensorflow/compiler/tf2xla/
literal_util.cc
31 static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape); in HostTensorToBorrowingLiteral()
53 static_cast<const char*>(DMAHelper::base(host_tensor)), xla_shape); in HostTensorToMutableBorrowingLiteral()
67 buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor))); in HostTensorsToBorrowingLiteralTuple()
95 void* dst_ptr = DMAHelper::base(host_tensor); in CopyLiteralToHostTensor()
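literal_util.cc is the zero-copy bridge to XLA: the literal borrows the tensor's bytes instead of owning a copy. A hedged sketch close to what the first hit above does (the shape-conversion step is inferred from tf2xla's shape_util.h):

    #include "tensorflow/compiler/tf2xla/shape_util.h"
    #include "tensorflow/core/common_runtime/dma_helper.h"

    namespace tensorflow {
    // Wrap the host tensor's existing storage as an xla::BorrowingLiteral.
    // No bytes are copied, so the tensor must outlive the literal.
    Status HostTensorToBorrowingLiteral(const Tensor& host_tensor,
                                        xla::BorrowingLiteral* literal) {
      xla::Shape xla_shape;
      TF_RETURN_IF_ERROR(TensorShapeToXLAShape(host_tensor.dtype(),
                                               host_tensor.shape(), &xla_shape));
      *literal = xla::BorrowingLiteral(
          static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);
      return Status::OK();
    }
    }  // namespace tensorflow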
/external/tensorflow/tensorflow/core/common_runtime/sycl/
sycl_device_context.cc
31 const void *src_ptr = DMAHelper::base(cpu_tensor); in CopyCPUTensorToDevice()
32 void *dst_ptr = DMAHelper::base(device_tensor); in CopyCPUTensorToDevice()
109 const void *src_ptr = DMAHelper::base(device_tensor); in CopyDeviceTensorToCPU()
110 void *dst_ptr = DMAHelper::base(cpu_tensor); in CopyDeviceTensorToCPU()
sycl_util.h
30 inline void const* GetBase(const Tensor* src) { return DMAHelper::base(src); } in GetBase()
31 inline void* GetBase(Tensor* dst) { return DMAHelper::base(dst); } in GetBase()
/external/tensorflow/tensorflow/contrib/gdr/
gdr_memory_manager.cc
370 const TensorBuffer* buffer = DMAHelper::buffer(&tensor); in TransportOptionsFromTensor()
383 buffer = DMAHelper::buffer(copy); in TransportOptionsFromTensor()
414 std::memcpy(buffer->data(), DMAHelper::buffer(&tensor)->data(), in TransportOptionsFromTensor()
453 const TensorBuffer* buffer = DMAHelper::buffer(tensor); in TensorFromTransportOptions()
466 buffer = DMAHelper::buffer(copy); in TensorFromTransportOptions()
489 << DMAHelper::buffer(tensor)->size() << " took " in TensorFromTransportOptions()
499 std::memcpy(DMAHelper::buffer(tensor)->data(), in TensorFromTransportOptions()
500 DMAHelper::buffer(copy)->data(), in TensorFromTransportOptions()
501 DMAHelper::buffer(copy)->size()); in TensorFromTransportOptions()
578 const void* addr = DMAHelper::buffer(tensor)->data(); in FindMemoryRegion()
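gdr_memory_manager.cc works at the TensorBuffer level rather than the bare base pointer, because RDMA registration needs both the address and the size of the backing allocation. A minimal sketch of that access pattern (helper name hypothetical):

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {
    // DMAHelper::buffer() exposes the refcounted TensorBuffer behind a
    // tensor; data()/size() describe the region to register with the NIC.
    void BackingRegion(const Tensor& t, void** addr, size_t* len) {
      const TensorBuffer* buf = DMAHelper::buffer(&t);
      *addr = buf->data();
      *len = buf->size();
    }
    }  // namespace tensorflow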
gdr_worker.cc
93 DMAHelper::CanUseDMA(&val) && dma_ok) { in GrpcRecvTensorAsync()
168 if (!DMAHelper::CanUseDMA(hook->prod_value)) { in RecvBufAsync()
gdr_collective_executor_mgr.cc
49 req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor))); in RecvBufCall()
/external/tensorflow/tensorflow/core/distributed_runtime/
collective_rma_distributed.cc
48 req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor))); in RecvBufCall()
66 char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor)); in PopulateTensorFromExtra()
/external/tensorflow/tensorflow/contrib/verbs/
rdma.cc
946 int64_t prev_val = *(int64_t*)DMAHelper::base(&in) - 1; in ValidateChecksum()
1078 (void*)DMAHelper::base(&in), in.TotalBytes()) != nullptr)) { in RecvHandler()
1091 CountCopies(rm_.name_, (void*)DMAHelper::base(&in), in RecvHandler()
1092 (void*)DMAHelper::base(&copy), in.TotalBytes(), true); in RecvHandler()
1153 memcpy(DMAHelper::base(tensor_), DMAHelper::base(&in), in.TotalBytes()); in Clone()
1203 src_buffer_ = const_cast<TensorBuffer*>(DMAHelper::buffer(&in)); in SendContent()
1540 rdma_addr_ = DMAHelper::base(result_tensor_); in AllocateTensors()
1548 rdma_addr_ = DMAHelper::base(proxy_tensor_); in AllocateTensors()
1629 CountCopies(key_, (void*)DMAHelper::base(proxy_tensor_), in RecvTensorContent()
1630 (void*)DMAHelper::base(result_tensor_), in RecvTensorContent()
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_util.cc
101 if (!DMAHelper::CanUseDMA(&src)) { in PrepareCopy()
109 return const_cast<void*>(DMAHelper::base(src)); in GetBase()
112 void* GetBase(Tensor* dst) { return DMAHelper::base(dst); } in GetBase()
gpu_util.h
74 T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t))); in AsDeviceMemory()
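AsDeviceMemory above casts a tensor's base pointer into a StreamExecutor handle so device routines can address it. A hedged sketch (namespace spelling varied across TF releases; shown here with the se:: alias):

    // Reinterpret the tensor's device allocation as typed DeviceMemory,
    // usable by StreamExecutor copies and kernels.
    template <typename T>
    se::DeviceMemory<T> AsDeviceMemory(const Tensor& t) {
      T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t)));
      return se::DeviceMemory<T>(se::DeviceMemoryBase(ptr, t.TotalBytes()));
    }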
/external/tensorflow/tensorflow/contrib/mpi/
mpi_rendezvous_mgr.cc
127 void* data = const_cast<void*>(DMAHelper::base(&tr.tensor())); in RecvFromRemoteAsync()
183 void* temp = const_cast<void*>(DMAHelper::base(&val)); in AddRequest()
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_tensor_coding.cc
225 const TensorBuffer* buf = DMAHelper::buffer(&val); in EncodeTensorToByteBuffer()
grpc_worker_service.cc
543 const char* head = reinterpret_cast<const char*>(DMAHelper::base(tensor)); in SetTensorInRecvBufResp()
573 if (!DMAHelper::CanUseDMA(hook->prod_value)) { in RecvBufAsync()
/external/tensorflow/tensorflow/core/kernels/data/
optional_ops.cc
34 if (DMAHelper::CanUseDMA(&t) || t.dtype() == DT_VARIANT) { in OptionalDeviceCopy()
/external/tensorflow/tensorflow/compiler/jit/
xla_device_context.cc
147 static_cast<const char*>(DMAHelper::base(cpu_tensor)), in CopyCPUTensorToDevice()
