/external/tensorflow/tensorflow/core/common_runtime/

collective_rma_local_test.cc
     82  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    101  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    125  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    146  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()

base_collective_executor.cc
     92  data_start_(reinterpret_cast<T*>(DMAHelper::base(&output_))),  in CollectiveAdapterImpl()
    109  DMAHelper::UnsafeSetShape(&output_, new_shape);  in Flatten()
    115  DMAHelper::UnsafeSetShape(&output_, old_shape_);  in ConsumeFinalValue()
    148  "base addr ", reinterpret_cast<int64>(DMAHelper::base(&output_)),  in DebugString()
    155  int64 base_addr = reinterpret_cast<int64>(DMAHelper::base(&t));  in TBounds()

collective_rma_local.cc
    124  memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes);  in MemCpyAsync()

copy_tensor.cc
     74  if (!DMAHelper::CanUseDMA(&from)) {  in CopyHostToDevice()
    139  if (!DMAHelper::CanUseDMA(&from)) {  in CopyDeviceToHost()
    203  if (!DMAHelper::CanUseDMA(&from)) {  in CopyDeviceToDevice()
    369  } else if (DMAHelper::CanUseDMA(&from)) {  in WrappedTensorDeviceCopy()

dma_helper.h
     24  class DMAHelper {

ring_reducer.cc
     85  (DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) {  in Run()

hierarchical_tree_broadcaster.cc
    363  (DMAHelper::base(col_ctx_->input) !=  in RunTree()
    364  DMAHelper::base(col_ctx_->output))) {  in RunTree()

scoped_allocator_mgr_test.cc
    159  static_cast<const char*>(DMAHelper::base(&backing_tensor_));  in TEST_F()

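Every call in this directory reduces to the same pattern: gate on DMAHelper::CanUseDMA() (string and variant tensors are not flat byte buffers), then treat DMAHelper::base() as a raw pointer for memcpy-style moves; dma_helper.h:24 declares the helper class itself. The sketch below condenses that pattern; CopyFlatTensor is a hypothetical name, not a function from the tree.

#include <cstring>

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

// Hypothetical helper condensing the host path of
// CollectiveRemoteAccessLocal::MemCpyAsync (collective_rma_local.cc:124) and
// the CanUseDMA guard used throughout copy_tensor.cc: a flat byte copy
// between two equally sized tensors.
Status CopyFlatTensor(const Tensor& src, Tensor* dst) {
  if (!DMAHelper::CanUseDMA(&src) || !DMAHelper::CanUseDMA(dst)) {
    // String/variant tensors are not flat byte buffers and cannot be memcpy'd.
    return errors::InvalidArgument("tensor is not DMA-copyable");
  }
  if (src.TotalBytes() != dst->TotalBytes()) {
    return errors::InvalidArgument("source/destination size mismatch");
  }
  std::memcpy(DMAHelper::base(dst), DMAHelper::base(&src), src.TotalBytes());
  return Status::OK();
}

}  // namespace tensorflow
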
/external/tensorflow/tensorflow/core/kernels/

scoped_allocator_ops.cc
     63  << num_elements_ << " buffer " << DMAHelper::buffer(backing_tensor)  in Compute()
     64  << " base addr " << DMAHelper::base(backing_tensor);  in Compute()
    126  const TensorBuffer* backing_buf = DMAHelper::buffer(&output);  in Compute()
    132  const TensorBuffer* input_buf = DMAHelper::buffer(&context->input(i));  in Compute()
    191  const TensorBuffer* backing_buf = DMAHelper::buffer(&backing_copy);  in Compute()
    198  << DMAHelper::base(&context->input(i));  in Compute()
    206  const TensorBuffer* input_buf = DMAHelper::buffer(&copy);  in Compute()

scoped_allocator_ops_test.cc
    184  CHECK_EQ(DMAHelper::base(&input), DMAHelper::base(&output));  in ExecOp()
    299  const void* lower_limit = DMAHelper::base(&input);  in ExecOp()
    310  CHECK_EQ(expected_base, DMAHelper::base(&output));  in ExecOp()

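scoped_allocator_ops.cc compares TensorBuffer pointers to decide whether an input already lives inside the backing allocation, and its test asserts the same thing with raw base addresses. A hedged sketch of such an aliasing check (FieldAliasesBacking is a hypothetical name):

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Hypothetical condensation of the address checks in
// scoped_allocator_ops_test.cc (lines 184, 299, 310): an output produced
// against a ScopedAllocator must not own fresh memory; its bytes must lie
// inside the backing tensor's allocation.
bool FieldAliasesBacking(const Tensor& backing, const Tensor& field) {
  const char* lower = static_cast<const char*>(DMAHelper::base(&backing));
  const char* upper = lower + backing.TotalBytes();
  const char* field_base = static_cast<const char*>(DMAHelper::base(&field));
  return field_base >= lower && field_base + field.TotalBytes() <= upper;
}

}  // namespace tensorflow
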
/external/tensorflow/tensorflow/compiler/tf2xla/

literal_util.cc
     31  static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);  in HostTensorToBorrowingLiteral()
     53  static_cast<const char*>(DMAHelper::base(host_tensor)), xla_shape);  in HostTensorToMutableBorrowingLiteral()
     67  buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor)));  in HostTensorsToBorrowingLiteralTuple()
     95  void* dst_ptr = DMAHelper::base(host_tensor);  in CopyLiteralToHostTensor()

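literal_util.cc builds XLA literals that alias host tensor memory rather than copying it. The sketch below reconstructs that borrowing path from the call site at line 31; the header paths and the exact xla::BorrowingLiteral constructor are assumptions, and the Sketch suffix marks it as illustrative rather than the file's actual function.

#include "tensorflow/compiler/tf2xla/shape_util.h"
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"

namespace tensorflow {

// Sketch of the zero-copy conversion behind literal_util.cc:31: the literal
// borrows the tensor's bytes through DMAHelper::base() instead of copying
// them.  Header locations and the BorrowingLiteral constructor used here are
// assumptions inferred from the call site, not verified against the tree.
Status HostTensorToBorrowingLiteralSketch(const Tensor& host_tensor,
                                          xla::BorrowingLiteral* literal) {
  xla::Shape xla_shape;
  TF_RETURN_IF_ERROR(TensorShapeToXLAShape(host_tensor.dtype(),
                                           host_tensor.shape(), &xla_shape));
  *literal = xla::BorrowingLiteral(
      static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);
  return Status::OK();
}

}  // namespace tensorflow
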
/external/tensorflow/tensorflow/core/common_runtime/sycl/

sycl_device_context.cc
     31  const void *src_ptr = DMAHelper::base(cpu_tensor);  in CopyCPUTensorToDevice()
     32  void *dst_ptr = DMAHelper::base(device_tensor);  in CopyCPUTensorToDevice()
    109  const void *src_ptr = DMAHelper::base(device_tensor);  in CopyDeviceTensorToCPU()
    110  void *dst_ptr = DMAHelper::base(cpu_tensor);  in CopyDeviceTensorToCPU()

sycl_util.h
     30  inline void const* GetBase(const Tensor* src) { return DMAHelper::base(src); }  in GetBase()
     31  inline void* GetBase(Tensor* dst) { return DMAHelper::base(dst); }  in GetBase()

/external/tensorflow/tensorflow/contrib/gdr/

gdr_memory_manager.cc
    370  const TensorBuffer* buffer = DMAHelper::buffer(&tensor);  in TransportOptionsFromTensor()
    383  buffer = DMAHelper::buffer(copy);  in TransportOptionsFromTensor()
    414  std::memcpy(buffer->data(), DMAHelper::buffer(&tensor)->data(),  in TransportOptionsFromTensor()
    453  const TensorBuffer* buffer = DMAHelper::buffer(tensor);  in TensorFromTransportOptions()
    466  buffer = DMAHelper::buffer(copy);  in TensorFromTransportOptions()
    489  << DMAHelper::buffer(tensor)->size() << " took "  in TensorFromTransportOptions()
    499  std::memcpy(DMAHelper::buffer(tensor)->data(),  in TensorFromTransportOptions()
    500  DMAHelper::buffer(copy)->data(),  in TensorFromTransportOptions()
    501  DMAHelper::buffer(copy)->size());  in TensorFromTransportOptions()
    578  const void* addr = DMAHelper::buffer(tensor)->data();  in FindMemoryRegion()

gdr_worker.cc
     93  DMAHelper::CanUseDMA(&val) && dma_ok) {  in GrpcRecvTensorAsync()
    168  if (!DMAHelper::CanUseDMA(hook->prod_value)) {  in RecvBufAsync()

gdr_collective_executor_mgr.cc
     49  req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor)));  in RecvBufCall()

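For the GDR transport the interesting object is the TensorBuffer itself: DMAHelper::buffer() exposes the allocation whose data()/size() are registered as an RDMA memory region (FindMemoryRegion) or copied byte for byte when the destination buffer is not registered. A reduced, hypothetical sketch of that fallback copy:

#include <cstring>

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Hypothetical reduction of the fallback at gdr_memory_manager.cc:499-501:
// received bytes are staged in `staging` and then memcpy'd into the real
// tensor's buffer.  Assumes both buffers are host-resident and equally sized.
void CopyBufferBytes(const Tensor& staging, Tensor* tensor) {
  const TensorBuffer* src = DMAHelper::buffer(&staging);
  TensorBuffer* dst = DMAHelper::buffer(tensor);
  std::memcpy(dst->data(), src->data(), src->size());
}

}  // namespace tensorflow
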
/external/tensorflow/tensorflow/core/distributed_runtime/

collective_rma_distributed.cc
     48  req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor)));  in RecvBufCall()
     66  char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));  in PopulateTensorFromExtra()

/external/tensorflow/tensorflow/contrib/verbs/

rdma.cc
    946  int64_t prev_val = *(int64_t*)DMAHelper::base(&in) - 1;  in ValidateChecksum()
   1078  (void*)DMAHelper::base(&in), in.TotalBytes()) != nullptr)) {  in RecvHandler()
   1091  CountCopies(rm_.name_, (void*)DMAHelper::base(&in),  in RecvHandler()
   1092  (void*)DMAHelper::base(&copy), in.TotalBytes(), true);  in RecvHandler()
   1153  memcpy(DMAHelper::base(tensor_), DMAHelper::base(&in), in.TotalBytes());  in Clone()
   1203  src_buffer_ = const_cast<TensorBuffer*>(DMAHelper::buffer(&in));  in SendContent()
   1540  rdma_addr_ = DMAHelper::base(result_tensor_);  in AllocateTensors()
   1548  rdma_addr_ = DMAHelper::base(proxy_tensor_);  in AllocateTensors()
   1629  CountCopies(key_, (void*)DMAHelper::base(proxy_tensor_),  in RecvTensorContent()
   1630  (void*)DMAHelper::base(result_tensor_),  in RecvTensorContent()

/external/tensorflow/tensorflow/core/common_runtime/gpu/

gpu_util.cc
    101  if (!DMAHelper::CanUseDMA(&src)) {  in PrepareCopy()
    109  return const_cast<void*>(DMAHelper::base(src));  in GetBase()
    112  void* GetBase(Tensor* dst) { return DMAHelper::base(dst); }  in GetBase()

gpu_util.h
     74  T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t)));  in AsDeviceMemory()

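gpu_util.h wraps the same base pointer into a typed StreamExecutor handle so copy paths can hand device memory to a stream. A sketch of that wrapper at line 74, assuming the se namespace alias for stream_executor and the usual DeviceMemory<T>::MakeFromByteSize factory; the Sketch suffix marks it as illustrative:

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/platform/stream_executor.h"

namespace tensorflow {

// Sketch of the pattern at gpu_util.h:74: view a tensor's device allocation
// as a typed, non-owning StreamExecutor buffer.  The MakeFromByteSize factory
// and the `se` alias are assumptions about the StreamExecutor API in use.
template <typename T>
se::DeviceMemory<T> AsDeviceMemorySketch(const Tensor& t) {
  T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t)));
  return se::DeviceMemory<T>::MakeFromByteSize(ptr, t.TotalBytes());
}

}  // namespace tensorflow
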
/external/tensorflow/tensorflow/contrib/mpi/

mpi_rendezvous_mgr.cc
    127  void* data = const_cast<void*>(DMAHelper::base(&tr.tensor()));  in RecvFromRemoteAsync()
    183  void* temp = const_cast<void*>(DMAHelper::base(&val));  in AddRequest()

/external/tensorflow/tensorflow/core/distributed_runtime/rpc/

grpc_tensor_coding.cc
    225  const TensorBuffer* buf = DMAHelper::buffer(&val);  in EncodeTensorToByteBuffer()

grpc_worker_service.cc
    543  const char* head = reinterpret_cast<const char*>(DMAHelper::base(tensor));  in SetTensorInRecvBufResp()
    573  if (!DMAHelper::CanUseDMA(hook->prod_value)) {  in RecvBufAsync()

/external/tensorflow/tensorflow/core/kernels/data/

optional_ops.cc
     34  if (DMAHelper::CanUseDMA(&t) || t.dtype() == DT_VARIANT) {  in OptionalDeviceCopy()

/external/tensorflow/tensorflow/compiler/jit/

xla_device_context.cc
    147  static_cast<const char*>(DMAHelper::base(cpu_tensor)),  in CopyCPUTensorToDevice()