/external/tensorflow/tensorflow/core/kernels/ |
D | scoped_allocator_ops.cc |
    64   << DMAHelper::buffer(backing_tensor) << " base addr "  in Compute()
    65   << DMAHelper::base(backing_tensor);  in Compute()
    127  const TensorBuffer* backing_buf = DMAHelper::buffer(&output);  in Compute()
    133  const TensorBuffer* input_buf = DMAHelper::buffer(&context->input(i));  in Compute()
    192  const TensorBuffer* backing_buf = DMAHelper::buffer(&backing_copy);  in Compute()
    199  << DMAHelper::base(&context->input(i));  in Compute()
    207  const TensorBuffer* input_buf = DMAHelper::buffer(&copy);  in Compute()
|
D | scoped_allocator_ops_test.cc |
    185  CHECK_EQ(DMAHelper::base(&input), DMAHelper::base(&output));  in ExecOp()
    300  const void* lower_limit = DMAHelper::base(&input);  in ExecOp()
    311  CHECK_EQ(expected_base, DMAHelper::base(&output));  in ExecOp()
|
D | serialize_sparse_op.cc |
    298  static_cast<int64*>(DMAHelper::base(&output_indices));  in operator ()()
    301  T* output_values_ptr = static_cast<T*>(DMAHelper::base(&output_values));  in operator ()()
|
D | shape_ops.h | 152 Tdim dim = *static_cast<const Tdim*>(DMAHelper::base(&dim_t)); in Compute()
|
/external/tensorflow/tensorflow/core/common_runtime/ |
D | collective_rma_local_test.cc |
    89   EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    108  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    132  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
    153  EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor));  in TEST_F()
|
D | base_collective_executor.cc |
    103  data_start_(reinterpret_cast<T*>(DMAHelper::base(&output_))),  in CollectiveAdapterImpl()
    120  DMAHelper::UnsafeSetShape(&output_, new_shape);  in Flatten()
    126  DMAHelper::UnsafeSetShape(&output_, old_shape_);  in ConsumeFinalValue()
    161  "base addr ", reinterpret_cast<int64>(DMAHelper::base(&output_)),  in DebugString()
    168  int64 base_addr = reinterpret_cast<int64>(DMAHelper::base(&t));  in TBounds()
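
The Flatten()/ConsumeFinalValue() hits above show the collective adapter temporarily viewing its output as a 1-D tensor via DMAHelper::UnsafeSetShape, which rewrites the shape in place without touching the buffer, and then restoring the original shape. A minimal sketch of that pattern; the RAII wrapper name (ScopedFlatten) is hypothetical, not the class used in base_collective_executor.cc.

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/tensor_shape.h"

    namespace tensorflow {

    // Views `t` as a flat 1-D tensor for the lifetime of this object, then
    // restores the original shape. Element count and buffer are unchanged.
    class ScopedFlatten {
     public:
      explicit ScopedFlatten(Tensor* t) : t_(t), old_shape_(t->shape()) {
        DMAHelper::UnsafeSetShape(t_, TensorShape({t_->NumElements()}));
      }
      ~ScopedFlatten() { DMAHelper::UnsafeSetShape(t_, old_shape_); }

     private:
      Tensor* const t_;
      const TensorShape old_shape_;
    };

    }  // namespace tensorflow
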
|
D | collective_rma_local.cc | 159 memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes); in MemCpyAsync()
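
MemCpyAsync above reduces a local tensor-to-tensor transfer to a single memcpy between the two base addresses. A synchronous sketch of the same copy, assuming both tensors are already allocated and memcpy-safe; the wrapper name and the DCHECKs are illustrative additions.

    #include <cstring>

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/logging.h"

    namespace tensorflow {

    // Copies the flat payload of `src` into `dst`; both tensors must be
    // DMA-able and hold the same number of bytes.
    void MemCpyTensor(const Tensor& src, Tensor* dst) {
      DCHECK(DMAHelper::CanUseDMA(&src));
      DCHECK(DMAHelper::CanUseDMA(dst));
      const size_t bytes = src.TotalBytes();
      DCHECK_EQ(bytes, dst->TotalBytes());
      if (bytes > 0) {
        memcpy(DMAHelper::base(dst), DMAHelper::base(&src), bytes);
      }
    }

    }  // namespace tensorflow
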
|
D | copy_tensor.cc |
    76   if (!DMAHelper::CanUseDMA(&from)) {  in CopyHostToDevice()
    151  if (!DMAHelper::CanUseDMA(&from)) {  in CopyDeviceToDevice()
    313  if (DMAHelper::CanUseDMA(&from)) {  in WrappedTensorDeviceCopy()
    355  if (!DMAHelper::CanUseDMA(&from)) {  in CopyDeviceToHost()
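
Every copy path in copy_tensor.cc checks DMAHelper::CanUseDMA before touching raw bytes, since only memcpy-safe dtypes have a single flat buffer. A minimal sketch of that guard, using a hypothetical staging helper rather than the real CopyHostToDevice/CopyDeviceToDevice plumbing.

    #include <cstring>

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/framework/types.h"
    #include "tensorflow/core/lib/core/errors.h"
    #include "tensorflow/core/lib/core/status.h"

    namespace tensorflow {

    // Stages the flat bytes of `from` into a caller-provided scratch buffer,
    // refusing tensors whose memory cannot be block-copied.
    Status StageForCopy(const Tensor& from, void* scratch, size_t scratch_bytes) {
      if (!DMAHelper::CanUseDMA(&from)) {
        // Same early-out as the hits above: DT_STRING / DT_VARIANT / DT_RESOURCE
        // tensors have no single contiguous payload.
        return errors::Internal(DataTypeString(from.dtype()),
                                " tensors cannot be copied via DMA");
      }
      if (from.TotalBytes() > scratch_bytes) {
        return errors::InvalidArgument("staging buffer too small");
      }
      memcpy(scratch, DMAHelper::base(&from), from.TotalBytes());
      return Status::OK();
    }

    }  // namespace tensorflow
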
|
D | dma_helper.h | 24 class DMAHelper {
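
dma_helper.h is the single header behind every hit in this index: DMAHelper is a friend of Tensor that exposes the backing TensorBuffer (buffer()), its start address (base()), whether the contents are memcpy-safe (CanUseDMA()), and an in-place shape rewrite (UnsafeSetShape()). A small caller-side sketch using only those accessors; the two helper functions are illustrative and not part of the header.

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Number of payload bytes backing `t`, or 0 if the tensor is not
    // memcpy-safe (e.g. DT_STRING) or has no allocated buffer.
    size_t DmaPayloadBytes(const Tensor& t) {
      if (!DMAHelper::CanUseDMA(&t)) return 0;
      const TensorBuffer* buf = DMAHelper::buffer(&t);
      return buf != nullptr ? buf->size() : 0;
    }

    // True if `a` and `b` alias the same underlying storage.
    bool SameBackingStore(const Tensor& a, const Tensor& b) {
      return DMAHelper::base(&a) == DMAHelper::base(&b);
    }

    }  // namespace tensorflow
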
|
D | ring_reducer.cc | 91 (DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) { in Run()
|
D | hierarchical_tree_broadcaster.cc |
    370  (DMAHelper::base(col_ctx_->input) !=  in RunTree()
    371  DMAHelper::base(col_ctx_->output))) {  in RunTree()
|
D | scoped_allocator_mgr_test.cc | 159 static_cast<const char*>(DMAHelper::base(&backing_tensor_)); in TEST_F()
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | literal_util.cc |
    42   static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);  in HostTensorToBorrowingLiteral()
    64   static_cast<const char*>(DMAHelper::base(host_tensor)), xla_shape);  in HostTensorToMutableBorrowingLiteral()
    78   buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor)));  in HostTensorsToBorrowingLiteralTuple()
    106  void* dst_ptr = DMAHelper::base(host_tensor);  in CopyLiteralToHostTensor()
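
The literal_util.cc hits build xla::BorrowingLiteral objects directly over a host tensor's bytes, so no data is copied. A condensed sketch of that aliasing step, assuming the caller already has the matching xla::Shape (the real helpers derive it from the tensor's dtype and shape); the function name is illustrative.

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Points `literal` at the tensor's bytes without copying; `host_tensor`
    // must outlive (and not be reallocated under) the literal.
    void BorrowHostTensor(const Tensor& host_tensor, const xla::Shape& xla_shape,
                          xla::BorrowingLiteral* literal) {
      *literal = xla::BorrowingLiteral(
          static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape);
    }

    }  // namespace tensorflow
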
|
/external/tensorflow/tensorflow/core/data/ |
D | compression_utils.cc |
    34   total_size += DMAHelper::buffer(&component)->size();  in CompressElement()
    55   const TensorBuffer* buffer = DMAHelper::buffer(&component);  in CompressElement()
    96   TensorBuffer* buffer = DMAHelper::buffer(&out->back());  in UncompressElement()
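
CompressElement sizes its output by summing each component's TensorBuffer::size() and then reads the component's raw bytes through the same buffer. A small sketch of that buffer access, ignoring the string/variant components the real code handles separately; the helper name is illustrative.

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/stringpiece.h"

    namespace tensorflow {

    // View over the flat payload of a memcpy-safe tensor; empty if the tensor
    // has no allocated buffer (e.g. zero elements).
    StringPiece RawPayload(const Tensor& component) {
      const TensorBuffer* buffer = DMAHelper::buffer(&component);
      if (buffer == nullptr) return StringPiece();
      return StringPiece(static_cast<const char*>(buffer->data()), buffer->size());
    }

    }  // namespace tensorflow
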
|
/external/tensorflow/tensorflow/core/common_runtime/device/ |
D | device_event_mgr_test.cc |
    386  se::DeviceMemoryBase gpu_dst_ptr0(DMAHelper::base(&gpu_inputs_[0]),  in DoAddChain()
    389  &gpu_dst_ptr0, DMAHelper::base(&host_inputs_[0]), src_bytes);  in DoAddChain()
    390  se::DeviceMemoryBase gpu_dst_ptr1(DMAHelper::base(&gpu_inputs_[1]),  in DoAddChain()
    393  &gpu_dst_ptr1, DMAHelper::base(&host_inputs_[1]), src_bytes);  in DoAddChain()
    420  se::DeviceMemoryBase gpu_src_ptr(DMAHelper::base(ctx->mutable_output(0)),  in DoAddChain()
    422  gpu_helper_->d2h_stream()->ThenMemcpy(DMAHelper::base(&host_outputs_[0]),  in DoAddChain()
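
These test hits wrap DMAHelper::base() of a GPU-resident tensor in an se::DeviceMemoryBase so a StreamExecutor stream can memcpy into or out of it. A condensed sketch of the host-to-device direction, assuming `stream` belongs to the device that owns `gpu_tensor` and that the caller later waits for completion; the function name is illustrative.

    #include "tensorflow/core/common_runtime/dma_helper.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/stream_executor/stream.h"

    namespace tensorflow {
    namespace se = ::stream_executor;

    // Enqueues a host-to-device copy of `host_tensor`'s bytes into
    // `gpu_tensor` on `stream`; the copy is asynchronous.
    void EnqueueH2D(se::Stream* stream, const Tensor& host_tensor,
                    Tensor* gpu_tensor) {
      const size_t bytes = host_tensor.TotalBytes();
      se::DeviceMemoryBase dst(DMAHelper::base(gpu_tensor), bytes);
      stream->ThenMemcpy(&dst, DMAHelper::base(&host_tensor), bytes);
    }

    }  // namespace tensorflow
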
|
/external/tensorflow/tensorflow/core/distributed_runtime/ |
D | collective_rma_distributed.cc |
    50   req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor)));  in RecvBufCall()
    69   char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor));  in PopulateTensorFromExtra()
|
D | base_rendezvous_mgr.cc | 240 if (!DMAHelper::CanUseDMA(&in) && in.dtype() != DT_VARIANT && in SameWorkerRecvDone()
|
/external/tensorflow/tensorflow/core/common_runtime/gpu/ |
D | gpu_util.cc |
    99   if (!DMAHelper::CanUseDMA(&src)) {  in PrepareCopy()
    107  return const_cast<void*>(DMAHelper::base(src));  in GetBase()
    110  void* GetBase(Tensor* dst) { return DMAHelper::base(dst); }  in GetBase()
|
D | gpu_util.h | 74 T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t))); in AsDeviceMemory()
|
/external/tensorflow/tensorflow/core/kernels/data/ |
D | optional_ops.cc | 37 if (DMAHelper::CanUseDMA(&t) || t.dtype() == DT_VARIANT) { in OptionalDeviceCopy()
|
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/ |
D | grpc_tensor_coding.cc | 237 const TensorBuffer* buf = DMAHelper::buffer(&val); in EncodeTensorToByteBuffer()
|
D | grpc_worker_service.cc |
    574  const char* head = reinterpret_cast<const char*>(DMAHelper::base(tensor));  in SetTensorInRecvBufResp()
    646  if (!DMAHelper::CanUseDMA(hook->prod_value)) {  in RecvBufAsync()
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_device_context.cc | 155 static_cast<const char*>(DMAHelper::base(cpu_tensor)), in CopyCPUTensorToDevice()
|
D | xla_tpu_device.cc | 154 TF_RET_CHECK(DMAHelper::CanUseDMA(input)); in TpuDeviceToDeviceCopy()
|
/external/tensorflow/tensorflow/core/kernels/data/experimental/ |
D | snapshot_util.cc |
    233  auto tensor_buffer = DMAHelper::buffer(&tensor);  in WriteTensors()
    798  TensorBuffer* buffer = DMAHelper::buffer(&simple_tensor);  in SnappyUncompress()
|