Searched refs:DMAHelper (Results 1 – 25 of 31) sorted by relevance

/external/tensorflow/tensorflow/core/kernels/
scoped_allocator_ops.cc:64 << DMAHelper::buffer(backing_tensor) << " base addr " in Compute()
65 << DMAHelper::base(backing_tensor); in Compute()
127 const TensorBuffer* backing_buf = DMAHelper::buffer(&output); in Compute()
133 const TensorBuffer* input_buf = DMAHelper::buffer(&context->input(i)); in Compute()
192 const TensorBuffer* backing_buf = DMAHelper::buffer(&backing_copy); in Compute()
199 << DMAHelper::base(&context->input(i)); in Compute()
207 const TensorBuffer* input_buf = DMAHelper::buffer(&copy); in Compute()
scoped_allocator_ops_test.cc:185 CHECK_EQ(DMAHelper::base(&input), DMAHelper::base(&output)); in ExecOp()
300 const void* lower_limit = DMAHelper::base(&input); in ExecOp()
311 CHECK_EQ(expected_base, DMAHelper::base(&output)); in ExecOp()
serialize_sparse_op.cc:298 static_cast<int64*>(DMAHelper::base(&output_indices)); in operator ()()
301 T* output_values_ptr = static_cast<T*>(DMAHelper::base(&output_values)); in operator ()()
shape_ops.h:152 Tdim dim = *static_cast<const Tdim*>(DMAHelper::base(&dim_t)); in Compute()
/external/tensorflow/tensorflow/core/common_runtime/
collective_rma_local_test.cc:89 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
108 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
132 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
153 EXPECT_NE(DMAHelper::base(&source_tensor), DMAHelper::base(&sink_tensor)); in TEST_F()
base_collective_executor.cc:103 data_start_(reinterpret_cast<T*>(DMAHelper::base(&output_))), in CollectiveAdapterImpl()
120 DMAHelper::UnsafeSetShape(&output_, new_shape); in Flatten()
126 DMAHelper::UnsafeSetShape(&output_, old_shape_); in ConsumeFinalValue()
161 "base addr ", reinterpret_cast<int64>(DMAHelper::base(&output_)), in DebugString()
168 int64 base_addr = reinterpret_cast<int64>(DMAHelper::base(&t)); in TBounds()
collective_rma_local.cc:159 memcpy(DMAHelper::base(dst), DMAHelper::base(src), bytes); in MemCpyAsync()
copy_tensor.cc:76 if (!DMAHelper::CanUseDMA(&from)) { in CopyHostToDevice()
151 if (!DMAHelper::CanUseDMA(&from)) { in CopyDeviceToDevice()
313 if (DMAHelper::CanUseDMA(&from)) { in WrappedTensorDeviceCopy()
355 if (!DMAHelper::CanUseDMA(&from)) { in CopyDeviceToHost()
dma_helper.h:24 class DMAHelper {
ring_reducer.cc:91 (DMAHelper::base(col_ctx_->input) != DMAHelper::base(col_ctx_->output))) { in Run()
hierarchical_tree_broadcaster.cc:370 (DMAHelper::base(col_ctx_->input) != in RunTree()
371 DMAHelper::base(col_ctx_->output))) { in RunTree()
scoped_allocator_mgr_test.cc:159 static_cast<const char*>(DMAHelper::base(&backing_tensor_)); in TEST_F()
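
The dma_helper.h hit above (line 24) is the declaration behind all of these call sites. As a reading aid, here is a rough interface sketch reconstructed only from the usages listed in these results (base, buffer, CanUseDMA, UnsafeSetShape); the actual header in tensorflow/core/common_runtime/ may differ in overloads and in how it reaches Tensor internals.

// Sketch of the DMAHelper interface, inferred from the call sites in these
// search results; not a verbatim copy of dma_helper.h.
#include "tensorflow/core/framework/tensor.h"

namespace tensorflow {

// Gives DMA-oriented code (collectives, RDMA transfers, scoped allocators,
// gRPC tensor coding) direct access to a Tensor's backing storage.
class DMAHelper {
 public:
  // True if the tensor's memory is a plain contiguous buffer that can be
  // moved with a raw byte copy (e.g. not DT_VARIANT or string data).
  static bool CanUseDMA(const Tensor* t);
  // Raw pointer to the start of the tensor's backing memory.
  static const void* base(const Tensor* t);
  static void* base(Tensor* t);
  // The reference-counted TensorBuffer backing the tensor.
  static const TensorBuffer* buffer(const Tensor* t);
  static TensorBuffer* buffer(Tensor* t);
  // Swaps in a new shape without reallocating, as base_collective_executor.cc
  // does to flatten a tensor and later restore its original shape.
  static void UnsafeSetShape(Tensor* t, const TensorShape& new_shape);
};

}  // namespace tensorflow
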
/external/tensorflow/tensorflow/compiler/tf2xla/
literal_util.cc:42 static_cast<const char*>(DMAHelper::base(&host_tensor)), xla_shape); in HostTensorToBorrowingLiteral()
64 static_cast<const char*>(DMAHelper::base(host_tensor)), xla_shape); in HostTensorToMutableBorrowingLiteral()
78 buf_ptrs.emplace_back(static_cast<const char*>(DMAHelper::base(tensor))); in HostTensorsToBorrowingLiteralTuple()
106 void* dst_ptr = DMAHelper::base(host_tensor); in CopyLiteralToHostTensor()
/external/tensorflow/tensorflow/core/data/
compression_utils.cc:34 total_size += DMAHelper::buffer(&component)->size(); in CompressElement()
55 const TensorBuffer* buffer = DMAHelper::buffer(&component); in CompressElement()
96 TensorBuffer* buffer = DMAHelper::buffer(&out->back()); in UncompressElement()
/external/tensorflow/tensorflow/core/common_runtime/device/
device_event_mgr_test.cc:386 se::DeviceMemoryBase gpu_dst_ptr0(DMAHelper::base(&gpu_inputs_[0]), in DoAddChain()
389 &gpu_dst_ptr0, DMAHelper::base(&host_inputs_[0]), src_bytes); in DoAddChain()
390 se::DeviceMemoryBase gpu_dst_ptr1(DMAHelper::base(&gpu_inputs_[1]), in DoAddChain()
393 &gpu_dst_ptr1, DMAHelper::base(&host_inputs_[1]), src_bytes); in DoAddChain()
420 se::DeviceMemoryBase gpu_src_ptr(DMAHelper::base(ctx->mutable_output(0)), in DoAddChain()
422 gpu_helper_->d2h_stream()->ThenMemcpy(DMAHelper::base(&host_outputs_[0]), in DoAddChain()
/external/tensorflow/tensorflow/core/distributed_runtime/
collective_rma_distributed.cc:50 req_.set_buf_ptr(reinterpret_cast<int64>(DMAHelper::base(to_tensor))); in RecvBufCall()
69 char* head = reinterpret_cast<char*>(DMAHelper::base(cpu_tensor)); in PopulateTensorFromExtra()
base_rendezvous_mgr.cc:240 if (!DMAHelper::CanUseDMA(&in) && in.dtype() != DT_VARIANT && in SameWorkerRecvDone()
/external/tensorflow/tensorflow/core/common_runtime/gpu/
gpu_util.cc:99 if (!DMAHelper::CanUseDMA(&src)) { in PrepareCopy()
107 return const_cast<void*>(DMAHelper::base(src)); in GetBase()
110 void* GetBase(Tensor* dst) { return DMAHelper::base(dst); } in GetBase()
gpu_util.h:74 T* ptr = reinterpret_cast<T*>(const_cast<void*>(DMAHelper::base(&t))); in AsDeviceMemory()
/external/tensorflow/tensorflow/core/kernels/data/
optional_ops.cc:37 if (DMAHelper::CanUseDMA(&t) || t.dtype() == DT_VARIANT) { in OptionalDeviceCopy()
/external/tensorflow/tensorflow/core/distributed_runtime/rpc/
grpc_tensor_coding.cc:237 const TensorBuffer* buf = DMAHelper::buffer(&val); in EncodeTensorToByteBuffer()
grpc_worker_service.cc:574 const char* head = reinterpret_cast<const char*>(DMAHelper::base(tensor)); in SetTensorInRecvBufResp()
646 if (!DMAHelper::CanUseDMA(hook->prod_value)) { in RecvBufAsync()
/external/tensorflow/tensorflow/compiler/jit/
xla_device_context.cc:155 static_cast<const char*>(DMAHelper::base(cpu_tensor)), in CopyCPUTensorToDevice()
xla_tpu_device.cc:154 TF_RET_CHECK(DMAHelper::CanUseDMA(input)); in TpuDeviceToDeviceCopy()
/external/tensorflow/tensorflow/core/kernels/data/experimental/
snapshot_util.cc:233 auto tensor_buffer = DMAHelper::buffer(&tensor); in WriteTensors()
798 TensorBuffer* buffer = DMAHelper::buffer(&simple_tensor); in SnappyUncompress()
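
Most of these hits share one pattern: guard with DMAHelper::CanUseDMA, then move bytes between the raw pointers returned by DMAHelper::base, as collective_rma_local.cc and copy_tensor.cc do above. Below is a minimal sketch of that pattern; CopyTensorBytes is a hypothetical name and the error handling is simplified.

#include <cstring>

#include "tensorflow/core/common_runtime/dma_helper.h"
#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"

namespace tensorflow {

// Hypothetical helper modeled on the CanUseDMA + memcpy pattern seen in
// collective_rma_local.cc (MemCpyAsync) and copy_tensor.cc above.
Status CopyTensorBytes(const Tensor& src, Tensor* dst) {
  // Only plain, contiguous buffers may be copied byte-for-byte.
  if (!DMAHelper::CanUseDMA(&src) || !DMAHelper::CanUseDMA(dst)) {
    return errors::InvalidArgument("tensors are not DMA-copyable");
  }
  if (src.TotalBytes() != dst->TotalBytes()) {
    return errors::InvalidArgument("source/destination size mismatch");
  }
  std::memcpy(DMAHelper::base(dst), DMAHelper::base(&src), src.TotalBytes());
  return Status::OK();
}

}  // namespace tensorflow
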
