| /external/tensorflow/tensorflow/core/common_runtime/gpu/ |
| D | gpu_util_platform_specific.cc |
|      26 void GPUDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, in CopyCPUTensorToDevice()
|      37 Device* device, Tensor* cpu_tensor, in CopyDeviceTensorToCPU()
|
| D | gpu_device_test.cc |
|      97 void InitCPUTensor(Tensor* cpu_tensor, int num_elements, float value) { in InitCPUTensor()
|     104 void CopyCPUToGPU(Tensor* cpu_tensor, Tensor* gpu_tensor, Device* device, in CopyCPUToGPU()
|     110 void CopyGPUToCPU(Tensor* gpu_tensor, Tensor* cpu_tensor, Device* device, in CopyGPUToCPU()
|     421 Tensor cpu_tensor(cpu_allocator(), DT_FLOAT, TensorShape({kNumElements})); in TEST_F() local
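The test file above wraps host/device transfers in small helpers. A minimal sketch of an InitCPUTensor-style helper, assuming a pre-allocated DT_FLOAT host tensor; the helper name mirrors the hit above, the body is illustrative:

    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Sketch: fill an already-allocated float host tensor with a constant,
    // in the spirit of the InitCPUTensor() helper listed above.
    void InitCPUTensor(Tensor* cpu_tensor, int num_elements, float value) {
      auto flat = cpu_tensor->flat<float>();
      for (int i = 0; i < num_elements; ++i) flat(i) = value;
    }

    }  // namespace tensorflow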
|
| D | gpu_util.cc |
|     256 const Tensor* gpu_tensor, Tensor* cpu_tensor, in CopyGPUTensorToCPU()
|     299 void GPUUtil::CopyCPUTensorToGPU(const Tensor* cpu_tensor, in CopyCPUTensorToGPU()
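GPUUtil::CopyGPUTensorToCPU and GPUUtil::CopyCPUTensorToGPU are asynchronous and report completion through a StatusCallback. A minimal sketch of a blocking host-to-GPU-and-back round trip built on them, assuming the parameter order declared in gpu_util.h; `gpu_device` and `device_context` are placeholders for objects obtained from the device manager:

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/common_runtime/gpu/gpu_util.h"
    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/notification.h"

    namespace tensorflow {

    // Sketch: copy a host tensor to the GPU and back, blocking on each
    // asynchronous GPUUtil call with a Notification.
    Status RoundTripThroughGpu(Device* gpu_device,
                               const DeviceContext* device_context,
                               const Tensor& host_in, Tensor* host_out) {
      Tensor gpu_tensor(gpu_device->GetAllocator(AllocatorAttributes()),
                        host_in.dtype(), host_in.shape());
      *host_out = Tensor(cpu_allocator(), host_in.dtype(), host_in.shape());

      Status copy_status;
      Notification h2d_done;
      GPUUtil::CopyCPUTensorToGPU(
          &host_in, device_context, gpu_device, &gpu_tensor,
          [&](const Status& s) {
            copy_status = s;
            h2d_done.Notify();
          },
          /*sync_dst_compute=*/true);
      h2d_done.WaitForNotification();
      if (!copy_status.ok()) return copy_status;

      Notification d2h_done;
      GPUUtil::CopyGPUTensorToCPU(
          gpu_device, device_context, &gpu_tensor, host_out,
          [&](const Status& s) {
            copy_status = s;
            d2h_done.Notify();
          });
      d2h_done.WaitForNotification();
      return copy_status;
    }

    }  // namespace tensorflow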
|
| /external/tensorflow/tensorflow/core/tpu/ |
| D | virtual_device.cc |
|      38 void VirtualDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, in CopyCPUTensorToDevice()
|      50 Tensor* cpu_tensor, in CopyDeviceTensorToCPU()
|
| /external/tensorflow/tensorflow/core/framework/ |
| D | device_base.cc |
|      40 Tensor* cpu_tensor) { in CopyDeviceTensorToCPUSync()
|      52 Status DeviceContext::CopyCPUTensorToDeviceSync(const Tensor* cpu_tensor, in CopyCPUTensorToDeviceSync()
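The two Sync entry points above are blocking wrappers around the asynchronous virtuals declared in device_base.h. A minimal sketch of that wrapper pattern, assuming only the async CopyDeviceTensorToCPU interface; the actual implementation in device_base.cc may differ in detail:

    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/notification.h"

    namespace tensorflow {

    // Sketch of a blocking device->host copy built on the asynchronous
    // DeviceContext::CopyDeviceTensorToCPU interface: post the copy, then
    // wait on a Notification fired from the StatusCallback.
    Status BlockingDeviceToCPU(DeviceContext* ctx, const Tensor& device_tensor,
                               StringPiece tensor_name, Device* device,
                               Tensor* cpu_tensor) {
      Status status;
      Notification done;
      ctx->CopyDeviceTensorToCPU(&device_tensor, tensor_name, device, cpu_tensor,
                                 [&](const Status& s) {
                                   status = s;
                                   done.Notify();
                                 });
      done.WaitForNotification();
      return status;
    }

    }  // namespace tensorflow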
|
| D | device_base.h | 100 Tensor* cpu_tensor, StatusCallback done) { in CopyDeviceTensorToCPU()
|
| /external/tensorflow/tensorflow/compiler/jit/ |
| D | xla_device_context.cc |
|     113 void XlaDeviceContext::CopyCPUTensorToDevice(const Tensor* cpu_tensor, in CopyCPUTensorToDevice()
|     209 Device* device, Tensor* cpu_tensor, in CopyDeviceTensorToCPU()
|
| /external/tensorflow/tensorflow/core/distributed_runtime/ |
| D | collective_rma_distributed.cc |
|      68 Tensor* cpu_tensor) { in PopulateTensorFromExtra()
|     139 Tensor* cpu_tensor = new Tensor(cpu_dev->GetAllocator(cpu_attr), in RecvFromPeer() local
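The RecvFromPeer hit above allocates a host staging tensor from the CPU device's allocator before the received buffer is moved onto the accelerator. A minimal sketch of that allocation pattern, assuming the standard AllocatorAttributes API; whether on_host is set explicitly is an illustrative choice, not a claim about the real code:

    #include <memory>

    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Sketch: allocate a host-side staging tensor that is still usable for
    // fast DMA to a GPU by requesting gpu-compatible (pinned) host memory.
    std::unique_ptr<Tensor> MakeStagingTensor(DeviceBase* cpu_dev, DataType dtype,
                                              const TensorShape& shape) {
      AllocatorAttributes cpu_attr;
      cpu_attr.set_gpu_compatible(true);  // pinned host memory where supported
      cpu_attr.set_on_host(true);         // but always resident on the host
      return std::make_unique<Tensor>(cpu_dev->GetAllocator(cpu_attr), dtype,
                                      shape);
    }

    }  // namespace tensorflow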
|
| /external/tensorflow/tensorflow/core/kernels/ |
| D | stack.cc |
|     257 Tensor* cpu_tensor = in ComputeAsync() local
|     304 Tensor* cpu_tensor = &value.tensor; in ComputeAsync() local
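The ComputeAsync hits above come from the stack kernel's swap-to-host path: a host tensor is allocated and the device value is copied into it without blocking the op. A minimal sketch of that pattern, assuming an OpKernelContext with a device context; the function name, error handling, and callback shape are illustrative:

    #include <functional>

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/framework/allocator.h"
    #include "tensorflow/core/framework/op_kernel.h"
    #include "tensorflow/core/framework/tensor.h"

    namespace tensorflow {

    // Sketch: allocate pinned host memory, then asynchronously copy a device
    // tensor into it, in the spirit of the swap-out path in stack.cc.
    void SwapTensorToHost(OpKernelContext* ctx, const Tensor& device_tensor,
                          std::function<void(const Status&, Tensor*)> done) {
      AllocatorAttributes host_attr;
      host_attr.set_gpu_compatible(true);
      host_attr.set_on_host(true);
      Tensor* cpu_tensor = new Tensor(ctx->device()->GetAllocator(host_attr),
                                      device_tensor.dtype(),
                                      device_tensor.shape());
      ctx->op_device_context()->CopyDeviceTensorToCPU(
          &device_tensor, "swap_out", static_cast<Device*>(ctx->device()),
          cpu_tensor, [cpu_tensor, done](const Status& s) {
            // Caller takes ownership of *cpu_tensor via the callback.
            done(s, cpu_tensor);
          });
    }

    }  // namespace tensorflow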
|
| D | dynamic_partition_op_gpu.cu.cc | 287 Tensor cpu_tensor; in ComputeAsync() local
|
| D | collective_nccl_test.cc | 271 Tensor cpu_tensor(dtype, shape); in InitTensor() local
|
| /external/tensorflow/tensorflow/core/common_runtime/ |
| D | ring_alg.cc | 258 Tensor cpu_tensor(tensor.dtype(), tensor.shape()); in TensorDebugString() local
|
| D | copy_tensor.cc | 248 Tensor* cpu_tensor = in ViaDMA() local
|
| D | permuter_test.cc | 359 Tensor cpu_tensor(dtype, shape); in InitTensor() local
|
| D | process_function_library_runtime_test.cc |
|     181 Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape()); in GPUToCPU() local
|     192 Tensor CPUToGPU(const Tensor& cpu_tensor) { in CPUToGPU()
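GPUToCPU() and CPUToGPU() above are small test conveniences around the synchronous DeviceContext copy wrappers. A minimal sketch of a GPUToCPU-style helper, assuming the CopyDeviceTensorToCPUSync signature from device_base.cc; how the real test obtains its device and device context may differ:

    #include "tensorflow/core/common_runtime/device.h"
    #include "tensorflow/core/framework/device_base.h"
    #include "tensorflow/core/framework/tensor.h"
    #include "tensorflow/core/platform/status.h"

    namespace tensorflow {

    // Sketch: blocking device->host copy for tests, mirroring GPUToCPU().
    Tensor GPUToCPU(Device* gpu_device, DeviceContext* device_context,
                    const Tensor& device_tensor) {
      Tensor cpu_tensor(device_tensor.dtype(), device_tensor.shape());
      TF_CHECK_OK(device_context->CopyDeviceTensorToCPUSync(
          &device_tensor, /*tensor_name=*/"", gpu_device, &cpu_tensor));
      return cpu_tensor;
    }

    }  // namespace tensorflow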
|
| D | ring_gatherer_test.cc | 436 Tensor cpu_tensor(dtype, shape); in InitTensor() local
|
| D | ring_reducer_test.cc | 462 Tensor cpu_tensor(dtype, shape); in InitTensor() local
|
| D | hierarchical_tree_broadcaster_test.cc | 617 Tensor cpu_tensor(dtype, shape); in InitTensor() local
|
| /external/tensorflow/tensorflow/core/distributed_runtime/rpc/ |
| D | grpc_worker_service.cc | 675 Tensor* cpu_tensor = in RecvBufAsync() local
|