| /external/tensorflow/tensorflow/compiler/xla/service/ |
| D | transfer_manager.h |
|      82   se::Stream* stream, const ShapedBuffer& device_buffer) {   in TransferLiteralFromDevice()
|      90   const ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|     112   const ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|     136   const ShapedBuffer& device_buffer) {   in TransferLiteralToDevice()
|     162   const ShapedBuffer& device_buffer) {   in TransferLiteralToDeviceAsync()
|     273   se::StreamExecutor* executor, const ShapedBuffer& device_buffer) const {   in CanShapedBufferBeAccessedNow()
|     280   const se::DeviceMemoryBase& device_buffer) const {   in CanBufferBeAccessedNow()
|
| D | transfer_manager.cc |
|      52   se::Stream* stream, const ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|      79   se::Stream* stream, const ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|     101   const ShapedBuffer& device_buffer,   in TransferLiteralToDevice()
|     204   ShapedBuffer* device_buffer,   in ReadDynamicShapes()
|     289   se::Stream* stream, const ShapedBuffer& device_buffer) {   in WriteTupleIndexTables()
|     295   se::Stream* stream, const ShapedBuffer& device_buffer) {   in WriteTupleIndexTablesAsync()
|     324   se::Stream* stream, const ShapedBuffer& device_buffer) {   in WriteRootTupleIndexTable()
|
| D | generic_transfer_manager.cc |
|      62   se::Stream* stream, const ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|      98   const ShapedBuffer& device_buffer,   in TransferLiteralToDeviceAsync()
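The entries above cover the TransferManager literal-transfer entry points (TransferLiteralToDevice, TransferLiteralFromDevice, the tuple index-table writers, and the Can*BeAccessedNow hooks). Below is a minimal sketch of how these are typically driven; the stream/allocator setup, the include paths, and the availability of the TF_ASSIGN_OR_RETURN / TF_RETURN_IF_ERROR macros are assumptions, and exact signatures vary by TensorFlow revision.

```cpp
// Sketch only: round-trip a literal through device memory via TransferManager.
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace se = ::stream_executor;

xla::StatusOr<xla::Literal> RoundTripLiteral(se::StreamExecutor* executor,
                                             se::DeviceMemoryAllocator* allocator,
                                             const xla::Literal& literal) {
  // TransferManager instances are registered per platform.
  TF_ASSIGN_OR_RETURN(
      xla::TransferManager* transfer_manager,
      xla::TransferManager::GetForPlatform(executor->platform()));

  // Device-side allocation shaped like the host literal.
  TF_ASSIGN_OR_RETURN(xla::ScopedShapedBuffer device_buffer,
                      transfer_manager->AllocateScopedShapedBuffer(
                          literal.shape(), allocator, executor->device_ordinal()));

  se::Stream stream(executor);
  stream.Init();

  // For tuple shapes the index tables have to be written first
  // (WriteTupleIndexTables above); for array shapes this should be a no-op.
  TF_RETURN_IF_ERROR(
      transfer_manager->WriteTupleIndexTables(&stream, device_buffer));

  // Host -> device, then device -> host on the same stream.
  TF_RETURN_IF_ERROR(transfer_manager->TransferLiteralToDevice(&stream, literal,
                                                               device_buffer));
  return transfer_manager->TransferLiteralFromDevice(&stream, device_buffer);
}
```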
|
| /external/tensorflow/tensorflow/compiler/xla/tests/ |
| D | transfer_manager_test.cc |
|      74   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|      90   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     112   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     130   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     148   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     166   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     182   auto device_buffer = AllocateDeviceBuffer(shape);   in XLA_TEST_F()   local
|     201   auto device_buffer = AllocateDeviceBuffer(ondevice_shape);   in XLA_TEST_F()   local
|     222   auto device_buffer = AllocateDeviceBuffer(literal.shape());   in XLA_TEST_F()   local
|     236   auto device_buffer = AllocateDeviceBuffer(literal.shape());   in XLA_TEST_F()   local
|     [all …]
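Each match above binds a local `device_buffer` from an `AllocateDeviceBuffer(shape)` fixture helper and round-trips a literal through it. A hedged reconstruction of the shape of one such test body follows; the fixture name and its `transfer_manager_` / `stream_` members are assumptions based on the usual XLA test layout, not copied from the file.

```cpp
// Assumed to live inside namespace xla in transfer_manager_test.cc.
XLA_TEST_F(TransferManagerTest, TransferR1F32) {
  Literal literal = LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f});
  const Shape& shape = literal.shape();
  auto device_buffer = AllocateDeviceBuffer(shape);

  // Round-trip the literal through device memory.
  ASSERT_IS_OK(transfer_manager_->TransferLiteralToDevice(stream_, literal,
                                                          device_buffer));
  TF_ASSERT_OK_AND_ASSIGN(
      Literal result,
      transfer_manager_->TransferLiteralFromDevice(stream_, device_buffer));

  // Exact-comparison helpers come from literal_test_util.h.
  EXPECT_TRUE(LiteralTestUtil::Equal(literal, result));
}
```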
|
| /external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
| D | cpu_transfer_manager.h |
|      49   const ShapedBuffer& device_buffer) const override {   in CanShapedBufferBeAccessedNow()
|      55   const se::DeviceMemoryBase& device_buffer) const override {   in CanBufferBeAccessedNow()
|
| D | cpu_transfer_manager.cc |
|      58   ShapedBuffer* device_buffer,   in ReadDynamicShapes()
|
| D | cpu_xfeed.cc |
|     275   ShapedBuffer* device_buffer, Shape* device_shape,   in ReadDynamicShapesOnCpu()
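The two cpu_transfer_manager.h overrides listed above implement the Can*BeAccessedNow hooks declared in transfer_manager.h; on the CPU backend, device memory is host memory, so they presumably report that buffers are immediately accessible. The illustrative helper below (not from the tree) shows what that hook buys a caller deciding whether a stream drain is needed before readback; the function and variable names are made up for this sketch.

```cpp
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"

namespace se = ::stream_executor;

xla::StatusOr<xla::Literal> ReadBack(xla::TransferManager* transfer_manager,
                                     se::Stream* stream,
                                     const xla::ShapedBuffer& device_buffer) {
  if (!transfer_manager->CanShapedBufferBeAccessedNow(stream->parent(),
                                                      device_buffer)) {
    // Backends where the buffer is not immediately accessible generally have
    // to wait for outstanding work on the stream first.
    TF_RETURN_IF_ERROR(stream->BlockHostUntilDone());
  }
  return transfer_manager->TransferLiteralFromDevice(stream, device_buffer);
}
```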
|
| /external/tensorflow/tensorflow/compiler/xla/pjrt/ |
| D | tfrt_cpu_pjrt_client.cc |
|     379   auto device_buffer = MaybeOwningCpuMemory::AllocateShared(byte_size);   in AllocateDestinationBuffer()   local
|     392   auto device_buffer = MaybeOwningCpuMemory::AllocateShared(byte_size);   in AllocateDestinationBuffer()   local
|     455   auto device_buffer = std::make_shared<MaybeOwningCpuMemory>(   in BufferFromHostBuffer()   local
|     460   auto device_buffer = MaybeOwningCpuMemory::AllocateShared(byte_size);   in BufferFromHostBuffer()   local
|     550   TfrtCpuBuffer::ScopedHold device_buffer(   in BufferFromHostLiteral()   local
|     560   TfrtCpuBuffer::ScopedHold device_buffer(movable_device_buffer);   in BufferFromHostLiteral()   local
|     571   TfrtCpuBuffer::ScopedHold device_buffer(   in BufferFromHostLiteral()   local
|     581   TfrtCpuBuffer::ScopedHold device_buffer(movable_device_buffer);   in BufferFromHostLiteral()   local
|     763   std::shared_ptr<TrackedTfrtCpuDeviceBuffer> device_buffer;   in Release()   local
|     855   void TfrtCpuBuffer::ConfirmDonation(TrackedTfrtCpuDeviceBuffer* device_buffer) {   in ConfirmDonation()
|
| D | pjrt_stream_executor_client.cc |
|     321   void RecordUsage(PjRtStreamExecutorBuffer::ScopedHold device_buffer,   in RecordUsage()
|     481   PjRtStreamExecutorBuffer::ScopedHold device_buffer,   in AddDestinationBufferSynchronization()
|     587   ScopedHold device_buffer(this, ScopedHold::kUsage);   in logical_on_device_shape()   local
|     756   auto device_buffer = std::make_shared<TrackedDeviceBuffer>(   in BufferFromHostBuffer()   local
|     772   PjRtStreamExecutorBuffer::ScopedHold device_buffer(   in BufferFromHostBuffer()   local
|     832   PjRtStreamExecutorBuffer::ScopedHold device_buffer(   in BufferFromHostBuffer()   local
|     948   PjRtStreamExecutorBuffer::ScopedHold device_buffer(   in BufferFromHostLiteral()   local
|     962   PjRtStreamExecutorBuffer::ScopedHold device_buffer(movable_device_buffer);   in BufferFromHostLiteral()   local
|    1080   auto device_buffer = std::make_shared<TrackedDeviceBuffer>(   in CreateViewOfDeviceBuffer()   local
|    1116   Shape on_device_shape, std::shared_ptr<TrackedDeviceBuffer> device_buffer,   in PjRtStreamExecutorBuffer()
|
| D | tracked_device_buffer_test.cc |
|     105   std::shared_ptr<TrackedDeviceBuffer> device_buffer =   in TEST()   local
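The PJRT entries above sit behind the public PjRtClient surface: BufferFromHostBuffer / BufferFromHostLiteral allocate a tracked device buffer (AllocateDestinationBuffer) and manage its lifetime through ScopedHold while copies are in flight. Below is a hedged sketch of the public calls that exercise these paths on the TFRT CPU client; GetTfrtCpuClient's exact signature and the device-to-host readback API differ across TensorFlow revisions, so treat the names as approximate.

```cpp
#include "tensorflow/compiler/xla/literal_util.h"
#include "tensorflow/compiler/xla/pjrt/pjrt_client.h"
#include "tensorflow/compiler/xla/pjrt/tfrt_cpu_pjrt_client.h"

xla::Status CopyLiteralToDevice() {
  TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtClient> client,
                      xla::GetTfrtCpuClient(/*asynchronous=*/true));
  xla::PjRtDevice* device = client->addressable_devices().front();

  xla::Literal literal = xla::LiteralUtil::CreateR1<float>({1.f, 2.f, 3.f});

  // Internally this allocates the destination buffer and holds the tracked
  // device buffer (via a ScopedHold) until the host-to-device copy is enqueued.
  TF_ASSIGN_OR_RETURN(std::unique_ptr<xla::PjRtBuffer> buffer,
                      client->BufferFromHostLiteral(literal, device));

  // Wait for the copy to complete; readback would go through the buffer's
  // ToLiteral family of methods, whose signature varies by revision.
  return buffer->BlockHostUntilReady();
}
```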
|
| /external/tensorflow/tensorflow/stream_executor/tpu/ |
| D | tpu_transfer_manager.cc |
|      70   const xla::ShapedBuffer& device_buffer,   in TransferLiteralToDeviceAsync()
|     192   stream_executor::Stream* stream, const xla::ShapedBuffer& device_buffer,   in TransferLiteralFromDevice()
|     327   xla::ShapedBuffer* device_buffer,   in ReadDynamicShapes()
|
| /external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/ |
| D | tpu_client.cc |
|     267   Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer,   in PyTpuBuffer()
|     431   std::shared_ptr<TpuSharedBuffer> device_buffer = DeviceBuffer();   in BlockHostUntilReady()   local
|     493   auto device_buffer = std::make_shared<TpuSharedBuffer>(   in CreateBuffer()   local
|
| /external/deqp-deps/amber/src/dawn/ |
| D | engine_dawn.cc |
|     369   auto& device_buffer = compute_pipeline.buffers[i];   in MapDeviceBufferToHostBuffer()   local
|