/external/tensorflow/tensorflow/compiler/xla/service/

  allocation_tracker.cc
     35  ScopedShapedBuffer shaped_buffer, const string& tag) {  // in Register() (argument)
     39  replicated_buffers.emplace_back(std::move(shaped_buffer));  // in Register()
     69  for (auto& shaped_buffer : replicated_buffers) {  // in RegisterInternal() (local)
     72  shaped_buffer.on_device_shape(),  // in RegisterInternal()
     79  AddAllocationOrIncrementRefCount(shaped_buffer.buffer(index),  // in RegisterInternal()
     80  shaped_buffer.device_ordinal());  // in RegisterInternal()
     87  ReleaseIfScopedShapedBuffer(std::move(shaped_buffer)));  // in RegisterInternal()
    102  for (const auto& shaped_buffer : replicated_buffers) {  // in Unregister() (local)
    105  shaped_buffer->on_device_shape(),  // in Unregister()
    110  TF_RETURN_IF_ERROR(DecrementRefCount(shaped_buffer->buffer(index),  // in Unregister()
    [all …]

  transfer_manager.cc
    172  ShapedBuffer shaped_buffer(on_device_shape,  // in TransferArrayToDeviceAsync() (local)
    174  shaped_buffer.set_buffer(dest, /*index=*/{});  // in TransferArrayToDeviceAsync()
    175  return TransferLiteralToDevice(stream, literal, shaped_buffer,  // in TransferArrayToDeviceAsync()
    196  ShapedBuffer shaped_buffer(shape, stream->parent()->device_ordinal());  // in TransferArrayFromDevice() (local)
    197  shaped_buffer.set_buffer(source, /*index=*/{});  // in TransferArrayFromDevice()
    198  return TransferLiteralFromDevice(stream, shaped_buffer, literal,  // in TransferArrayFromDevice()
    398  ScopedShapedBuffer shaped_buffer(std::move(on_device_shape), allocator,  // in AllocateScopedShapedBuffer() (local)
    403  for (auto& pair : shaped_buffer.buffers()) {  // in AllocateScopedShapedBuffer()
    407  ShapeUtil::GetSubshape(shaped_buffer.on_device_shape(), index);  // in AllocateScopedShapedBuffer()
    409  allocator->Allocate(shaped_buffer.device_ordinal(),  // in AllocateScopedShapedBuffer()
    [all …]

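The TransferArray* matches above show the common pattern of wrapping a single pre-existing device allocation in a ShapedBuffer so the TransferManager can read or write it. A minimal sketch of the upload half, assuming the caller supplies the stream and destination allocation (the function name and the use of HostShapeToDeviceShape to derive the device shape are illustrative, not from the matched code):

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/service/transfer_manager.h"
    #include "tensorflow/stream_executor/stream.h"

    namespace se = ::stream_executor;

    // Wrap a caller-owned device allocation `dest` in a ShapedBuffer so the
    // TransferManager can write `literal` into it, as TransferArrayToDeviceAsync
    // does above. An array shape has a single leaf, so the root index {} is the
    // only buffer that needs to be set.
    xla::Status TransferArraySketch(xla::TransferManager* transfer_manager,
                                    se::Stream* stream,
                                    const xla::Literal& literal,
                                    se::DeviceMemoryBase dest) {
      const xla::Shape on_device_shape =
          transfer_manager->HostShapeToDeviceShape(literal.shape());
      xla::ShapedBuffer shaped_buffer(on_device_shape,
                                      stream->parent()->device_ordinal());
      shaped_buffer.set_buffer(dest, /*index=*/{});
      return transfer_manager->TransferLiteralToDevice(stream, literal,
                                                       shaped_buffer);
    }
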
  shaped_buffer.cc
    129  ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer,  // in ScopedShapedBuffer() (argument)
    131  : ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {}  // in ScopedShapedBuffer()
    152  ShapedBuffer shaped_buffer(static_cast<ShapedBuffer&&>(*this));  // in release() (local)
    154  return shaped_buffer;  // in release()

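These four lines are the whole ownership story of ScopedShapedBuffer: the constructor adopts a plain ShapedBuffer together with the allocator that will free its buffers, and release() casts *this back down to a ShapedBuffer so the destructor has nothing left to deallocate. A small sketch of how callers use that pair (the helper name is illustrative):

    #include <utility>

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/stream_executor/device_memory_allocator.h"

    namespace se = ::stream_executor;

    // Take ownership of an unowned ShapedBuffer, then hand it back. The
    // constructor ties every leaf allocation to `allocator`; release() moves
    // the buffers back out (via the ShapedBuffer&& cast at line 152 above),
    // so destroying `scoped` afterwards frees nothing.
    xla::ShapedBuffer TakeAndRelease(xla::ShapedBuffer buffer,
                                     se::DeviceMemoryAllocator* allocator) {
      xla::ScopedShapedBuffer scoped(std::move(buffer), allocator);
      // ... use `scoped` like any ShapedBuffer; if it went out of scope
      // here, its buffers would be deallocated ...
      return scoped.release();  // ownership returns to the caller
    }
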
  executable.cc
     65  ShapedBuffer shaped_buffer(input_shape, device_ordinal);  // in ToShapedBuffer() (local)
     75  shaped_buffer.set_buffer(index_buffer.second.AsDeviceMemoryBase(),  // in ToShapedBuffer()
     78  return std::move(shaped_buffer);  // in ToShapedBuffer()
     94  const ShapedBuffer& shaped_buffer) {  // in MakeMaybeOwningDeviceMemoryTree() (argument)
     95  ExecutionInput result(shaped_buffer.on_device_shape());  // in MakeMaybeOwningDeviceMemoryTree()
     96  shaped_buffer.buffers().ForEachElement(  // in MakeMaybeOwningDeviceMemoryTree()

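ToShapedBuffer and MakeMaybeOwningDeviceMemoryTree convert between the ShapedBuffer view and the ExecutionInput tree that executables consume. A sketch of the conversion in the unowned direction, assuming ExecutionInput::SetBuffer and MaybeOwningDeviceMemory as used in executable.cc:

    #include "tensorflow/compiler/xla/service/executable.h"
    #include "tensorflow/compiler/xla/service/shaped_buffer.h"

    namespace se = ::stream_executor;

    // Build an ExecutionInput whose leaves alias, but do not own, the memory
    // of an existing ShapedBuffer, mirroring MakeMaybeOwningDeviceMemoryTree.
    xla::ExecutionInput MakeUnownedInput(const xla::ShapedBuffer& shaped_buffer) {
      xla::ExecutionInput result(shaped_buffer.on_device_shape());
      shaped_buffer.buffers().ForEachElement(
          [&](const xla::ShapeIndex& index, const se::DeviceMemoryBase& mem) {
            // A bare DeviceMemoryBase marks the leaf as unowned; ownership
            // stays with the original ShapedBuffer.
            result.SetBuffer(index, xla::MaybeOwningDeviceMemory(mem));
          });
      return result;
    }
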
  service.cc
    248  const ShapedBuffer* shaped_buffer = replicated_buffers[replica];  // in ResolveAndValidateArguments() (local)
    249  replicated_arguments[replica].push_back(shaped_buffer);  // in ResolveAndValidateArguments()
    918  TF_ASSIGN_OR_RETURN(const ShapedBuffer* shaped_buffer,  // in TransferToClient()
    928  return_shape = Shape(shaped_buffer->on_host_shape());  // in TransferToClient()
    932  shaped_buffer->device_ordinal()));  // in TransferToClient()
    937  stream.get(), *shaped_buffer));  // in TransferToClient()
    967  ScopedShapedBuffer shaped_buffer,  // in TransferToServer()
    974  stream.get(), literal, shaped_buffer));  // in TransferToServer()
    975  replicated_buffers.emplace_back(std::move(shaped_buffer));  // in TransferToServer()

  shaped_buffer_test.cc
    186  xla::ScopedShapedBuffer shaped_buffer(shape, /*allocator=*/&allocator,  // in BM_TakeSubTree() (local)
    191  (void)shaped_buffer.TakeSubTree(/*index=*/{fan_out / 2}).release();  // in BM_TakeSubTree()

  allocation_tracker.h
     49  StatusOr<GlobalDataHandle> Register(ScopedShapedBuffer shaped_buffer,

/external/tensorflow/tensorflow/compiler/xla/pjrt/

  tracked_device_buffer.cc
     98  ScopedShapedBuffer* shaped_buffer,  // in FromScopedShapedBuffer() (argument)
    102  shaped_buffer->buffers().begin();  // in FromScopedShapedBuffer()
    107  shaped_buffer->on_device_shape(), [&](const Shape&, const ShapeIndex&) {  // in FromScopedShapedBuffer()
    108  CHECK(iterator != shaped_buffer->buffers().end());  // in FromScopedShapedBuffer()
    113  CHECK(iterator == shaped_buffer->buffers().end());  // in FromScopedShapedBuffer()
    115  shaped_buffer->memory_allocator(), shaped_buffer->device_ordinal(),  // in FromScopedShapedBuffer()
    122  ShapedBuffer shaped_buffer(on_device_shape, device_ordinal_);  // in AsShapedBuffer() (local)
    124  shaped_buffer.buffers().begin();  // in AsShapedBuffer()
    126  CHECK(iterator != shaped_buffer.buffers().end());  // in AsShapedBuffer()
    130  CHECK(iterator == shaped_buffer.buffers().end());  // in AsShapedBuffer()
    [all …]

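FromScopedShapedBuffer and AsShapedBuffer both advance a ShapeTree iterator in lock step with a shape traversal, guarded by the CHECKs at lines 108 and 113 above: ShapeTree iteration and ShapeUtil::ForEachSubshape visit subshapes in the same order. The pattern in isolation (illustrative helper; the logging include for CHECK is assumed):

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    // Walk a ShapedBuffer's device buffers in lock step with its shape tree.
    void VisitBuffers(const xla::ShapedBuffer& shaped_buffer) {
      auto iterator = shaped_buffer.buffers().begin();
      xla::ShapeUtil::ForEachSubshape(
          shaped_buffer.on_device_shape(),
          [&](const xla::Shape& subshape, const xla::ShapeIndex& /*index*/) {
            CHECK(iterator != shaped_buffer.buffers().end());
            // iterator->second is the se::DeviceMemoryBase for this subshape.
            ++iterator;
          });
      // Both traversals must exhaust together.
      CHECK(iterator == shaped_buffer.buffers().end());
    }
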
  tracked_device_buffer_test.cc
    103  ScopedShapedBuffer shaped_buffer,  // in TEST()
    106  TrackedDeviceBuffer::FromScopedShapedBuffer(&shaped_buffer, {});  // in TEST()

  BUILD
     83  "//tensorflow/compiler/xla/service:shaped_buffer",
    202  "//tensorflow/compiler/xla/service:shaped_buffer",
    247  "//tensorflow/compiler/xla/service:shaped_buffer",

/external/tensorflow/tensorflow/compiler/xrt/

  xrt_state.cc
    186  auto shaped_buffer = scoped_buffer->release();  // in CreateAndTransfer() (local)
    189  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  // in CreateAndTransfer()
    191  ->InitializeFromShapedBuffer(shaped_buffer, backend->memory_allocator(),  // in CreateAndTransfer()
    209  auto shaped_buffer = scoped_buffer->release();  // in CreateUninitialized() (local)
    212  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  // in CreateUninitialized()
    214  ->InitializeFromShapedBuffer(shaped_buffer, backend->memory_allocator(),  // in CreateUninitialized()
    221  const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape,  // in CreateFromBuffer() (argument)
    229  ->InitializeFromShapedBuffer(shaped_buffer, allocator, device_ordinal);  // in CreateFromBuffer()
    235  const xla::ShapedBuffer& shaped_buffer, xla::Backend* backend,  // in CreateFromBuffer() (argument)
    237  return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(),  // in CreateFromBuffer()
    [all …]

  xrt_state.h
     97  static Status CreateFromBuffer(const xla::ShapedBuffer& shaped_buffer,
    105  static Status CreateFromBuffer(const xla::ShapedBuffer& shaped_buffer,
    243  void InitializeFromShapedBuffer(const xla::ShapedBuffer& shaped_buffer,

/external/tensorflow/tensorflow/compiler/jit/

  xla_tensor.h
     61  const xla::ShapedBuffer& shaped_buffer() const {  // in shaped_buffer() (function)
     65  xla::ShapedBuffer& shaped_buffer() {  // in shaped_buffer() (function)
     70  void set_shaped_buffer(xla::ScopedShapedBuffer shaped_buffer) {  // in set_shaped_buffer() (argument)
     71  shaped_buffer_ = std::move(shaped_buffer);  // in set_shaped_buffer()

  xla_tensor.cc
     35  return xla_tensor->shaped_buffer().root_buffer();  // in DeviceMemoryFromTensor()
     50  xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,  // in AllocateShapedBuffer() (local)
     53  for (auto& index_to_buffer : shaped_buffer.buffers()) {  // in AllocateShapedBuffer()
     66  VLOG(4) << shaped_buffer.ToString();  // in AllocateShapedBuffer()
     68  set_shaped_buffer(std::move(shaped_buffer));  // in AllocateShapedBuffer()

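AllocateShapedBuffer sizes and allocates one device buffer per leaf of the on-device shape, then hands the result to the tensor via set_shaped_buffer. A sketch of that allocation loop, with a hypothetical size_fn standing in for the backend's shape-size function (e.g. the TransferManager's byte-size requirement) and include paths abbreviated:

    #include <cstdint>
    #include <functional>

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/status_macros.h"
    #include "tensorflow/stream_executor/device_memory_allocator.h"

    namespace se = ::stream_executor;

    // Allocate device memory for every leaf of a ScopedShapedBuffer, following
    // the loop at lines 53-68 above. `size_fn` maps a subshape to its byte size.
    xla::Status AllocateLeafBuffers(
        xla::ScopedShapedBuffer* shaped_buffer,
        se::DeviceMemoryAllocator* allocator,
        const std::function<int64_t(const xla::Shape&)>& size_fn) {
      for (auto& index_to_buffer : shaped_buffer->buffers()) {
        const xla::Shape& subshape = xla::ShapeUtil::GetSubshape(
            shaped_buffer->on_device_shape(), index_to_buffer.first);
        TF_ASSIGN_OR_RETURN(
            se::OwningDeviceMemory buffer,
            allocator->Allocate(shaped_buffer->device_ordinal(),
                                size_fn(subshape)));
        // The ScopedShapedBuffer now owns the allocation and will free it.
        index_to_buffer.second = buffer.Release();
      }
      return xla::Status::OK();
    }
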
  xla_tpu_device.cc
     88  xla_tensor->shaped_buffer().on_device_shape();  // in TpuPaddedShapeFn()
    211  << " input buffers: " << xla_input->shaped_buffer().ToString()  // in TpuDeviceToDeviceCopy()
    212  << " output buffers: " << xla_output->shaped_buffer().ToString();  // in TpuDeviceToDeviceCopy()
    221  dst_compute_stream->parent(), xla_output->shaped_buffer())) {  // in TpuDeviceToDeviceCopy()
    226  if (xla_output->shaped_buffer().on_device_shape().IsTuple()) {  // in TpuDeviceToDeviceCopy()
    232  for (const auto& leaf : xla_input->shaped_buffer().buffers().leaves()) {  // in TpuDeviceToDeviceCopy()
    236  xla_output->shaped_buffer().buffer(index);  // in TpuDeviceToDeviceCopy()
    246  if (xla_output->shaped_buffer().on_device_shape().IsTuple()) {  // in TpuDeviceToDeviceCopy()
    250  xla_output->shaped_buffer()));  // in TpuDeviceToDeviceCopy()

  xla_device_context.cc
    160  << xla_tensor->shaped_buffer().ToString();  // in CopyCPUTensorToDevice()
    163  stream_->parent(), xla_tensor->shaped_buffer())) {  // in CopyCPUTensorToDevice()
    170  host_to_device_stream_.get(), literal, xla_tensor->shaped_buffer()));  // in CopyCPUTensorToDevice()
    249  xla_tensor->shaped_buffer().on_host_shape()),  // in CopyDeviceTensorToCPU()
    258  device_to_host_stream.get(), xla_tensor->shaped_buffer(), literal,  // in CopyDeviceTensorToCPU()
    263  << xla_tensor->shaped_buffer().ToString();  // in CopyDeviceTensorToCPU()

  xla_device.cc
     74  const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer();  // in DefaultPaddedShapeFn() (local)
     75  *shape = shaped_buffer.on_device_shape();  // in DefaultPaddedShapeFn()

/external/tensorflow/tensorflow/core/tpu/kernels/

  tpu_execute_op.cc
    174  xla::ShapedBuffer shaped_buffer(std::move(host_shape), buffers.shape(),  // in ToShapedBuffer() (local)
    176  shaped_buffer.set_buffers(buffers.Map<se::DeviceMemoryBase>(  // in ToShapedBuffer()
    181  return shaped_buffer;  // in ToShapedBuffer()
    231  const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape();  // in BuildComputationInputs()
    332  &xla_tensor->shaped_buffer());  // in BuildComputationInputs()
    467  xla::ScopedShapedBuffer shaped_buffer(device_shape, allocator,  // in AllocateOutputTensors() (local)
    469  shaped_buffer.buffers().ForEachMutableElement(  // in AllocateOutputTensors()
    480  xla_tensor->set_shaped_buffer(std::move(shaped_buffer));  // in AllocateOutputTensors()
    690  auto shaped_buffer = input_buffers->ToShapedBuffer(std::move(host_shape),  // in DoWork() (local)
    693  shaped_buffer)) {  // in DoWork()
    [all …]

/external/tensorflow/tensorflow/compiler/xrt/kernels/

  xrt_execute_op.cc
    252  TF_ASSIGN_OR_RETURN(xla::ShapedBuffer shaped_buffer,  // in UpdateDynamicInputs()
    261  transfer_manager->WriteTupleIndexTablesAsync(stream, shaped_buffer));  // in UpdateDynamicInputs()
    271  xla::ScopedShapedBuffer* shaped_buffer = run_result.MutableResult();  // in CreateOutputTuple() (local)
    272  if (shaped_buffer->on_device_shape().is_dynamic()) {  // in CreateOutputTuple()
    275  xla::Shape output_device_shape = shaped_buffer->on_device_shape();  // in CreateOutputTuple()
    280  stream, shaped_buffer, &output_device_shape));  // in CreateOutputTuple()
    282  *shaped_buffer,  // in CreateOutputTuple()
    288  *shaped_buffer, backend, device_ordinal, &output_tuple));  // in CreateOutputTuple()

/external/tensorflow/tensorflow/compiler/xla/client/

  local_client.cc
    416  const ShapedBuffer& shaped_buffer) {  // in ShapedBufferToLiteral() (argument)
    418  shaped_buffer.device_ordinal()));  // in ShapedBufferToLiteral()
    420  shaped_buffer);  // in ShapedBufferToLiteral()
    452  TF_ASSIGN_OR_RETURN(::xla::ScopedShapedBuffer shaped_buffer,  // in TransferToLocalServer()
    458  stream.get(), literal, shaped_buffer));  // in TransferToLocalServer()
    460  replicated_buffer.emplace_back(std::move(shaped_buffer));  // in TransferToLocalServer()

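ShapedBufferToLiteral is the download half of the transfer pair used by TransferToLocalServer above. A round-trip sketch against the LocalClient API, with LiteralToShapedBuffer as the upload counterpart (the helper name and the choice of device ordinal 0 are illustrative):

    #include "tensorflow/compiler/xla/client/local_client.h"
    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/status_macros.h"

    // Upload a literal into freshly allocated device buffers, then read the
    // device buffers back into a new host literal.
    xla::StatusOr<xla::Literal> RoundTripLiteral(xla::LocalClient* client,
                                                 const xla::Literal& literal) {
      TF_ASSIGN_OR_RETURN(
          xla::ScopedShapedBuffer shaped_buffer,
          client->LiteralToShapedBuffer(literal, /*device_ordinal=*/0));
      // The ScopedShapedBuffer frees the device memory when it goes out of
      // scope, after the literal has been transferred back.
      return client->ShapedBufferToLiteral(shaped_buffer);
    }
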
  local_client.h
     90  StatusOr<Literal> LiteralFromShapedBuffer(const ShapedBuffer& shaped_buffer);
    162  StatusOr<Literal> ShapedBufferToLiteral(const ShapedBuffer& shaped_buffer);

/external/tensorflow/tensorflow/compiler/xla/tests/

  buffer_donation_test.cc
    104  ShapedBuffer shaped_buffer = scoped_shaped_buffer.release();  // in RunAndCheck() (local)
    106  &stream, argument_literal, shaped_buffer));  // in RunAndCheck()
    107  ShapeTree<se::DeviceMemoryBase> input_buffers = shaped_buffer.buffers();  // in RunAndCheck()

  local_client_test_base.cc
    146  const ShapedBuffer& shaped_buffer) {  // in ShapedBufferToLiteral() (argument)
    147  return local_client_->ShapedBufferToLiteral(shaped_buffer)  // in ShapedBufferToLiteral()

/external/tensorflow/tensorflow/stream_executor/tpu/

  BUILD
     57  "//tensorflow/compiler/xla/service:shaped_buffer",
    269  "//tensorflow/compiler/xla/service:shaped_buffer",
    354  "//tensorflow/compiler/xla/service:shaped_buffer",

/external/tensorflow/tensorflow/compiler/xla/service/interpreter/

  BUILD
     92  "//tensorflow/compiler/xla/service:shaped_buffer",
    121  "//tensorflow/compiler/xla/service:shaped_buffer",