
Searched refs:shaped_buffer (Results 1 – 25 of 38) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
allocation_tracker.cc
35 ScopedShapedBuffer shaped_buffer, const string& tag) { in Register() argument
39 replicated_buffers.emplace_back(std::move(shaped_buffer)); in Register()
67 for (const auto& shaped_buffer : replicated_buffers) { in RegisterInternal() local
68 VLOG(2) << "shaped_buffer:" << shaped_buffer; in RegisterInternal()
69 if (shaped_buffer.platform() != backend_->platform()) { in RegisterInternal()
73 backend_->platform()->Name(), shaped_buffer.platform()->Name()); in RegisterInternal()
78 for (auto& shaped_buffer : replicated_buffers) { in RegisterInternal() local
81 shaped_buffer.on_device_shape(), in RegisterInternal()
88 AddAllocationOrIncrementRefCount(shaped_buffer.buffer(index), in RegisterInternal()
89 shaped_buffer.device_ordinal()); in RegisterInternal()
[all …]
transfer_manager.cc
169 ShapedBuffer shaped_buffer(/*on_host_shape=*/literal.shape(), on_device_shape, in TransferArrayToDeviceAsync() local
172 shaped_buffer.set_buffer(dest, /*index=*/{}); in TransferArrayToDeviceAsync()
173 return TransferLiteralToDevice(stream, literal, shaped_buffer, in TransferArrayToDeviceAsync()
194 ShapedBuffer shaped_buffer(/*on_host_shape=*/shape, shape, in TransferArrayFromDevice() local
197 shaped_buffer.set_buffer(source, /*index=*/{}); in TransferArrayFromDevice()
198 return TransferLiteralFromDevice(stream, shaped_buffer, literal, in TransferArrayFromDevice()
325 ScopedShapedBuffer shaped_buffer(on_host_shape, std::move(on_device_shape), in AllocateScopedShapedBuffer() local
330 for (auto& pair : shaped_buffer.buffers()) { in AllocateScopedShapedBuffer()
334 ShapeUtil::GetSubshape(shaped_buffer.on_device_shape(), index); in AllocateScopedShapedBuffer()
336 allocator->Allocate(shaped_buffer.device_ordinal(), in AllocateScopedShapedBuffer()
[all …]
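
Taken together, the TransferArrayToDeviceAsync and TransferArrayFromDevice hits show a recurring pattern: construct a ShapedBuffer from a host shape and a device shape, then point its root at an existing device allocation with set_buffer before handing it to a transfer routine. A minimal sketch of that pattern (WrapDeviceMemory is a hypothetical helper name; the constructor and set_buffer calls mirror the snippets above):

  #include "tensorflow/compiler/xla/service/shaped_buffer.h"
  #include "tensorflow/compiler/xla/shape.h"
  #include "tensorflow/stream_executor/device_memory.h"
  #include "tensorflow/stream_executor/platform.h"

  namespace se = ::stream_executor;

  // Wrap a raw device allocation in a ShapedBuffer so transfer routines
  // can address it by shape index; {} is the root of the shape tree.
  xla::ShapedBuffer WrapDeviceMemory(const xla::Shape& on_host_shape,
                                     const xla::Shape& on_device_shape,
                                     const se::Platform* platform,
                                     int device_ordinal,
                                     se::DeviceMemoryBase dest) {
    xla::ShapedBuffer shaped_buffer(on_host_shape, on_device_shape, platform,
                                    device_ordinal);
    shaped_buffer.set_buffer(dest, /*index=*/{});
    return shaped_buffer;
  }

Note that a plain ShapedBuffer does not own dest; whoever allocated the memory must free it.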
shaped_buffer.cc
127 ScopedShapedBuffer::ScopedShapedBuffer(ShapedBuffer shaped_buffer, in ScopedShapedBuffer() argument
129 : ShapedBuffer(std::move(shaped_buffer)), allocator_(allocator) {} in ScopedShapedBuffer()
150 ShapedBuffer shaped_buffer(static_cast<ShapedBuffer&&>(*this)); in release() local
152 return shaped_buffer; in release()
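
These two hits mark the ownership boundary of the type: the constructor at line 127 adopts a plain ShapedBuffer together with the allocator that owns its memory, and release() at line 150 moves ownership back out. A sketch of the resulting RAII contract (Disown is a hypothetical helper):

  #include "tensorflow/compiler/xla/service/shaped_buffer.h"

  // A ScopedShapedBuffer frees its device memory when destroyed.
  // release() disowns the memory and returns a plain ShapedBuffer;
  // after this call the caller is responsible for deallocation.
  xla::ShapedBuffer Disown(xla::ScopedShapedBuffer scoped) {
    return scoped.release();
  }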
executable.cc
48 const ShapedBuffer& shaped_buffer) { in MakeMaybeOwningDeviceMemoryTree() argument
49 ShapeTree<MaybeOwningDeviceMemory> result(shaped_buffer.on_device_shape()); in MakeMaybeOwningDeviceMemoryTree()
50 auto in_it = shaped_buffer.buffers().begin(); in MakeMaybeOwningDeviceMemoryTree()
52 for (; in_it != shaped_buffer.buffers().end(); ++in_it, ++out_it) { in MakeMaybeOwningDeviceMemoryTree()
service.cc
247 const ShapedBuffer* shaped_buffer = replicated_buffers[replica]; in ResolveAndValidateArguments() local
250 if (shaped_buffer->platform() != execute_backend_->platform() || in ResolveAndValidateArguments()
251 shaped_buffer->device_ordinal() != replica_device_ordinal) { in ResolveAndValidateArguments()
255 i, shaped_buffer->platform()->Name(), in ResolveAndValidateArguments()
256 shaped_buffer->device_ordinal(), in ResolveAndValidateArguments()
259 replicated_arguments[replica].push_back(shaped_buffer); in ResolveAndValidateArguments()
962 TF_ASSIGN_OR_RETURN(const ShapedBuffer* shaped_buffer, in TransferToClient()
972 return_shape = Shape(shaped_buffer->on_host_shape()); in TransferToClient()
976 shaped_buffer->device_ordinal())); in TransferToClient()
981 stream.get(), *shaped_buffer)); in TransferToClient()
[all …]
shaped_buffer_test.cc
185 xla::ScopedShapedBuffer shaped_buffer(shape, shape, /*allocator=*/&allocator, in BM_TakeSubTree() local
191 (void)shaped_buffer.TakeSubTree(/*index=*/{fan_out / 2}).release(); in BM_TakeSubTree()
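
BM_TakeSubTree measures splitting ownership out of a tuple buffer: TakeSubTree detaches the subtree at the given index from the parent and returns it as an independently owned ScopedShapedBuffer. A hypothetical wrapper over the call benchmarked above:

  #include <cstdint>
  #include "tensorflow/compiler/xla/service/shaped_buffer.h"

  // Detach tuple element i; the parent no longer owns (or frees) the
  // allocations under that index.
  xla::ScopedShapedBuffer TakeElement(xla::ScopedShapedBuffer* tuple_buffer,
                                      int64_t i) {
    return tuple_buffer->TakeSubTree(/*index=*/{i});
  }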
allocation_tracker.h
49 StatusOr<GlobalDataHandle> Register(ScopedShapedBuffer shaped_buffer,
/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
186 auto shaped_buffer = scoped_buffer->release(); in CreateAndTransfer() local
189 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateAndTransfer()
191 ->InitializeFromShapedBuffer(shaped_buffer, backend->memory_allocator(), in CreateAndTransfer()
209 auto shaped_buffer = scoped_buffer->release(); in CreateUninitialized() local
212 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateUninitialized()
214 ->InitializeFromShapedBuffer(shaped_buffer, backend->memory_allocator(), in CreateUninitialized()
221 const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape, in CreateFromBuffer() argument
229 ->InitializeFromShapedBuffer(shaped_buffer, allocator, device_ordinal); in CreateFromBuffer()
235 const xla::ShapedBuffer& shaped_buffer, xla::Backend* backend, in CreateFromBuffer() argument
237 return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(), in CreateFromBuffer()
[all …]
xrt_state.h
96 static Status CreateFromBuffer(const xla::ShapedBuffer& shaped_buffer,
104 static Status CreateFromBuffer(const xla::ShapedBuffer& shaped_buffer,
243 void InitializeFromShapedBuffer(const xla::ShapedBuffer& shaped_buffer,
/external/tensorflow/tensorflow/compiler/xla/python/
shared_device_buffer.cc
96 ScopedShapedBuffer* shaped_buffer, in FromScopedShapedBuffer() argument
99 shaped_buffer->buffers().begin(); in FromScopedShapedBuffer()
102 shaped_buffer->on_host_shape(), shaped_buffer->on_device_shape(), in FromScopedShapedBuffer()
103 shaped_buffer->device_ordinal(), shaped_buffer->memory_allocator(), in FromScopedShapedBuffer()
104 &iterator, shaped_buffer->buffers().end(), definition_event); in FromScopedShapedBuffer()
105 CHECK(iterator == shaped_buffer->buffers().end()); in FromScopedShapedBuffer()
169 ShapedBuffer shaped_buffer(on_host_shape, on_device_shape, platform, in AsShapedBuffer() local
172 shaped_buffer.buffers().begin(); in AsShapedBuffer()
174 shaped_buffer.buffers().end()); in AsShapedBuffer()
175 CHECK(iterator == shaped_buffer.buffers().end()); in AsShapedBuffer()
[all …]
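
Both hits walk shaped_buffer.buffers(), a ShapeTree<se::DeviceMemoryBase>: one device allocation per node of the (possibly tuple) shape, addressed by ShapeIndex. A sketch of the same traversal that just logs each allocation (DumpBuffers is a hypothetical name):

  #include "tensorflow/compiler/xla/service/shaped_buffer.h"
  #include "tensorflow/core/platform/logging.h"

  namespace se = ::stream_executor;

  void DumpBuffers(const xla::ShapedBuffer& shaped_buffer) {
    // Each entry pairs a ShapeIndex with the device memory at that
    // position in the shape tree.
    for (const auto& pair : shaped_buffer.buffers()) {
      const xla::ShapeIndex& index = pair.first;
      const se::DeviceMemoryBase& memory = pair.second;
      LOG(INFO) << index.ToString() << " -> " << memory.opaque() << " ("
                << memory.size() << " bytes)";
    }
  }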
shared_device_buffer_test.cc
106 ShapedBuffer shaped_buffer = abc_tuple_buffer->AsShapedBuffer( in TEST() local
108 EXPECT_EQ(shaped_buffer.on_host_shape(), abc_tuple_shape); in TEST()
109 EXPECT_EQ(shaped_buffer.on_device_shape(), abc_tuple_device_shape); in TEST()
121 auto it = shaped_buffer.buffers().begin(); in TEST()
123 while (it != shaped_buffer.buffers().end()) { in TEST()
140 ScopedShapedBuffer shaped_buffer, in TEST()
143 SharedDeviceBuffer::FromScopedShapedBuffer(&shaped_buffer, nullptr); in TEST()
local_client.cc
446 TF_ASSIGN_OR_RETURN(ShapedBuffer shaped_buffer, buffer->AsShapedBuffer()); in MakeTuple()
448 local_device->host_to_device_stream()->parent(), shaped_buffer)) { in MakeTuple()
454 local_device->host_to_device_stream(), shaped_buffer)); in MakeTuple()
504 TF_ASSIGN_OR_RETURN(ShapedBuffer shaped_buffer, AsShapedBuffer()); in CopyToHostAsync()
506 stream, shaped_buffer, host_value->value.get(), in CopyToHostAsync()
757 TF_ASSIGN_OR_RETURN(ShapedBuffer shaped_buffer, handle->AsShapedBuffer()); in ExecuteHelper()
758 argument_buffers.push_back(std::move(shaped_buffer)); in ExecuteHelper()
BUILD
123 "//tensorflow/compiler/xla/service:shaped_buffer",
185 "//tensorflow/compiler/xla/service:shaped_buffer",
339 "//tensorflow/compiler/xla/service:shaped_buffer",
xla.cc
288 TF_ASSIGN_OR_RETURN(ShapedBuffer shaped_buffer, buffer.AsShapedBuffer()); in PyLocalBufferCudaArrayInterface()
291 result["shape"] = IntSpanToTuple(shaped_buffer.on_host_shape().dimensions()); in PyLocalBufferCudaArrayInterface()
294 shaped_buffer.on_host_shape().element_type())); in PyLocalBufferCudaArrayInterface()
298 absl::bit_cast<std::uintptr_t>(shaped_buffer.root_buffer().opaque())); in PyLocalBufferCudaArrayInterface()
701 TF_ASSIGN_OR_RETURN(ShapedBuffer shaped_buffer, in PYBIND11_MODULE()
703 if (shaped_buffer.on_device_shape().IsTuple()) { in PYBIND11_MODULE()
709 shaped_buffer.root_buffer().opaque()); in PYBIND11_MODULE()
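
PyLocalBufferCudaArrayInterface builds its result by reading the raw device pointer of a non-tuple buffer through root_buffer().opaque(). A reduced sketch of that access (RootDevicePointer is a hypothetical name):

  #include "tensorflow/compiler/xla/service/shaped_buffer.h"
  #include "tensorflow/core/platform/logging.h"

  // The root buffer of a non-tuple ShapedBuffer is the single device
  // allocation backing the array; opaque() is its raw device address.
  const void* RootDevicePointer(const xla::ShapedBuffer& shaped_buffer) {
    CHECK(!shaped_buffer.on_device_shape().IsTuple());
    return shaped_buffer.root_buffer().opaque();
  }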
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.h
63 const xla::ShapedBuffer& shaped_buffer() const { in shaped_buffer() function
67 xla::ShapedBuffer& shaped_buffer() { in shaped_buffer() function
72 void set_shaped_buffer(xla::ScopedShapedBuffer shaped_buffer) { in set_shaped_buffer() argument
74 absl::make_unique<xla::ScopedShapedBuffer>(std::move(shaped_buffer)); in set_shaped_buffer()
xla_tensor.cc
39 return xla_tensor->shaped_buffer().root_buffer(); in DeviceMemoryFromTensor()
54 xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape, in AllocateShapedBuffer() local
57 for (auto& index_to_buffer : shaped_buffer.buffers()) { in AllocateShapedBuffer()
70 VLOG(4) << shaped_buffer.ToString(); in AllocateShapedBuffer()
72 set_shaped_buffer(std::move(shaped_buffer)); in AllocateShapedBuffer()
xla_device_context.cc
158 << xla_tensor->shaped_buffer().ToString(); in CopyCPUTensorToDevice()
161 stream_->parent(), xla_tensor->shaped_buffer())) { in CopyCPUTensorToDevice()
168 host_to_device_stream_.get(), literal, xla_tensor->shaped_buffer())); in CopyCPUTensorToDevice()
247 xla_tensor->shaped_buffer().on_host_shape()), in CopyDeviceTensorToCPU()
256 device_to_host_stream.get(), xla_tensor->shaped_buffer(), literal, in CopyDeviceTensorToCPU()
261 << xla_tensor->shaped_buffer().ToString(); in CopyDeviceTensorToCPU()
xla_device.cc
129 const xla::ShapedBuffer& shaped_buffer = xla_tensor->shaped_buffer(); in DefaultPaddedShapeFn() local
130 *shape = shaped_buffer.on_device_shape(); in DefaultPaddedShapeFn()
/external/tensorflow/tensorflow/compiler/xrt/kernels/
xrt_execute_op.cc
97 TF_ASSIGN_OR_RETURN(xla::ShapedBuffer shaped_buffer, in GetInputBuffers()
99 input_buffers.input_allocations.emplace_back(std::move(shaped_buffer)); in GetInputBuffers()
101 TF_ASSIGN_OR_RETURN(xla::ShapedBuffer shaped_buffer, in GetInputBuffers()
104 shaped_buffer.SubShapedBuffer(input_coords[i].index)); in GetInputBuffers()
129 TF_ASSIGN_OR_RETURN(xla::ShapedBuffer shaped_buffer, in GetChainedOpInputs()
131 input_buffers.input_allocations.emplace_back(std::move(shaped_buffer)); in GetChainedOpInputs()
133 TF_ASSIGN_OR_RETURN(xla::ShapedBuffer shaped_buffer, in GetChainedOpInputs()
137 shaped_buffer.SubShapedBuffer({input.output_index() - 1})); in GetChainedOpInputs()
167 auto shaped_buffer = run_result.release(); in RunExecutable() local
170 shaped_buffer, device_ref->backend(), device_ref->device_ordinal(), in RunExecutable()
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
383 const ShapedBuffer& shaped_buffer) { in ShapedBufferToLiteral() argument
385 shaped_buffer.device_ordinal())); in ShapedBufferToLiteral()
387 shaped_buffer); in ShapedBufferToLiteral()
421 TF_ASSIGN_OR_RETURN(::xla::ScopedShapedBuffer shaped_buffer, in TransferToLocalServer()
427 stream.get(), literal, shaped_buffer)); in TransferToLocalServer()
429 replicated_buffer.emplace_back(std::move(shaped_buffer)); in TransferToLocalServer()
local_client.h
83 StatusOr<Literal> LiteralFromShapedBuffer(const ShapedBuffer& shaped_buffer);
139 StatusOr<Literal> ShapedBufferToLiteral(const ShapedBuffer& shaped_buffer);
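
ShapedBufferToLiteral is the public download path on LocalClient; together with LiteralToShapedBuffer (its upload counterpart, not among these hits) it gives a host-to-device round trip. A minimal sketch, assuming a default local client and keeping error handling explicit rather than relying on status macros:

  #include <utility>

  #include "tensorflow/compiler/xla/client/client_library.h"
  #include "tensorflow/compiler/xla/client/local_client.h"
  #include "tensorflow/compiler/xla/literal_util.h"

  // Copy a literal into fresh device memory on device 0, then read it back.
  xla::StatusOr<xla::Literal> RoundTrip() {
    xla::LocalClient* client = xla::ClientLibrary::LocalClientOrDie();
    xla::Literal literal = xla::LiteralUtil::CreateR1<float>({1.f, 2.f, 3.f});
    auto buffer_or =
        client->LiteralToShapedBuffer(literal, /*device_ordinal=*/0);
    if (!buffer_or.ok()) return buffer_or.status();
    xla::ScopedShapedBuffer buffer = std::move(buffer_or).ValueOrDie();
    return client->ShapedBufferToLiteral(buffer);
  }

Because the ScopedShapedBuffer is scoped to RoundTrip, the device allocations are freed when it returns.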
/external/tensorflow/tensorflow/compiler/xla/tests/
buffer_donation_test.cc
87 auto shaped_buffer = scoped_shaped_buffer.release(); in RunAndCheck() local
89 &stream, argument_literal, shaped_buffer)); in RunAndCheck()
90 auto input_buffers = shaped_buffer.buffers(); in RunAndCheck()
local_client_test_base.cc
146 const ShapedBuffer& shaped_buffer) { in ShapedBufferToLiteral() argument
147 return local_client_->ShapedBufferToLiteral(shaped_buffer) in ShapedBufferToLiteral()
local_client_test_base.h
93 Literal ShapedBufferToLiteral(const ShapedBuffer& shaped_buffer);
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
gpu_executable.cc
375 ScopedShapedBuffer shaped_buffer(root->shape(), root->shape(), in ExecuteAsyncOnStream() local
381 TF_RETURN_IF_ERROR(shaped_buffer.buffers().ForEachMutableElementWithStatus( in ExecuteAsyncOnStream()
440 return ExecutionOutput(std::move(shaped_buffer), std::move(buffers_to_free), in ExecuteAsyncOnStream()
