Searched refs: on_device_shape (Results 1 – 25 of 35) sorted by relevance

/external/tensorflow/tensorflow/compiler/xla/service/
shaped_buffer.h
46 ShapedBuffer(Shape on_device_shape, int device_ordinal);
49 ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
69 const Shape& on_device_shape() const { return on_device_shape_; } in on_device_shape() function
101 void set_shapes(const Shape& on_device_shape) { in set_shapes() argument
102 CHECK(ShapeUtil::EqualStructure(on_device_shape, on_device_shape_)) in set_shapes()
103 << "Structures are not the same. new: " << on_device_shape in set_shapes()
105 on_host_shape_ = ShapeUtil::DeviceShapeToHostShape(on_device_shape); in set_shapes()
106 on_device_shape_ = on_device_shape; in set_shapes()
110 void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) { in set_shapes() argument
111 set_shapes(on_device_shape); in set_shapes()
[all …]
shaped_buffer.cc
34 ShapedBuffer::ShapedBuffer(Shape on_device_shape, int device_ordinal) in ShapedBuffer() argument
35 : on_device_shape_(std::move(on_device_shape)), in ShapedBuffer()
41 ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape, in ShapedBuffer() argument
43 : ShapedBuffer(on_device_shape, device_ordinal) {} in ShapedBuffer()
73 ShapeUtil::TryGetSubshape(on_device_shape(), index)); in SubShapedBuffer()
92 ShapeUtil::HumanStringWithLayout(on_device_shape()), in ToString()
95 on_device_shape(), in ToString()
116 ScopedShapedBuffer::ScopedShapedBuffer(Shape on_device_shape, in ScopedShapedBuffer() argument
119 : ShapedBuffer(std::move(on_device_shape), device_ordinal), in ScopedShapedBuffer()
123 Shape on_device_shape, in ScopedShapedBuffer() argument
[all …]
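The two excerpts above show ShapedBuffer's shape bookkeeping: the device shape is the authoritative one, and set_shapes() only accepts a replacement with the same tuple structure, deriving the host shape from it. A minimal sketch of that contract, assuming the headers at the paths above; the shapes are illustrative:

  #include "tensorflow/compiler/xla/service/shaped_buffer.h"
  #include "tensorflow/compiler/xla/shape_util.h"

  void ShapedBufferSketch() {
    // Row-major f32[2,3] as the device-resident shape.
    xla::Shape device_shape = xla::ShapeUtil::MakeShapeWithLayout(
        xla::F32, /*dimensions=*/{2, 3}, /*minor_to_major=*/{1, 0});
    xla::ShapedBuffer buffer(device_shape, /*device_ordinal=*/0);

    // set_shapes() CHECKs ShapeUtil::EqualStructure(), so the new shape
    // must have the same tuple structure; a pure relayout passes.
    xla::Shape relaid = xla::ShapeUtil::MakeShapeWithLayout(
        xla::F32, {2, 3}, /*minor_to_major=*/{0, 1});  // column-major
    buffer.set_shapes(relaid);
    // on_host_shape() is now DeviceShapeToHostShape(relaid).
  }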
transfer_manager.cc
161 const Shape on_device_shape = HostShapeToDeviceShape(literal.shape()); in TransferArrayToDeviceAsync() local
162 TF_RET_CHECK(on_device_shape.IsArray()) in TransferArrayToDeviceAsync()
165 << " is not an array: " << ShapeUtil::HumanString(on_device_shape); in TransferArrayToDeviceAsync()
166 if (dest.size() < GetByteSizeRequirement(on_device_shape)) { in TransferArrayToDeviceAsync()
170 dest.size(), GetByteSizeRequirement(on_device_shape)); in TransferArrayToDeviceAsync()
172 ShapedBuffer shaped_buffer(on_device_shape, in TransferArrayToDeviceAsync()
298 device_buffer.on_device_shape(), in WriteTupleIndexTablesAsync()
324 TF_RET_CHECK(device_buffer.on_device_shape().IsTuple()); in WriteRootTupleIndexTable()
325 if (ShapeUtil::TupleElementCount(device_buffer.on_device_shape()) == 0) { in WriteRootTupleIndexTable()
329 TF_RET_CHECK(GetByteSizeRequirement(device_buffer.on_device_shape()) == in WriteRootTupleIndexTable()
[all …]
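The pre-transfer checks above follow a common pattern: map the host shape to its device shape, verify it is an array, and verify the destination buffer is large enough. A hedged sketch of that validation; the helper name is hypothetical, and the types come from the paths above:

  #include "tensorflow/compiler/xla/service/transfer_manager.h"
  #include "tensorflow/compiler/xla/shape_util.h"
  #include "tensorflow/compiler/xla/util.h"
  #include "tensorflow/stream_executor/device_memory.h"

  // Hypothetical helper mirroring the checks in TransferArrayToDeviceAsync.
  xla::Status ValidateArrayDest(
      xla::TransferManager* transfer_manager, const xla::Shape& host_shape,
      const stream_executor::DeviceMemoryBase& dest) {
    const xla::Shape on_device_shape =
        transfer_manager->HostShapeToDeviceShape(host_shape);
    if (!on_device_shape.IsArray()) {
      return xla::InvalidArgument(
          "%s is not an array",
          xla::ShapeUtil::HumanString(on_device_shape).c_str());
    }
    if (static_cast<int64_t>(dest.size()) <
        transfer_manager->GetByteSizeRequirement(on_device_shape)) {
      return xla::FailedPrecondition("destination buffer is too small");
    }
    return xla::Status::OK();
  }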
allocation_tracker.cc
72 shaped_buffer.on_device_shape(), in RegisterInternal()
105 shaped_buffer->on_device_shape(), in Unregister()
137 if (!shaped_buffer->on_device_shape().IsTuple()) { in DeconstructTuple()
142 if (ShapeUtil::IsNestedTuple(shaped_buffer->on_device_shape())) { in DeconstructTuple()
148 i < ShapeUtil::TupleElementCount(shaped_buffer->on_device_shape()); in DeconstructTuple()
151 ShapeUtil::GetTupleElementShape(shaped_buffer->on_device_shape(), i), in DeconstructTuple()
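DeconstructTuple above rejects non-tuples and nested tuples, then walks the top-level elements. The shape-level logic in isolation; `shaped_buffer` mirrors the excerpt's local, and buffer handling is elided:

  const xla::Shape& shape = shaped_buffer->on_device_shape();
  if (shape.IsTuple() && !xla::ShapeUtil::IsNestedTuple(shape)) {
    for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(shape); ++i) {
      const xla::Shape& element =
          xla::ShapeUtil::GetTupleElementShape(shape, i);
      // Each element is handed out as an independent allocation.
      (void)element;
    }
  }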
generic_transfer_manager.cc
73 device_buffer.on_device_shape(), in TransferLiteralFromDevice()
102 ShapeUtil::Compatible(literal.shape(), device_buffer.on_device_shape())); in TransferLiteralToDeviceAsync()
109 device_buffer.on_device_shape(), in TransferLiteralToDeviceAsync()
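The TF_RET_CHECK above relies on ShapeUtil::Compatible, which ignores layout, so a host literal can match a device buffer that differs only in its layout. Illustrative:

  xla::Shape host = xla::ShapeUtil::MakeShape(xla::F32, {2, 2});
  xla::Shape device = xla::ShapeUtil::MakeShapeWithLayout(
      xla::F32, {2, 2}, /*minor_to_major=*/{0, 1});
  CHECK(xla::ShapeUtil::Compatible(host, device));  // layouts don't matter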
executable.h
157 ExecutionOutput(Shape on_host_shape, Shape on_device_shape, in ExecutionOutput() argument
159 : result_(std::move(on_device_shape), allocator, device_ordinal) {} in ExecutionOutput()
160 ExecutionOutput(Shape on_device_shape, se::DeviceMemoryAllocator* allocator, in ExecutionOutput() argument
162 : result_(std::move(on_device_shape), allocator, device_ordinal) {} in ExecutionOutput()
/external/tensorflow/tensorflow/compiler/xla/python/
py_buffer.cc
59 return IntSpanToTuple(buffer()->on_device_shape().dimensions()); in python_shape()
63 PrimitiveType primitive = buffer()->on_device_shape().element_type(); in python_dtype()
97 ShapeUtil::DeviceShapeToHostShape(buffer_->on_device_shape())); in CopyToHostAsync()
112 TF_RET_CHECK(buffer_->on_device_shape().IsArray()); in AsNumPyArray()
117 PrimitiveTypeToDtype(buffer_->on_device_shape().element_type())); in AsNumPyArray()
130 py::array array(dtype, buffer_->on_device_shape().dimensions(), in AsNumPyArray()
131 ByteStridesForShape(buffer_->on_device_shape()), data, in AsNumPyArray()
154 if (buffer_->on_device_shape().IsTuple()) { in UnsafeBufferPointer()
173 if (!buffer_->on_device_shape().IsArray()) { in CudaArrayInterface()
177 if (buffer_->on_device_shape().element_type() == BF16) { in CudaArrayInterface()
[all …]
py_buffer.h
82 const Shape& shape() { return buffer_->on_device_shape(); } in shape()
96 int64 size() { return ShapeUtil::ElementsIn(buffer()->on_device_shape()); } in size()
99 int ndim() const { return buffer()->on_device_shape().dimensions_size(); } in ndim()
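All of the Python-facing metadata above (shape, size, ndim, dtype) is derived from on_device_shape(). For an array shape the derivation is just the following, with illustrative values:

  xla::Shape s = xla::ShapeUtil::MakeShape(xla::F32, {4, 8});
  int64_t size = xla::ShapeUtil::ElementsIn(s);  // 32
  int ndim = s.dimensions_size();                // 2
  xla::PrimitiveType dtype = s.element_type();   // F32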
dlpack.cc
254 if (buffer->buffer()->on_device_shape().IsTuple()) { in BufferToDLPackManagedTensor()
290 dt.ndim = buffer->buffer()->on_device_shape().dimensions_size(); in BufferToDLPackManagedTensor()
293 buffer->buffer()->on_device_shape().element_type())); in BufferToDLPackManagedTensor()
296 buffer->buffer()->on_device_shape().dimensions().begin(), in BufferToDLPackManagedTensor()
297 buffer->buffer()->on_device_shape().dimensions().end()); in BufferToDLPackManagedTensor()
298 pack->strides = StridesForShape(buffer->buffer()->on_device_shape()); in BufferToDLPackManagedTensor()
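BufferToDLPackManagedTensor above copies the device shape's rank, element type, dimensions, and strides into the DLPack struct. A hedged sketch of the rank/dimension part, using dlpack.h's DLTensor; `buffer` mirrors the excerpt, and in the real code the dims vector is kept alive inside the managed-tensor pack:

  #include <cstdint>
  #include <vector>
  #include "include/dlpack/dlpack.h"  // path as vendored in TF

  const xla::Shape& shape = buffer->on_device_shape();
  std::vector<int64_t> dims(shape.dimensions().begin(),
                            shape.dimensions().end());
  DLTensor dt;
  dt.ndim = shape.dimensions_size();
  dt.shape = dims.data();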
xla.cc
288 pybuffer.buffer()->on_device_shape().dimensions()); in PYBIND11_MODULE()
294 buffer.buffer()->on_device_shape().element_type(); in PYBIND11_MODULE()
/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
87 xla::Shape on_device_shape = transfer_manager->HostShapeToDeviceShape(shape); in AllocateScopedShapedBuffer() local
90 << xla::ShapeUtil::HumanStringWithLayout(on_device_shape); in AllocateScopedShapedBuffer()
96 shape, on_device_shape, backend->memory_allocator(), device_ordinal); in AllocateScopedShapedBuffer()
99 xla::ShapeUtil::GetSubshape(on_device_shape, index_to_buffer.first); in AllocateScopedShapedBuffer()
150 const xla::Shape& on_device_shape) in XRTTupleAllocation() argument
154 on_device_shape_(on_device_shape), in XRTTupleAllocation()
189 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateAndTransfer()
212 shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape()); in CreateUninitialized()
222 const xla::Shape& on_device_shape, xla::Backend* backend, in CreateFromBuffer() argument
227 on_device_shape); in CreateFromBuffer()
[all …]
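AllocateScopedShapedBuffer above derives the device shape and then allocates one device buffer per non-tuple subshape. The walk in isolation, assuming `transfer_manager` and `shape` as in the excerpt; the allocation itself is elided:

  xla::Shape on_device_shape =
      transfer_manager->HostShapeToDeviceShape(shape);
  xla::ShapeUtil::ForEachSubshape(
      on_device_shape,
      [&](const xla::Shape& subshape, const xla::ShapeIndex& index) {
        if (!subshape.IsTuple()) {
          int64_t bytes = transfer_manager->GetByteSizeRequirement(subshape);
          // Allocate `bytes` on the device and store the handle at `index`.
          (void)bytes;
        }
      });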
xrt_state.h
107 const xla::Shape& on_device_shape,
199 const xla::Shape& on_device_shape() const;
239 const xla::Shape& on_device_shape);
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
46 xla::Shape on_device_shape = in AllocateShapedBuffer() local
50 xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape, in AllocateShapedBuffer()
55 xla::ShapeUtil::GetSubshape(on_device_shape, index_to_buffer.first); in AllocateShapedBuffer()
xla_tpu_device.cc
87 const xla::Shape& on_device_shape = in TpuPaddedShapeFn() local
88 xla_tensor->shaped_buffer().on_device_shape(); in TpuPaddedShapeFn()
91 ApiConverter::StackHelper<XLA_Shape> se_shape(on_device_shape); in TpuPaddedShapeFn()
226 if (xla_output->shaped_buffer().on_device_shape().IsTuple()) { in TpuDeviceToDeviceCopy()
246 if (xla_output->shaped_buffer().on_device_shape().IsTuple()) { in TpuDeviceToDeviceCopy()
xla_launch_util.cc
434 << output.on_device_shape().DebugString(); in PopulateOutputs()
444 xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_device_shape()}), in PopulateOutputs()
468 xla::Shape output_device_shape = output.on_device_shape(); in PopulateOutputs()
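PopulateOutputs above normalizes a non-tuple result by wrapping its shape in a one-element tuple, so the rest of the code can treat every output uniformly. Illustrative:

  xla::Shape result = xla::ShapeUtil::MakeShape(xla::F32, {16});
  xla::Shape wrapped = xla::ShapeUtil::MakeTupleShape({result});
  CHECK(wrapped.IsTuple());
  CHECK_EQ(xla::ShapeUtil::TupleElementCount(wrapped), 1);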
/external/tensorflow/tensorflow/stream_executor/tpu/
tpu_executable_interface.cc
102 result.Result().on_device_shape(), result_index)); in AllocateOutputMemoryWithInputReuse()
144 const Shape& on_device_shape = result.Result().on_device_shape(); in AllocateOutputMemoryWithInputReuse() local
146 ShapeUtil::GetSubshape(on_device_shape, result_index); in AllocateOutputMemoryWithInputReuse()
c_api_conversions.cc
27 ApiConverter::FromC(&c_buffer->on_device_shape); in FromC()
352 ApiConverter::ToC(buffer.on_device_shape(), in ToC()
353 &c_device_buffer->on_device_shape); in ToC()
376 ApiConverter::Free(&c_buffer->on_device_shape); in Free()
c_api_decl.h
230 XLA_Shape on_device_shape; member
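The three excerpts above cover the full life cycle of the on_device_shape member in the C API: ToC serializes an xla::Shape into the XLA_Shape struct, FromC reads it back, and Free releases the C-side storage. A hedged round-trip sketch using those ApiConverter entry points, given some xla::Shape `device_shape`:

  XLA_Shape c_shape;
  ApiConverter::ToC(device_shape, &c_shape);   // xla::Shape -> C struct
  xla::Shape round_tripped = ApiConverter::FromC(&c_shape);
  ApiConverter::Free(&c_shape);                // release C-side storage
  CHECK(xla::ShapeUtil::Equal(device_shape, round_tripped));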
/external/tensorflow/tensorflow/compiler/xla/pjrt/
pjrt_stream_executor_client.cc
390 Shape on_device_shape = dst_buffer.on_device_shape(); in AllocateDestinationBuffer() local
422 if (on_device_shape.IsTuple()) { in AllocateDestinationBuffer()
457 on_device_shape, std::move(dst_device_buffer), client, device); in AllocateDestinationBuffer()
459 if (on_device_shape.IsTuple()) { in AllocateDestinationBuffer()
766 on_device_shape{py_buffer->on_device_shape()}, in BufferFromHostBuffer()
778 ShapedBuffer buffer = device_buffer->AsShapedBuffer(on_device_shape); in BufferFromHostBuffer()
892 on_device_shape{py_buffer->on_device_shape()}]() { in BufferFromHostLiteral()
901 ShapedBuffer buffer = device_buffer->AsShapedBuffer(on_device_shape); in BufferFromHostLiteral()
998 Shape on_device_shape, std::shared_ptr<TrackedDeviceBuffer> device_buffer, in PjRtStreamExecutorBuffer() argument
1001 on_device_shape_(std::move(on_device_shape)), in PjRtStreamExecutorBuffer()
[all …]
tracked_device_buffer.cc
107 shaped_buffer->on_device_shape(), [&](const Shape&, const ShapeIndex&) { in FromScopedShapedBuffer()
121 const Shape& on_device_shape) const { in AsShapedBuffer()
122 ShapedBuffer shaped_buffer(on_device_shape, device_ordinal_); in AsShapedBuffer()
pjrt_client.h
278 virtual const Shape& on_device_shape() const = 0;
326 ShapeUtil::DeviceShapeToHostShape(on_device_shape())); in ToLiteral()
tracked_device_buffer.h
141 ShapedBuffer AsShapedBuffer(const Shape& on_device_shape) const;
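A recurring pattern in the pjrt excerpts: the device shape travels alongside the TrackedDeviceBuffer, and host-side views are derived from it on demand (e.g. the host shape for ToLiteral). Illustrative, with an arbitrary BF16 shape:

  xla::Shape device_shape = xla::ShapeUtil::MakeShapeWithLayout(
      xla::BF16, {128, 128}, /*minor_to_major=*/{0, 1});
  xla::Shape host_shape =
      xla::ShapeUtil::DeviceShapeToHostShape(device_shape);
  // AsShapedBuffer(device_shape) then builds a transient ShapedBuffer view
  // over the tracked buffers (signature in tracked_device_buffer.h above).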
/external/tensorflow/tensorflow/compiler/xla/service/interpreter/
executable_base.cc
86 const auto& actual_shape = argument_buffers[i].on_device_shape(); in ExecuteAsyncOnStream()
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
178 argument_shapes.push_back(&arg->on_device_shape()); in Run()
246 argument_shapes.push_back(&arg->on_device_shape()); in RunAsync()
/external/tensorflow/tensorflow/compiler/xla/tests/
local_client_execute_test.cc
135 x_array.on_device_shape().layout(), LayoutUtil::MakeLayout({0, 1}))); in XLA_TEST_F()
141 y_array.on_device_shape().layout(), LayoutUtil::MakeLayout({1, 0}))); in XLA_TEST_F()
176 result_colmaj.on_device_shape().layout(), in XLA_TEST_F()
189 result_rowmaj.on_device_shape().layout(), in XLA_TEST_F()
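The test assertions above compare the layout recorded in on_device_shape() against an expected minor-to-major order. The comparison in isolation:

  xla::Layout colmaj = xla::LayoutUtil::MakeLayout({0, 1});
  xla::Layout rowmaj = xla::LayoutUtil::MakeLayout({1, 0});
  CHECK(!xla::LayoutUtil::Equal(colmaj, rowmaj));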
