Searched refs:on_host_shape (Results 1 – 25 of 26) sorted by relevance

/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
150 const xla::Shape& on_host_shape, in XRTTupleAllocation() argument
154 on_host_shape_(on_host_shape), in XRTTupleAllocation()
189 shaped_buffer.on_host_shape(), in CreateAndTransfer()
212 shaped_buffer.on_host_shape(), in CreateUninitialized()
221 const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape, in CreateFromBuffer() argument
225 *allocation = new XRTTupleAllocation(device_ordinal, allocator, on_host_shape, in CreateFromBuffer()
237 return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(), in CreateFromBuffer()
260 if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) { in WriteLiteral()
264 " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape())); in WriteLiteral()
283 xla::Literal literal(on_host_shape()); in SwapOut()
[all …]
xrt_state.h
109 const xla::Shape& on_host_shape,
203 const xla::Shape& on_host_shape() const;
245 const xla::Shape& on_host_shape,
xrt_util.cc
282 if (!InputShapeMatches(input_shape, tuple->on_host_shape())) { in GetInputTupleAllocations()
286 "; got ", tuple->on_host_shape().DebugString()); in GetInputTupleAllocations()
352 if (return_exploded_tuple && output_tuple->on_host_shape().IsTuple()) { in CreateExecuteOutput()
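
The xrt_state.cc hits above (lines 260 and 264) show WriteLiteral rejecting any literal whose shape differs from the allocation's on_host_shape(). A minimal sketch of that check, written for illustration rather than taken from the indexed file (the helper name is invented):

#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"

// Hypothetical helper mirroring the shape check WriteLiteral performs before
// copying a literal into an XRTTupleAllocation.
tensorflow::Status CheckLiteralMatchesHostShape(const xla::Literal& literal,
                                                const xla::Shape& on_host_shape) {
  if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape)) {
    return tensorflow::errors::InvalidArgument(
        "Literal shape ",
        xla::ShapeUtil::HumanStringWithLayout(literal.shape()),
        " does not match allocation shape ",
        xla::ShapeUtil::HumanStringWithLayout(on_host_shape));
  }
  return tensorflow::Status::OK();
}
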
/external/tensorflow/tensorflow/compiler/xla/service/
shaped_buffer.h
48 ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
64 const Shape& on_host_shape() const { return on_host_shape_; } in on_host_shape() function
109 void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) { in set_shapes() argument
155 explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,
transfer_manager.cc
61 Literal literal(device_buffer.on_host_shape()); in TransferLiteralFromDevice()
382 const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator, in AllocateScopedShapedBuffer() argument
384 if (!LayoutUtil::HasLayout(on_host_shape)) { in AllocateScopedShapedBuffer()
386 ShapeUtil::HumanStringWithLayout(on_host_shape)); in AllocateScopedShapedBuffer()
388 TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape)); in AllocateScopedShapedBuffer()
390 ? HostShapeToDeviceShape(on_host_shape) in AllocateScopedShapedBuffer()
391 : shape_representation_fn(on_host_shape); in AllocateScopedShapedBuffer()
shaped_buffer.cc
41 ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape, in ShapedBuffer() argument
122 ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape, in ScopedShapedBuffer() argument
shaped_buffer_test.cc
173 EXPECT_EQ(ssb.on_host_shape(), array_shape); in TEST()
transfer_manager.h
255 const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
executable.h
155 ExecutionOutput(Shape on_host_shape, Shape on_device_shape, in ExecutionOutput() argument
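
The shaped_buffer.h declarations above (lines 48 and 64) are the API most of these results call: at this revision a ShapedBuffer is constructed from separate host and device shapes and exposes the former through on_host_shape(). A minimal usage sketch, with the shapes and the device ordinal invented for illustration:

#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/platform/logging.h"

void InspectHostShape() {
  // The host shape is what host-side literals use; the device shape may
  // differ once the backend assigns layouts (kept identical here).
  xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
  xla::Shape device_shape = host_shape;

  xla::ShapedBuffer buffer(host_shape, device_shape, /*device_ordinal=*/0);

  // on_host_shape() is the accessor referenced throughout this page.
  CHECK(xla::ShapeUtil::Equal(buffer.on_host_shape(), host_shape));
}
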
/external/tensorflow/tensorflow/core/tpu/kernels/
tpu_reshard_variables_op_util.cc
132 const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape(); in BuildInputBuffers()
233 xla::ShapeUtil::TupleElementCount(result_buffers.on_host_shape()); in UpdateOutputVariables()
253 xla::ShapeUtil::GetSubshape(result_buffers.on_host_shape(), {i}); in UpdateOutputVariables()
265 TF_RET_CHECK(result_buffers.on_host_shape().IsTuple()); in UpdateOutputVariables()
266 TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(result_buffers.on_host_shape())); in UpdateOutputVariables()
271 const xla::Shape& output_host_shape = output_buffers.on_host_shape(); in UpdateOutputVariables()
tpu_execute_op.cc
230 const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape(); in BuildComputationInputs()
404 xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape()); in AllocateOutputTensors()
421 xla::ShapeUtil::GetSubshape(scoped_buffers.on_host_shape(), {i}); in AllocateOutputTensors()
432 TF_RET_CHECK(scoped_buffers.on_host_shape().IsTuple()); in AllocateOutputTensors()
433 TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(scoped_buffers.on_host_shape())); in AllocateOutputTensors()
704 std::make_shared<xla::Literal>(shaped_buffer.on_host_shape()); in DoWork()
737 shaped_buffer.on_host_shape())); in DoWork()
769 std::make_shared<xla::Literal>(output_buffers->buffers.on_host_shape()); in DoWork()
tpu_reshard_variables_op.cc
226 shaped_buffer.on_host_shape())); in DoTpuExecute()
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
48 xla::Shape on_host_shape = in AllocateShapedBuffer() local
50 xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape, in AllocateShapedBuffer()
xla_launch_util.cc
468 VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString(); in PopulateOutputs()
476 if (!output.on_host_shape().IsTuple()) { in PopulateOutputs()
479 xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}), in PopulateOutputs()
507 if (output.on_host_shape().is_dynamic()) { in PopulateOutputs()
xla_device_context.cc
251 xla_tensor->shaped_buffer().on_host_shape()), in CopyDeviceTensorToCPU()
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/
tpu_client_extension.cc
209 return buffer.on_host_shape().dimensions(); in PYBIND11_MODULE()
211 .def("xla_shape", &PyTpuBuffer::on_host_shape) in PYBIND11_MODULE()
215 return PrimitiveTypeToDtype(buffer->on_host_shape().element_type()); in PYBIND11_MODULE()
tpu_client.cc
250 child_shapes.push_back(child_buffer->on_host_shape()); in MakeTuple()
277 Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer, in PyTpuBuffer() argument
281 on_host_shape_(std::move(on_host_shape)), in PyTpuBuffer()
667 if (result.buffer->on_host_shape().IsTuple()) { in Execute()
754 if (results[i].buffer->on_host_shape().IsTuple()) { in ExecuteOnLocalDevices()
tpu_client.h
225 PyTpuBuffer(Shape on_host_shape,
235 const Shape& on_host_shape() const { return on_host_shape_; } in on_host_shape() function
/external/tensorflow/tensorflow/compiler/xla/tests/
local_client_execute_test.cc
211 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
212 EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
239 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
240 EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
317 EXPECT_TRUE(result.on_host_shape().IsTuple()); in XLA_TEST_F()
318 EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape())); in XLA_TEST_F()
964 auto executables, client->Compile(computation, {&buffer.on_host_shape()}, in BM_LocalClientOverhead()
cpu_gpu_fusion_test.cc
882 {&buffer0.on_host_shape(), &buffer1.on_host_shape(), in BM_ParallelFusion()
883 &buffer2.on_host_shape()}, in BM_ParallelFusion()
dynamic_ops_test.cc
793 host_shapes[i] = &shaped_buffers[i].on_host_shape(); in BM_DynamicSlice()
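
The local_client_execute_test.cc hits above check whether an execution result's on_host_shape() is a tuple and how many elements it carries. A sketch of the same inspection over any ScopedShapedBuffer result (illustrative only, not taken from the tests; the function name is invented):

#include <cstdint>

#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/shape_util.h"

// Number of top-level results: the tuple element count for tuple-shaped
// outputs, otherwise one, mirroring how the tests above size their expectations.
int64_t NumTopLevelResults(const xla::ScopedShapedBuffer& result) {
  const xla::Shape& shape = result.on_host_shape();
  return shape.IsTuple() ? xla::ShapeUtil::TupleElementCount(shape) : 1;
}
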
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
209 auto literal = std::make_shared<Literal>(arg->on_host_shape()); in DumpArguments()
228 auto literal = std::make_shared<Literal>(outputs.on_host_shape()); in DumpOutputsAndSaveSnapshot()
/external/tensorflow/tensorflow/compiler/xrt/kernels/
tpu_execute_op.cc
113 input_tuples.back()->on_host_shape())) { in GetChainedOpInputs()
118 tuple->on_host_shape().DebugString()); in GetChainedOpInputs()
xrt_state_ops.h
524 xla::Literal literal(allocation->on_host_shape()); in Compute()
577 xla::Shape shape = allocation->on_host_shape(); in Compute()
/external/tensorflow/tensorflow/compiler/xla/pjrt/
pjrt_stream_executor_client.cc
374 const Shape& on_host_shape, PjRtDevice* device, in AllocateDestinationBuffer() argument
378 if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) { in AllocateDestinationBuffer()
387 on_host_shape, se_client->allocator(), in AllocateDestinationBuffer()
