Matches for on_host_shape:

/external/tensorflow/tensorflow/compiler/xrt/

xrt_state.cc
  150: const xla::Shape& on_host_shape,  [in XRTTupleAllocation(), argument]
  154: on_host_shape_(on_host_shape),  [in XRTTupleAllocation()]
  189: shaped_buffer.on_host_shape(),  [in CreateAndTransfer()]
  212: shaped_buffer.on_host_shape(),  [in CreateUninitialized()]
  221: const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape,  [in CreateFromBuffer(), argument]
  225: *allocation = new XRTTupleAllocation(device_ordinal, allocator, on_host_shape,  [in CreateFromBuffer()]
  237: return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(),  [in CreateFromBuffer()]
  260: if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) {  [in WriteLiteral()]
  264: " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape()));  [in WriteLiteral()]
  283: xla::Literal literal(on_host_shape());  [in SwapOut()]
  [all …]

xrt_state.h
  109: const xla::Shape& on_host_shape,
  203: const xla::Shape& on_host_shape() const;
  245: const xla::Shape& on_host_shape,

xrt_util.cc
  282: if (!InputShapeMatches(input_shape, tuple->on_host_shape())) {  [in GetInputTupleAllocations()]
  286: "; got ", tuple->on_host_shape().DebugString());  [in GetInputTupleAllocations()]
  352: if (return_exploded_tuple && output_tuple->on_host_shape().IsTuple()) {  [in CreateExecuteOutput()]

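The WriteLiteral() hits in xrt_state.cc all enforce one invariant: a literal written into an allocation must match the allocation's on_host_shape(), layout included. A minimal standalone sketch of that check, assuming TF 2.4-era headers; the free-function form and its name are illustrative, only the xla:: and tensorflow:: calls are real:

```cpp
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/lib/core/errors.h"
#include "tensorflow/core/lib/core/status.h"

// Returns an error unless `literal` has exactly the shape the allocation was
// created with (its on-host shape); ShapeUtil::Equal also compares layouts.
tensorflow::Status CheckLiteralMatchesHostShape(
    const xla::Literal& literal, const xla::Shape& on_host_shape) {
  if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape)) {
    return tensorflow::errors::InvalidArgument(
        "Shape mismatch; literal=",
        xla::ShapeUtil::HumanStringWithLayout(literal.shape()),
        " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape));
  }
  return tensorflow::Status::OK();
}
```
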
/external/tensorflow/tensorflow/compiler/xla/service/

shaped_buffer.h
  48: ShapedBuffer(Shape on_host_shape, Shape on_device_shape, int device_ordinal);
  64: const Shape& on_host_shape() const { return on_host_shape_; }  [in on_host_shape(), function]
  109: void set_shapes(const Shape& on_host_shape, const Shape& on_device_shape) {  [in set_shapes(), argument]
  155: explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,

transfer_manager.cc
  61: Literal literal(device_buffer.on_host_shape());  [in TransferLiteralFromDevice()]
  382: const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,  [in AllocateScopedShapedBuffer(), argument]
  384: if (!LayoutUtil::HasLayout(on_host_shape)) {  [in AllocateScopedShapedBuffer()]
  386: ShapeUtil::HumanStringWithLayout(on_host_shape));  [in AllocateScopedShapedBuffer()]
  388: TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));  [in AllocateScopedShapedBuffer()]
  390: ? HostShapeToDeviceShape(on_host_shape)  [in AllocateScopedShapedBuffer()]
  391: : shape_representation_fn(on_host_shape);  [in AllocateScopedShapedBuffer()]

shaped_buffer.cc
  41: ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape,  [in ShapedBuffer(), argument]
  122: ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape,  [in ScopedShapedBuffer(), argument]

shaped_buffer_test.cc
  173: EXPECT_EQ(ssb.on_host_shape(), array_shape);  [in TEST()]

transfer_manager.h
  255: const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,

executable.h
  155: ExecutionOutput(Shape on_host_shape, Shape on_device_shape,  [in ExecutionOutput(), argument]

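The shaped_buffer.h and transfer_manager.cc hits above belong to the API generation in which a ShapedBuffer still tracks a host shape and a device shape separately. A small hedged sketch of how the two shapes are supplied and how on_host_shape() is typically consumed; the concrete shapes chosen here are arbitrary:

```cpp
#include "tensorflow/compiler/xla/literal.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/shape_util.h"

void ShapedBufferShapesSketch() {
  // The host shape is what callers and Literals see; the device shape may
  // differ, e.g. after the transfer manager picks a device-friendly layout.
  xla::Shape host_shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
  xla::Shape device_shape =
      xla::ShapeUtil::MakeShapeWithDescendingLayout(xla::F32, {2, 3});

  xla::ShapedBuffer buffer(host_shape, device_shape, /*device_ordinal=*/0);

  // on_host_shape() is the accessor every call site in this listing uses,
  // e.g. transfer_manager.cc sizes a Literal with it before a device read.
  xla::Literal literal(buffer.on_host_shape());
  (void)literal;
}
```
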
/external/tensorflow/tensorflow/core/tpu/kernels/

tpu_reshard_variables_op_util.cc
  132: const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape();  [in BuildInputBuffers()]
  233: xla::ShapeUtil::TupleElementCount(result_buffers.on_host_shape());  [in UpdateOutputVariables()]
  253: xla::ShapeUtil::GetSubshape(result_buffers.on_host_shape(), {i});  [in UpdateOutputVariables()]
  265: TF_RET_CHECK(result_buffers.on_host_shape().IsTuple());  [in UpdateOutputVariables()]
  266: TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(result_buffers.on_host_shape()));  [in UpdateOutputVariables()]
  271: const xla::Shape& output_host_shape = output_buffers.on_host_shape();  [in UpdateOutputVariables()]

tpu_execute_op.cc
  230: const xla::Shape& xla_shape = xla_tensor->shaped_buffer().on_host_shape();  [in BuildComputationInputs()]
  404: xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape());  [in AllocateOutputTensors()]
  421: xla::ShapeUtil::GetSubshape(scoped_buffers.on_host_shape(), {i});  [in AllocateOutputTensors()]
  432: TF_RET_CHECK(scoped_buffers.on_host_shape().IsTuple());  [in AllocateOutputTensors()]
  433: TF_RET_CHECK(!xla::ShapeUtil::IsNestedTuple(scoped_buffers.on_host_shape()));  [in AllocateOutputTensors()]
  704: std::make_shared<xla::Literal>(shaped_buffer.on_host_shape());  [in DoWork()]
  737: shaped_buffer.on_host_shape()));  [in DoWork()]
  769: std::make_shared<xla::Literal>(output_buffers->buffers.on_host_shape());  [in DoWork()]

tpu_reshard_variables_op.cc
  226: shaped_buffer.on_host_shape()));  [in DoTpuExecute()]

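Both TPU kernel files above walk executable outputs the same way: check that the result's host shape is a flat (non-nested) tuple, then visit each top-level element. A minimal sketch of that loop under the same TF 2.4-era headers, with the per-output tensor allocation elided:

```cpp
#include <cstdint>

#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/shape_util.h"

// Visits the top-level elements of a tuple-shaped result; the real kernels
// allocate one output tensor per element at the marked spot.
void VisitTupleOutputs(const xla::ShapedBuffer& result_buffers) {
  const xla::Shape& host_shape = result_buffers.on_host_shape();
  if (!host_shape.IsTuple() || xla::ShapeUtil::IsNestedTuple(host_shape)) {
    return;  // the kernels TF_RET_CHECK these two conditions instead
  }
  const int64_t element_count = xla::ShapeUtil::TupleElementCount(host_shape);
  for (int64_t i = 0; i < element_count; ++i) {
    const xla::Shape& subshape = xla::ShapeUtil::GetSubshape(host_shape, {i});
    (void)subshape;  // ...allocate/inspect the i-th output here...
  }
}
```
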
/external/tensorflow/tensorflow/compiler/jit/

xla_tensor.cc
  48: xla::Shape on_host_shape =  [in AllocateShapedBuffer(), local]
  50: xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,  [in AllocateShapedBuffer()]

xla_launch_util.cc
  468: VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();  [in PopulateOutputs()]
  476: if (!output.on_host_shape().IsTuple()) {  [in PopulateOutputs()]
  479: xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),  [in PopulateOutputs()]
  507: if (output.on_host_shape().is_dynamic()) {  [in PopulateOutputs()]

xla_device_context.cc
  251: xla_tensor->shaped_buffer().on_host_shape()),  [in CopyDeviceTensorToCPU()]

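The PopulateOutputs() hits show how the JIT launch path normalizes its result: a non-tuple output gets wrapped in a one-element tuple shape so the per-element output code handles both cases uniformly. A hedged sketch of just the shape-level step; the helper name is hypothetical:

```cpp
#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/shape_util.h"

// If the executable produced a single array, treat it as a 1-element tuple
// so downstream per-element handling works for both cases.
xla::Shape AsTupleHostShape(const xla::Shape& on_host_shape) {
  if (on_host_shape.IsTuple()) {
    return on_host_shape;
  }
  return xla::ShapeUtil::MakeTupleShape({on_host_shape});
}
```
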
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/

tpu_client_extension.cc
  209: return buffer.on_host_shape().dimensions();  [in PYBIND11_MODULE()]
  211: .def("xla_shape", &PyTpuBuffer::on_host_shape)  [in PYBIND11_MODULE()]
  215: return PrimitiveTypeToDtype(buffer->on_host_shape().element_type());  [in PYBIND11_MODULE()]

tpu_client.cc
  250: child_shapes.push_back(child_buffer->on_host_shape());  [in MakeTuple()]
  277: Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer,  [in PyTpuBuffer(), argument]
  281: on_host_shape_(std::move(on_host_shape)),  [in PyTpuBuffer()]
  667: if (result.buffer->on_host_shape().IsTuple()) {  [in Execute()]
  754: if (results[i].buffer->on_host_shape().IsTuple()) {  [in ExecuteOnLocalDevices()]

tpu_client.h
  225: PyTpuBuffer(Shape on_host_shape,
  235: const Shape& on_host_shape() const { return on_host_shape_; }  [in on_host_shape(), function]

/external/tensorflow/tensorflow/compiler/xla/tests/

local_client_execute_test.cc
  211: EXPECT_TRUE(result.on_host_shape().IsTuple());  [in XLA_TEST_F()]
  212: EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  [in XLA_TEST_F()]
  239: EXPECT_TRUE(result.on_host_shape().IsTuple());  [in XLA_TEST_F()]
  240: EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  [in XLA_TEST_F()]
  317: EXPECT_TRUE(result.on_host_shape().IsTuple());  [in XLA_TEST_F()]
  318: EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  [in XLA_TEST_F()]
  964: auto executables, client->Compile(computation, {&buffer.on_host_shape()},  [in BM_LocalClientOverhead()]

cpu_gpu_fusion_test.cc
  882: {&buffer0.on_host_shape(), &buffer1.on_host_shape(),  [in BM_ParallelFusion()]
  883: &buffer2.on_host_shape()},  [in BM_ParallelFusion()]

dynamic_ops_test.cc
  793: host_shapes[i] = &shaped_buffers[i].on_host_shape();  [in BM_DynamicSlice()]

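In the benchmarks above, the argument layouts passed to LocalClient::Compile are simply the on-host shapes of buffers that are already resident on the device. A sketch of that call shape, assuming the TF 2.4-era Compile() overload that returns one LocalExecutable per partition; the wrapper function is illustrative:

```cpp
#include <memory>
#include <vector>

#include "tensorflow/compiler/xla/client/executable_build_options.h"
#include "tensorflow/compiler/xla/client/local_client.h"
#include "tensorflow/compiler/xla/client/xla_computation.h"
#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/statusor.h"

// Compiles `computation` using the host shape of an existing device buffer
// as its single argument layout, mirroring BM_LocalClientOverhead above.
xla::StatusOr<std::vector<std::unique_ptr<xla::LocalExecutable>>>
CompileWithBufferLayout(xla::LocalClient* client,
                        const xla::XlaComputation& computation,
                        const xla::ScopedShapedBuffer& buffer) {
  return client->Compile(computation, {&buffer.on_host_shape()},
                         xla::ExecutableBuildOptions());
}
```
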
/external/tensorflow/tensorflow/compiler/xla/client/

local_client.cc
  209: auto literal = std::make_shared<Literal>(arg->on_host_shape());  [in DumpArguments()]
  228: auto literal = std::make_shared<Literal>(outputs.on_host_shape());  [in DumpOutputsAndSaveSnapshot()]

/external/tensorflow/tensorflow/compiler/xrt/kernels/

tpu_execute_op.cc
  113: input_tuples.back()->on_host_shape())) {  [in GetChainedOpInputs()]
  118: tuple->on_host_shape().DebugString());  [in GetChainedOpInputs()]

xrt_state_ops.h
  524: xla::Literal literal(allocation->on_host_shape());  [in Compute()]
  577: xla::Shape shape = allocation->on_host_shape();  [in Compute()]

/external/tensorflow/tensorflow/compiler/xla/pjrt/

pjrt_stream_executor_client.cc
  374: const Shape& on_host_shape, PjRtDevice* device,  [in AllocateDestinationBuffer(), argument]
  378: if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {  [in AllocateDestinationBuffer()]
  387: on_host_shape, se_client->allocator(),  [in AllocateDestinationBuffer()]

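The AllocateDestinationBuffer() hits include an early guard: an empty tuple has no leaf buffers, so it is rejected before any allocation happens. A minimal standalone sketch of that check; the helper name and error text are illustrative:

```cpp
#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/util.h"

// Rejects host shapes for which no destination buffer can be allocated.
xla::Status CheckAllocatableHostShape(const xla::Shape& on_host_shape) {
  if (on_host_shape.IsTuple() && on_host_shape.tuple_shapes_size() == 0) {
    return xla::InvalidArgument(
        "Cannot allocate a destination buffer for an empty tuple");
  }
  return xla::Status::OK();
}
```
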