/external/tensorflow/tensorflow/compiler/xla/service/
shaped_buffer.cc
    34   ShapedBuffer::ShapedBuffer(const Shape& on_host_shape,  in ShapedBuffer() argument
    37   : on_host_shape_(on_host_shape),  in ShapedBuffer()
    80   "), on-host shape=" + ShapeUtil::HumanStringWithLayout(on_host_shape()),  in ToString()
    106  ScopedShapedBuffer::ScopedShapedBuffer(const Shape& on_host_shape,  in ScopedShapedBuffer() argument
    110  : ShapedBuffer(on_host_shape, on_device_shape, allocator->platform(),  in ScopedShapedBuffer()
    162  xla::ShapeUtil::GetSubshape(on_host_shape(), {index});  in TakeSubTree()
generic_transfer_manager.cc
    81   device_buffer.on_host_shape()));  in TransferLiteralFromDeviceInternal()
    84   device_buffer.on_host_shape(),  in TransferLiteralFromDeviceInternal()
    111  device_buffer.on_host_shape()));  in TransferLiteralToDeviceAsync()
    114  ShapeUtil::Compatible(literal.shape(), device_buffer.on_host_shape()));  in TransferLiteralToDeviceAsync()
    121  device_buffer.on_host_shape(),  in TransferLiteralToDeviceAsync()
transfer_manager.cc
    59   Literal literal(device_buffer.on_host_shape());  in TransferLiteralFromDevice()
    310  const Shape& on_host_shape, DeviceMemoryAllocator* allocator,  in AllocateScopedShapedBuffer() argument
    312  if (!LayoutUtil::HasLayout(on_host_shape)) {  in AllocateScopedShapedBuffer()
    314  ShapeUtil::HumanStringWithLayout(on_host_shape));  in AllocateScopedShapedBuffer()
    316  TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));  in AllocateScopedShapedBuffer()
    317  const Shape on_device_shape = HostShapeToDeviceShape(on_host_shape);  in AllocateScopedShapedBuffer()
    320  ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape, allocator,  in AllocateScopedShapedBuffer()
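The AllocateScopedShapedBuffer and TransferLiteralFromDevice hits above form a common pair: allocate device memory for a laid-out host shape, then read the result back as a Literal shaped like on_host_shape(). The sketch below illustrates that round trip; the helper name, the device-ordinal argument, and the stream-based TransferLiteralFromDevice overload are assumptions inferred from the truncated declarations, not code from these files.

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/service/device_memory_allocator.h"
    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/service/transfer_manager.h"

    // Hypothetical helper: allocate device buffers for `host_shape`, then
    // read the result back as a Literal shaped like buffer.on_host_shape().
    xla::StatusOr<xla::Literal> AllocateAndReadBack(
        xla::TransferManager* transfer_manager,
        xla::DeviceMemoryAllocator* allocator,
        stream_executor::Stream* stream, const xla::Shape& host_shape) {
      // AllocateScopedShapedBuffer rejects shapes without a layout
      // (see the LayoutUtil::HasLayout check at line 312).
      xla::StatusOr<xla::ScopedShapedBuffer> buffer_or =
          transfer_manager->AllocateScopedShapedBuffer(
              host_shape, allocator, /*device_ordinal=*/0);
      if (!buffer_or.ok()) return buffer_or.status();
      xla::ScopedShapedBuffer buffer = buffer_or.ConsumeValueOrDie();
      // ... enqueue a computation that writes into `buffer` on `stream` ...
      // The Literal produced here is constructed from buffer.on_host_shape(),
      // as at line 59.
      return transfer_manager->TransferLiteralFromDevice(stream, buffer);
    }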
shaped_buffer.h
    45   ShapedBuffer(const Shape& on_host_shape, const Shape& on_device_shape,
    62   const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
    136  explicit ScopedShapedBuffer(const Shape& on_host_shape,
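As a small, hedged illustration of how the accessor declared at line 62 is typically consumed, here is a hypothetical helper (not part of this header) that inspects a buffer produced elsewhere, using only calls that appear in the hits above:

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    // Hypothetical helper: log the host-side shape of an existing buffer.
    void DescribeResult(const xla::ShapedBuffer& result) {
      const xla::Shape& host_shape = result.on_host_shape();
      LOG(INFO) << "on-host shape: "
                << xla::ShapeUtil::HumanStringWithLayout(host_shape);
      if (host_shape.IsTuple()) {
        LOG(INFO) << "tuple element count: "
                  << xla::ShapeUtil::TupleElementCount(host_shape);
      }
    }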
allocation_tracker.cc
    145  if (!shaped_buffer->on_host_shape().IsTuple()) {  in DeconstructTuple()
    162  ShapeUtil::GetTupleElementShape(shaped_buffer->on_host_shape(), i),  in DeconstructTuple()
transfer_manager.h
    223  const Shape& on_host_shape, DeviceMemoryAllocator* allocator,
service.cc
    341  argument_shapes.push_back(&arg->on_host_shape());  in CreateModuleConfig()
    882  const Shape& shape_arg = replicated_arguments.front()[i]->on_host_shape();  in Execute()
    951  return_shape = Shape(shaped_buffer->on_host_shape());  in TransferToClient()
    1122 *result->mutable_shape() = buffer->on_host_shape().ToProto();  in GetShape()
/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
    169  const xla::Shape& on_host_shape,  in XRTTupleAllocation() argument
    173  on_host_shape_(on_host_shape),  in XRTTupleAllocation()
    202  shaped_buffer.on_host_shape(),  in CreateAndTransfer()
    215  shaped_buffer.on_host_shape(),  in CreateFromBuffer()
    243  if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) {  in WriteLiteral()
    247  " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape()));  in WriteLiteral()
    260  const xla::Shape& XRTTupleAllocation::on_host_shape() { return on_host_shape_; }  in on_host_shape() function in tensorflow::XRTTupleAllocation
    300  xla::ShapeUtil::TryGetSubshape(parent->on_host_shape(), subshape));  in MakeSubBuffer()
    365  element.allocation->on_host_shape();  in ExpandTreeOfTuples()
    510  xla::ShapedBuffer shaped_buffer(on_host_shape(), on_device_shape(),  in ToShapedBuffer()
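The WriteLiteral hits at lines 243 and 247 show the shape guard XRT applies before writing into an existing allocation. A sketch of that check in isolation follows; the wrapper name is hypothetical and the actual device transfer step is omitted because its signature is not shown in this listing.

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xrt/xrt_state.h"
    #include "tensorflow/core/lib/core/errors.h"

    // Hypothetical wrapper around the shape check WriteLiteral performs.
    tensorflow::Status CheckWritableShape(
        tensorflow::XRTTupleAllocation* allocation,
        const xla::Literal& literal) {
      if (!xla::ShapeUtil::Equal(literal.shape(), allocation->on_host_shape())) {
        return tensorflow::errors::InvalidArgument(
            "Shape mismatch; literal=",
            xla::ShapeUtil::HumanStringWithLayout(literal.shape()),
            " device=",
            xla::ShapeUtil::HumanStringWithLayout(allocation->on_host_shape()));
      }
      // ... hand off to the actual device write here ...
      return tensorflow::Status::OK();
    }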
xrt_state.h
    163  const xla::Shape& on_host_shape();
    197  const xla::Shape& on_host_shape,
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
    47   const xla::Shape& on_host_shape,  in AllocateShapedBuffer() argument
    52   on_host_shape);  in AllocateShapedBuffer()
    54   xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,  in AllocateShapedBuffer()
xla_tensor.h
    53   Status AllocateShapedBuffer(DataType dtype, const xla::Shape& on_host_shape,
xla_launch_util.cc
    268  VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();  in PopulateOutputs()
    277  if (!output.on_host_shape().IsTuple()) {  in PopulateOutputs()
    280  xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),  in PopulateOutputs()
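The PopulateOutputs hits above branch on whether the result is a tuple and wrap a non-tuple result into a one-element tuple shape. A tiny sketch of that normalization, as a hypothetical helper mirroring lines 277 and 280:

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Treat a non-tuple result as a one-element tuple so downstream code
    // can index outputs uniformly.
    xla::Shape NormalizedResultShape(const xla::ShapedBuffer& output) {
      if (output.on_host_shape().IsTuple()) {
        return output.on_host_shape();
      }
      return xla::ShapeUtil::MakeTupleShape({output.on_host_shape()});
    }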
xla_device_context.cc
    241  xla_tensor->shaped_buffer().on_host_shape()),  in CopyDeviceTensorToCPU()
/external/tensorflow/tensorflow/compiler/xla/tests/
local_client_execute_test.cc
    208  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    209  EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    236  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    237  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    314  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
    315  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    930  computation, {&buffer.on_host_shape()}, ExecutableBuildOptions());  in BM_LocalClientOverhead()
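The BM_LocalClientOverhead hit at line 930 compiles a computation using an existing device buffer's host shape as its argument layout. A hedged sketch of that call pattern is below; the helper name and the StatusOr-of-unique_ptr return type are assumptions based on this code base's conventions, only the argument-layout idiom itself comes from the line above.

    #include <memory>

    #include "tensorflow/compiler/xla/client/local_client.h"

    // Hypothetical helper: compile `computation` expecting one argument laid
    // out exactly like an existing buffer's host shape.
    xla::StatusOr<std::unique_ptr<xla::LocalExecutable>> CompileForBuffer(
        xla::LocalClient* client, const xla::XlaComputation& computation,
        const xla::ScopedShapedBuffer& buffer) {
      return client->Compile(computation, {&buffer.on_host_shape()},
                             xla::ExecutableBuildOptions());
    }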
local_client_test_base.cc
    188  argument_layouts[i] = &arguments[i]->on_host_shape();  in ExecuteLocally()
fusion_test.cc
    888  {&buffer0.on_host_shape(), &buffer1.on_host_shape(),  in BM_ParallelFusion()
    889  &buffer2.on_host_shape()},  in BM_ParallelFusion()
dynamic_ops_test.cc
    773  host_shapes[i] = &shaped_buffers[i].on_host_shape();  in BM_DynamicSlice()
/external/tensorflow/tensorflow/compiler/xla/client/
local_client.cc
    68   arguments[i]->on_host_shape())) {  in ValidateExecutionOptions()
    77   ShapeUtil::HumanStringWithLayout(arguments[i]->on_host_shape()));  in ValidateExecutionOptions()
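The ValidateExecutionOptions hits compare each argument's host shape against what the compiled computation expects and report a layout-annotated string on mismatch. A hedged sketch of that kind of check follows; the compatibility test is borrowed from the ShapeUtil::Compatible usage listed under generic_transfer_manager.cc, and the helper itself is illustrative rather than the function in this file.

    #include "tensorflow/compiler/xla/service/shaped_buffer.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/lib/core/errors.h"

    // Hypothetical validation helper in the spirit of the hits above.
    tensorflow::Status ValidateArgumentShape(const xla::Shape& expected,
                                             const xla::ShapedBuffer& argument) {
      if (!xla::ShapeUtil::Compatible(expected, argument.on_host_shape())) {
        return tensorflow::errors::InvalidArgument(
            "Argument does not match expected shape: want ",
            xla::ShapeUtil::HumanStringWithLayout(expected), ", got ",
            xla::ShapeUtil::HumanStringWithLayout(argument.on_host_shape()));
      }
      return tensorflow::Status::OK();
    }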
/external/tensorflow/tensorflow/compiler/xrt/kernels/
xrt_state_ops.h
    493  xla::Literal literal(allocation->on_host_shape());  in Compute()
    549  xla::Shape shape = allocation->on_host_shape();  in Compute()
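Both Compute() hits size host-side data from the allocation's shape. A minimal sketch of the first pattern (line 493), with the actual device read-back omitted since its signature is not shown here and the helper name invented for illustration:

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xrt/xrt_state.h"

    // Hypothetical helper: build an empty host Literal shaped like the XRT
    // allocation, ready to be filled from device memory.
    xla::Literal MakeHostLiteral(tensorflow::XRTTupleAllocation* allocation) {
      return xla::Literal(allocation->on_host_shape());
    }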