/external/tensorflow/tensorflow/compiler/xla/python/ |
D | shared_device_buffer.cc |
   59  const Shape& on_host_shape, const Shape& on_device_shape,  in BufferFromScopedShapedBufferIterator() argument
   74  if (on_host_shape.IsTuple()) {  in BufferFromScopedShapedBufferIterator()
   80  on_host_shape.tuple_shapes(i), on_device_shape.tuple_shapes(i),  in BufferFromScopedShapedBufferIterator()
  102  shaped_buffer->on_host_shape(), shaped_buffer->on_device_shape(),  in FromScopedShapedBuffer()
  112  const Shape& on_host_shape, TransferManager* transfer_manager,  in MakeTuple() argument
  115  CHECK(on_host_shape.IsTuple() &&  in MakeTuple()
  116  on_host_shape.tuple_shapes_size() == children.size());  in MakeTuple()
  121  transfer_manager->GetByteSizeRequirement(on_host_shape)));  in MakeTuple()
  166  ShapedBuffer SharedDeviceBuffer::AsShapedBuffer(const Shape& on_host_shape,  in AsShapedBuffer() argument
  169  ShapedBuffer shaped_buffer(on_host_shape, on_device_shape, platform,  in AsShapedBuffer()
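The MakeTuple() hits at lines 115-116 show the invariant the tuple path relies on: the host shape must be a tuple with exactly one subshape per child buffer. A minimal sketch of that check as a standalone helper (the helper name and error wording are hypothetical; only the condition comes from the hits above):

#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/status.h"
#include "tensorflow/compiler/xla/util.h"

// Hypothetical helper: rejects host shapes that cannot describe a tuple of
// `num_children` device buffers, mirroring the CHECK at lines 115-116.
xla::Status CheckTupleHostShape(const xla::Shape& on_host_shape,
                                int num_children) {
  if (!on_host_shape.IsTuple() ||
      on_host_shape.tuple_shapes_size() != num_children) {
    return xla::InvalidArgument(
        "expected a tuple host shape with %d elements, got %s", num_children,
        xla::ShapeUtil::HumanString(on_host_shape).c_str());
  }
  return xla::Status::OK();
}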
|
D | dlpack.cc |
  263  dt.ndim = buffer->on_host_shape().dimensions_size();  in BufferToDLPackManagedTensor()
  265  buffer->on_host_shape().element_type()));  in BufferToDLPackManagedTensor()
  267  pack->shape = std::vector<int64>(buffer->on_host_shape().dimensions().begin(),  in BufferToDLPackManagedTensor()
  268  buffer->on_host_shape().dimensions().end());  in BufferToDLPackManagedTensor()
  269  pack->strides = StridesForShape(buffer->on_host_shape());  in BufferToDLPackManagedTensor()
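These hits populate the exported DLPack tensor's ndim, shape, and strides from the buffer's on_host_shape(). StridesForShape itself is not shown in the hits; below is a sketch of an equivalent computation for a dense host shape (the helper name is made up, and the row-major layout and element-count strides are my assumptions, not taken from dlpack.cc):

#include <cstdint>
#include <vector>

#include "tensorflow/compiler/xla/shape.h"

// Hypothetical stand-in for StridesForShape(): element-count strides for a
// dense, row-major host shape.
std::vector<std::int64_t> RowMajorStrides(const xla::Shape& on_host_shape) {
  std::vector<std::int64_t> strides(on_host_shape.dimensions_size());
  std::int64_t stride = 1;
  for (int i = on_host_shape.dimensions_size() - 1; i >= 0; --i) {
    strides[i] = stride;  // innermost (last) dimension is contiguous
    stride *= on_host_shape.dimensions(i);
  }
  return strides;
}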
|
D | shared_device_buffer.h |
  101  const Shape& on_host_shape, TransferManager* transfer_manager,
  114  ShapedBuffer AsShapedBuffer(const Shape& on_host_shape,
|
D | local_client.cc |
  425  host_shapes.push_back(buffer->on_host_shape());  in MakeTuple()
  433  Shape on_host_shape = ShapeUtil::MakeTupleShape(host_shapes);  in MakeTuple() local
  438  device_buffers, on_host_shape, transfer_manager, allocator,  in MakeTuple()
  441  std::move(on_host_shape), ShapeUtil::MakeTupleShape(device_shapes),  in MakeTuple()
  469  PyLocalBuffer::PyLocalBuffer(Shape on_host_shape, Shape on_device_shape,  in PyLocalBuffer() argument
  474  on_host_shape_(std::move(on_host_shape)),  in PyLocalBuffer()
  642  dst_buffer.on_host_shape(), dst_buffer.on_device_shape(),  in CopyToDevice()
  826  result_buffer.on_host_shape(), result_buffer.on_device_shape(),  in ExecuteHelper()
|
D | xla.cc |
  193  if (buffer.on_host_shape().element_type() == BF16 &&  in PyLocalBufferGetBuffer()
  205  const Shape& shape = buffer.on_host_shape();  in PyLocalBufferGetBuffer()
  282  if (buffer.on_host_shape().element_type() == BF16) {  in PyLocalBufferCudaArrayInterface()
  287  LayoutUtil::IsMonotonicWithDim0Major(buffer.on_host_shape().layout()));  in PyLocalBufferCudaArrayInterface()
  291  result["shape"] = IntSpanToTuple(shaped_buffer.on_host_shape().dimensions());  in PyLocalBufferCudaArrayInterface()
  294  shaped_buffer.on_host_shape().element_type()));  in PyLocalBufferCudaArrayInterface()
  682  << buffer->on_host_shape().ToString(/*print_layout=*/true);  in PYBIND11_MODULE()
  692  .def("shape", &PyLocalBuffer::on_host_shape)  in PYBIND11_MODULE()
|
D | local_client.h |
  214  PyLocalBuffer(Shape on_host_shape, Shape on_device_shape,
  224  const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
|
D | shared_device_buffer_test.cc |
  108  EXPECT_EQ(shaped_buffer.on_host_shape(), abc_tuple_shape);  in TEST()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | shaped_buffer.cc |
   34  ShapedBuffer::ShapedBuffer(Shape on_host_shape, Shape on_device_shape,  in ShapedBuffer() argument
   36  : on_host_shape_(std::move(on_host_shape)),  in ShapedBuffer()
   72  ShapeUtil::TryGetSubshape(on_host_shape(), index));  in SubShapedBuffer()
   93  "), on-host shape=" + ShapeUtil::HumanStringWithLayout(on_host_shape()),  in ToString()
  119  ScopedShapedBuffer::ScopedShapedBuffer(Shape on_host_shape,  in ScopedShapedBuffer() argument
  123  : ShapedBuffer(std::move(on_host_shape), std::move(on_device_shape),  in ScopedShapedBuffer()
  175  xla::ShapeUtil::GetSubshape(on_host_shape(), {index});  in TakeSubTree()
|
D | generic_transfer_manager.cc |
   75  device_buffer.on_host_shape()));  in TransferLiteralFromDevice()
   78  device_buffer.on_host_shape(),  in TransferLiteralFromDevice()
  109  device_buffer.on_host_shape()));  in TransferLiteralToDeviceAsync()
  112  ShapeUtil::Compatible(literal.shape(), device_buffer.on_host_shape()));  in TransferLiteralToDeviceAsync()
  119  device_buffer.on_host_shape(),  in TransferLiteralToDeviceAsync()
|
D | transfer_manager.cc |
   59  Literal literal(device_buffer.on_host_shape());  in TransferLiteralFromDevice()
  315  const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,  in AllocateScopedShapedBuffer() argument
  317  if (!LayoutUtil::HasLayout(on_host_shape)) {  in AllocateScopedShapedBuffer()
  319  ShapeUtil::HumanStringWithLayout(on_host_shape));  in AllocateScopedShapedBuffer()
  321  TF_RETURN_IF_ERROR(ShapeUtil::ValidateShape(on_host_shape));  in AllocateScopedShapedBuffer()
  322  Shape on_device_shape = HostShapeToDeviceShape(on_host_shape);  in AllocateScopedShapedBuffer()
  325  ScopedShapedBuffer shaped_buffer(on_host_shape, std::move(on_device_shape),  in AllocateScopedShapedBuffer()
|
D | shaped_buffer.h |
   45  ShapedBuffer(Shape on_host_shape, Shape on_device_shape,
   62  const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
  139  explicit ScopedShapedBuffer(Shape on_host_shape, Shape on_device_shape,
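The constructor at line 45 and the accessor at line 62 give the basic pattern for carrying a host-side logical shape alongside the backend's device shape. A minimal sketch, assuming the constructor's remaining parameters are the platform and device ordinal seen in the shaped_buffer.cc and client/local_client.cc hits:

#include "tensorflow/compiler/xla/service/shaped_buffer.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/core/platform/logging.h"

// Builds a ShapedBuffer whose logical (host) view is f32[2,3]; for this
// sketch the device shape is assumed identical.  `platform` and
// `device_ordinal` are placeholders supplied by the caller.
xla::ShapedBuffer MakeExampleShapedBuffer(
    const stream_executor::Platform* platform, int device_ordinal) {
  xla::Shape on_host_shape = xla::ShapeUtil::MakeShape(xla::F32, {2, 3});
  xla::Shape on_device_shape = on_host_shape;
  xla::ShapedBuffer buffer(on_host_shape, on_device_shape, platform,
                           device_ordinal);
  CHECK(xla::ShapeUtil::Equal(buffer.on_host_shape(), on_host_shape));
  return buffer;
}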
|
D | allocation_tracker.cc |
  146  if (!shaped_buffer->on_host_shape().IsTuple()) {  in DeconstructTuple()
  163  ShapeUtil::GetTupleElementShape(shaped_buffer->on_host_shape(), i),  in DeconstructTuple()
|
D | transfer_manager.h |
  232  const Shape& on_host_shape, se::DeviceMemoryAllocator* allocator,
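The AllocateScopedShapedBuffer() declaration above takes the host shape and an allocator; the transfer_manager.cc hits (lines 317-325) show what it does with them: validate the host shape's layout, derive the device shape via HostShapeToDeviceShape(), and allocate a ScopedShapedBuffer sized for both. A hedged sketch of a caller, assuming the next parameter after the allocator is the device ordinal (not visible in the truncated hit):

#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/service/transfer_manager.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/statusor.h"
#include "tensorflow/compiler/xla/util.h"

// Allocates device memory laid out for `on_host_shape`.  The layout check
// duplicates the guard shown at transfer_manager.cc:317 so callers get a
// clear error before reaching the backend.
xla::StatusOr<xla::ScopedShapedBuffer> AllocateForHostShape(
    xla::TransferManager* transfer_manager,
    stream_executor::DeviceMemoryAllocator* allocator, int device_ordinal,
    const xla::Shape& on_host_shape) {
  if (!xla::LayoutUtil::HasLayout(on_host_shape)) {
    return xla::InvalidArgument(
        "host shape %s has no layout",
        xla::ShapeUtil::HumanString(on_host_shape).c_str());
  }
  return transfer_manager->AllocateScopedShapedBuffer(on_host_shape, allocator,
                                                      device_ordinal);
}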
|
D | service.cc |
   356  argument_shapes.push_back(&arg->on_host_shape());  in CreateModuleConfig()
   902  const Shape& shape_arg = replicated_arguments.front()[i]->on_host_shape();  in Execute()
   972  return_shape = Shape(shaped_buffer->on_host_shape());  in TransferToClient()
  1143  *result->mutable_shape() = buffer->on_host_shape().ToProto();  in GetShape()
|
D | shaped_buffer_test.cc |
  171  EXPECT_EQ(ssb.on_host_shape(), array_shape);  in TEST()
|
/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_state.cc |
  149  const xla::Shape& on_host_shape,  in XRTTupleAllocation() argument
  153  on_host_shape_(on_host_shape),  in XRTTupleAllocation()
  189  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  in CreateAndTransfer()
  212  shaped_buffer.on_host_shape(), shaped_buffer.on_device_shape());  in CreateUninitialized()
  221  const xla::ShapedBuffer& shaped_buffer, const xla::Shape& on_host_shape,  in CreateFromBuffer() argument
  226  *allocation = new XRTTupleAllocation(device_ordinal, allocator, on_host_shape,  in CreateFromBuffer()
  237  return CreateFromBuffer(shaped_buffer, shaped_buffer.on_host_shape(),  in CreateFromBuffer()
  260  if (!xla::ShapeUtil::Equal(literal.shape(), on_host_shape())) {  in WriteLiteral()
  264  " device=", xla::ShapeUtil::HumanStringWithLayout(on_host_shape()));  in WriteLiteral()
  283  xla::Literal literal(on_host_shape());  in SwapOut()
  [all …]
|
D | xrt_state.h |
  105  const xla::Shape& on_host_shape,
  195  const xla::Shape& on_host_shape() const;
  238  const xla::Shape& on_host_shape,
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_tensor.cc |
   47  const xla::Shape& on_host_shape,  in AllocateShapedBuffer() argument
   52  on_host_shape);  in AllocateShapedBuffer()
   54  xla::ScopedShapedBuffer shaped_buffer(on_host_shape, on_device_shape,  in AllocateShapedBuffer()
|
D | xla_tensor.h |
   53  Status AllocateShapedBuffer(DataType dtype, const xla::Shape& on_host_shape,
|
D | xla_launch_util.cc |
  385  VLOG(2) << "Result tuple shape: " << output.on_host_shape().DebugString();  in PopulateOutputs()
  394  if (!output.on_host_shape().IsTuple()) {  in PopulateOutputs()
  397  xla::ShapeUtil::MakeTupleShape({nontuple_buffer.on_host_shape()}),  in PopulateOutputs()
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | local_client.cc |
  177  argument_shapes.push_back(&arg->on_host_shape());  in Run()
  197  auto literal = std::make_shared<Literal>(arg->on_host_shape());  in DumpArguments()
  216  auto literal = std::make_shared<Literal>(outputs.on_host_shape());  in DumpOutputsAndSaveSnapshot()
  237  argument_shapes.push_back(&arg->on_host_shape());  in RunAsync()
  261  Shape const& on_host_shape, const ShapeTree<MaybeOwningDeviceMemory>& tree,  in MaybeOwningShapeTreeToShapedBuffer() argument
  263  ShapedBuffer result(on_host_shape, tree.shape(), platform, device_ordinal);  in MaybeOwningShapeTreeToShapedBuffer()
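Run() and RunAsync() both collect &arg->on_host_shape() into the argument-shape list handed to compilation, the same pattern the test hit at local_client_execute_test.cc:958 uses with Compile(). A sketch of that pattern (the Compile() return type and the default ExecutableBuildOptions are assumptions inferred from those hits, not copied from the file):

#include <memory>
#include <vector>

#include "absl/types/span.h"
#include "tensorflow/compiler/xla/client/local_client.h"

// Derives the argument layouts for compilation from each argument buffer's
// logical host shape, as the Run()/RunAsync() hits above do.
xla::StatusOr<std::vector<std::unique_ptr<xla::LocalExecutable>>>
CompileForArguments(xla::LocalClient* client,
                    const xla::XlaComputation& computation,
                    absl::Span<const xla::ShapedBuffer* const> arguments) {
  std::vector<const xla::Shape*> argument_layouts;
  argument_layouts.reserve(arguments.size());
  for (const xla::ShapedBuffer* arg : arguments) {
    argument_layouts.push_back(&arg->on_host_shape());
  }
  return client->Compile(computation, argument_layouts,
                         xla::ExecutableBuildOptions());
}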
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | local_client_execute_test.cc |
  211  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
  212  EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
  239  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
  240  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
  317  EXPECT_TRUE(result.on_host_shape().IsTuple());  in XLA_TEST_F()
  318  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
  958  auto executables, client->Compile(computation, {&buffer.on_host_shape()},  in BM_LocalClientOverhead()
|
D | local_client_test_base.cc |
  194  argument_layouts[i] = &arguments[i]->on_host_shape();  in ExecuteLocally()
|
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/ |
D | tpu_client.h |
  171  PyTpuBuffer(Shape on_host_shape,
  181  const Shape& on_host_shape() const { return on_host_shape_; }  in on_host_shape() function
|
D | tpu_client.cc |
  239  child_shapes.push_back(child_buffer->on_host_shape());  in MakeTuple()
  266  Shape on_host_shape, std::shared_ptr<TpuSharedBuffer> device_buffer,  in PyTpuBuffer() argument
  270  on_host_shape_(std::move(on_host_shape)),  in PyTpuBuffer()
|