/external/tensorflow/tensorflow/compiler/tf2xla/kernels/

tensor_list_utils.cc
    127  *is_nested_list = (xla::ShapeUtil::TupleElementCount(list_shape) > 2);  in IsNestedTensorList()
    166  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in GetTensorListPushIndex()
    179  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in SetTensorListPushIndex()
    214  int tuple_size = xla::ShapeUtil::TupleElementCount(element_tensor_list_shape);  in GetTensorListShapeFromElementTensorListShape()
    253  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in CreateZerosTensorListWithShape()
    313  int list_tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in ExecuteTensorListPushBack()
    320  int element_tuple_size = xla::ShapeUtil::TupleElementCount(element_shape);  in ExecuteTensorListPushBack()
    378  int list_tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in ExecuteTensorListPopBack()
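These kernels lower a TensorFlow TensorList to an XLA tuple, and the hit at line 127 implies the convention: a flat list is a two-element (buffer, push-index) tuple, so any tuple with more elements carries nested lists. A minimal sketch of that check, assuming list_shape is already known to be a tuple shape; the helper name is hypothetical:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical sketch of the nested-list test at line 127: a flat
    // TensorList is assumed to lower to a (buffer, push_index) pair, so a
    // tuple with more than two elements must contain nested lists.
    bool IsNestedTensorListSketch(const xla::Shape& list_shape) {
      return xla::ShapeUtil::TupleElementCount(list_shape) > 2;
    }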
/external/tensorflow/tensorflow/compiler/xla/

layout_util.cc
    365  if (ShapeUtil::TupleElementCount(src) !=  in CopyLayoutInternal()
    366      ShapeUtil::TupleElementCount(*dst)) {  in CopyLayoutInternal()
    370  for (int64 i = 0; i < ShapeUtil::TupleElementCount(src); ++i) {  in CopyLayoutInternal()
    399  if (!rhs.IsTuple() || ShapeUtil::TupleElementCount(lhs) !=  in LayoutsInShapesEqual()
    400      ShapeUtil::TupleElementCount(rhs)) {  in LayoutsInShapesEqual()
    403  for (int i = 0; i < ShapeUtil::TupleElementCount(lhs); ++i) {  in LayoutsInShapesEqual()
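Both CopyLayoutInternal and LayoutsInShapesEqual gate their element-wise recursion on the two tuples having the same arity. A hedged sketch of that precondition, with a hypothetical helper name:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical helper: layouts can only be copied or compared
    // element-for-element across two tuple shapes of equal arity.
    bool TupleArityMatches(const xla::Shape& lhs, const xla::Shape& rhs) {
      return lhs.IsTuple() && rhs.IsTuple() &&
             xla::ShapeUtil::TupleElementCount(lhs) ==
                 xla::ShapeUtil::TupleElementCount(rhs);
    }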
literal_comparison.cc
    205  const int64 tuple_elements = ShapeUtil::TupleElementCount(shape);  in RecursiveElementCount()
    696  for (int i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {  in EqualHelper()
    792  for (int64 i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {  in NearHelper()
    879  if (ShapeUtil::TupleElementCount(expected) !=  in EqualShapes()
    880      ShapeUtil::TupleElementCount(actual)) {  in EqualShapes()
    883  ShapeUtil::TupleElementCount(expected),  in EqualShapes()
    884  ShapeUtil::TupleElementCount(actual));  in EqualShapes()
shape_util.cc
    382  return shape.IsTuple() && TupleElementCount(shape) == 0;  in IsEmptyTuple()
    385  /* static */ int64 ShapeUtil::TupleElementCount(const Shape& shape) {  in TupleElementCount() (definition, in xla::ShapeUtil)
    393  CHECK_GT(TupleElementCount(shape), index);  in GetTupleElementShape()
    409  CHECK_LE(start, TupleElementCount(tuple));  in SliceTuple()
    410  CHECK_LE(limit, TupleElementCount(tuple));  in SliceTuple()
    863  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in ForEachSubshapeHelper()
    880  for (int64 i = 0; i < ShapeUtil::TupleElementCount(*shape); ++i) {  in ForEachMutableSubshapeHelper()
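This file holds the definition itself (line 385) together with the CHECK guards in GetTupleElementShape and SliceTuple, which explains the pattern visible at most call sites in this listing: confirm the shape is a tuple, read the count once, then index into it. A minimal sketch of that pattern, assuming a current XLA checkout where int64_t has replaced the old int64 alias:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Guard-then-iterate: TupleElementCount() CHECK-fails on non-tuple
    // shapes, so callers test IsTuple() first (as IsEmptyTuple() does at
    // line 382 above) before walking the elements by index.
    void VisitTupleElements(const xla::Shape& shape) {
      if (!shape.IsTuple()) return;
      const int64_t count = xla::ShapeUtil::TupleElementCount(shape);
      for (int64_t i = 0; i < count; ++i) {
        const xla::Shape& element =
            xla::ShapeUtil::GetTupleElementShape(shape, i);
        (void)element;  // ...process element i here...
      }
    }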
literal.cc
    123  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in SetPiece()
    310      ShapeUtil::TupleElementCount(piece->subshape())) {  in CreateFromProto()
    313      ShapeUtil::TupleElementCount(piece->subshape()),  in CreateFromProto()
    340  for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {  in DecomposeTuple()
    978  for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {  in TupleToStringHelper()
   1322  for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {  in ConvertToShape()
   1996  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in CopyPieceSubtree()
   2092  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in BuildPieceSubtree()
   2121  CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));  in BorrowingLiteral()
shape_tree.h
    402  int64 count = ShapeUtil::TupleElementCount(shape);  in CountSubshapes()
    414  const int64 size = ShapeUtil::TupleElementCount(shape);  in InitChildren()
    450  const int64 size = ShapeUtil::TupleElementCount(shape);  in InitChildren()
/external/tensorflow/tensorflow/compiler/xla/service/

conditional_simplifier_test.cc
    327  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0);  in TEST_F()
    373  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
    419  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
    482  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
transfer_manager.cc
    248      ShapeUtil::TupleElementCount(device_subshape) > 0) {  in WriteTupleIndexTablesAsync()
    255  for (int64 i = 0; i < ShapeUtil::TupleElementCount(device_subshape);  in WriteTupleIndexTablesAsync()
    272  if (ShapeUtil::TupleElementCount(device_buffer.on_device_shape()) == 0) {  in WriteRootTupleIndexTable()
    281       i < ShapeUtil::TupleElementCount(device_buffer.on_device_shape()); ++i) {  in WriteRootTupleIndexTable()
hlo_element_type_converter.cc
     69  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in GetConvertedTupleShape()
     90  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in ConvertTupleElements()
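GetConvertedTupleShape walks a tuple shape element by element and rewrites the primitive type before rebuilding the tuple. A hedged sketch of that rebuild; the real pass parameterizes the eliminated and replacement types, and BF16 to F32 here is only an illustrative pairing:

    #include <vector>

    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch: copy each element shape, swap the primitive type where it
    // matches, and assemble a new tuple shape. Illustrative types only.
    xla::Shape ConvertedTupleShapeSketch(const xla::Shape& shape) {
      std::vector<xla::Shape> elements;
      const int64_t count = xla::ShapeUtil::TupleElementCount(shape);
      elements.reserve(count);
      for (int64_t i = 0; i < count; ++i) {
        xla::Shape element = xla::ShapeUtil::GetTupleElementShape(shape, i);
        if (element.element_type() == xla::BF16) {
          element.set_element_type(xla::F32);
        }
        elements.push_back(element);
      }
      return xla::ShapeUtil::MakeTupleShape(elements);
    }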
hlo_module_dce.cc
     62      ShapeUtil::TupleElementCount(xla_while->shape());  in RunWhileDCE()
conditional_simplifier.cc
    149      ShapeUtil::TupleElementCount(f->shape());  in TryRemoveConditional()
    201      ShapeUtil::TupleElementCount(param->shape())) {  in TryRemoveUnusedConditionalOperands()
    212  int64 old_tuple_element_count = ShapeUtil::TupleElementCount(old_shape);  in TryRemoveUnusedConditionalOperands()
gather_expander_test.cc
     97  ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);  in TEST_F()
generic_transfer_manager.cc
     45  TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape));  in WriteSingleTupleIndexTable()
allocation_tracker.cc
    160       i < ShapeUtil::TupleElementCount(shaped_buffer->on_device_shape());  in DeconstructTuple()
/external/tensorflow/tensorflow/compiler/tf2xla/

shape_util.cc
     31  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(shape);  in PopulateInfeedLayoutVector()
    135  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(input_shape);  in GetShapeWithLayout()
xla_jit_compiled_cpu_function_test.cc
    224  ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1);  in TEST()
    267  ASSERT_EQ(ShapeUtil::TupleElementCount(result), 2);  in TEST()
/external/tensorflow/tensorflow/compiler/xla/service/cpu/

cpu_transfer_manager.cc
    114  buffers.reserve(ShapeUtil::TupleElementCount(shape));  in TransferLiteralToInfeed()
    121  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in TransferLiteralToInfeed()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/

infeed_thunk.cc
     79  const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);  in ExecuteOnStream()
gpu_transfer_manager.cc
    134  const int64 tuple_element_count = ShapeUtil::TupleElementCount(shape);  in ShapeTreeToLiteral()
/external/tensorflow/tensorflow/compiler/xla/tests/

client_test.cc
    100  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.shape()));  in XLA_TEST_F()
buffer_donation_test.cc
    115  for (int i = 0; i < ShapeUtil::TupleElementCount(argument_literal.shape());  in RunAndCheck()
local_client_execute_test.cc
    212  EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    240  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    318  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
/external/tensorflow/tensorflow/c/eager/

c_api_debug.cc
     95  if (xla::ShapeUtil::TupleElementCount(padded_shape) != 2) {  in TensorDebugInfo()
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/

fused_ir_emitter.cc
    185  for (size_t i = 0; i < ShapeUtil::TupleElementCount(tuple->shape()); ++i) {  in HandleTuple()
/external/tensorflow/tensorflow/compiler/xla/python/

shared_device_buffer.cc
     76  int num_children = ShapeUtil::TupleElementCount(on_device_shape);  in BufferFromScopedShapedBufferIterator()