Cross-references for ShapeUtil::TupleElementCount

/external/tensorflow/tensorflow/compiler/xla/service/
  hlo_module_dce_test.cc
    199  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));  in TEST_F()
    309  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));  in TEST_F()
    310  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));  in TEST_F()
    376  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));  in TEST_F()
    377  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[1]->shape()));  in TEST_F()
    513  EXPECT_EQ(1, ShapeUtil::TupleElementCount(while_loops[0]->shape()));  in TEST_F()
  transfer_manager.cc
    301  ShapeUtil::TupleElementCount(device_subshape) > 0) {  in WriteTupleIndexTablesAsync()
    308  for (int64 i = 0; i < ShapeUtil::TupleElementCount(device_subshape);  in WriteTupleIndexTablesAsync()
    325  if (ShapeUtil::TupleElementCount(device_buffer.on_device_shape()) == 0) {  in WriteRootTupleIndexTable()
    334  i < ShapeUtil::TupleElementCount(device_buffer.on_device_shape()); ++i) {  in WriteRootTupleIndexTable()
    344  if (ShapeUtil::TupleElementCount(buffer_tree.shape()) == 0) {  in WriteRootTupleIndexTable()
    353  for (int64 i = 0; i < ShapeUtil::TupleElementCount(buffer_tree.shape());  in WriteRootTupleIndexTable()
  conditional_simplifier_test.cc
    322  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 0);  in TEST_F()
    368  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
    414  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
    477  EXPECT_EQ(ShapeUtil::TupleElementCount(conditional->shape()), 1);  in TEST_F()
  hlo_element_type_converter.cc
    69  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in GetConvertedTupleShape()
    90  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in ConvertTupleElements()
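Both hits in hlo_element_type_converter.cc above are the same pattern: walk a tuple shape's elements by index and rebuild the tuple with converted element types. A minimal sketch of that loop, assuming ShapeUtil::ChangeElementType and an illustrative from/to type pair; the real pass also rewrites the HLO instructions, not just the shape:

    #include <vector>
    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch only: rebuild `shape` (a tuple) with every `from`-typed element
    // converted to `to`, leaving other elements untouched.
    xla::Shape ConvertTupleElementTypes(const xla::Shape& shape,
                                        xla::PrimitiveType from,
                                        xla::PrimitiveType to) {
      std::vector<xla::Shape> elements;
      for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(shape); ++i) {
        xla::Shape element = xla::ShapeUtil::GetTupleElementShape(shape, i);
        if (element.element_type() == from) {
          element = xla::ShapeUtil::ChangeElementType(element, to);
        }
        elements.push_back(element);
      }
      return xla::ShapeUtil::MakeTupleShape(elements);
    }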
  hlo_module_dce.cc
    65  ShapeUtil::TupleElementCount(xla_while->shape());  in RunWhileDCE()
  gather_expander_test.cc
    102  ASSERT_EQ(ShapeUtil::TupleElementCount(while_shape), 4);  in TEST_F()
  generic_transfer_manager.cc
    45  TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape));  in WriteSingleTupleIndexTable()
  conditional_simplifier.cc
    185  ShapeUtil::TupleElementCount(f->shape());  in TryRemoveConditional()
    224  int64 old_tuple_element_count = ShapeUtil::TupleElementCount(param->shape());  in TryRemoveUnusedConditionalOperands()
  allocation_tracker.cc
    148  i < ShapeUtil::TupleElementCount(shaped_buffer->on_device_shape());  in DeconstructTuple()
/external/tensorflow/tensorflow/compiler/xla/
  layout_util.cc
    372  if (ShapeUtil::TupleElementCount(src) !=  in CopyLayoutInternal()
    373  ShapeUtil::TupleElementCount(*dst)) {  in CopyLayoutInternal()
    377  for (int64 i = 0; i < ShapeUtil::TupleElementCount(src); ++i) {  in CopyLayoutInternal()
    406  if (!rhs.IsTuple() || ShapeUtil::TupleElementCount(lhs) !=  in LayoutsInShapesEqual()
    407  ShapeUtil::TupleElementCount(rhs)) {  in LayoutsInShapesEqual()
    410  for (int i = 0; i < ShapeUtil::TupleElementCount(lhs); ++i) {  in LayoutsInShapesEqual()
  literal_comparison.cc
    205  const int64 tuple_elements = ShapeUtil::TupleElementCount(shape);  in RecursiveElementCount()
    685  for (int i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {  in EqualHelper()
    781  for (int64 i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) {  in NearHelper()
    868  if (ShapeUtil::TupleElementCount(expected) !=  in EqualShapes()
    869  ShapeUtil::TupleElementCount(actual)) {  in EqualShapes()
    872  ShapeUtil::TupleElementCount(expected),  in EqualShapes()
    873  ShapeUtil::TupleElementCount(actual));  in EqualShapes()
  shape_util.cc
    492  return shape.IsTuple() && TupleElementCount(shape) == 0;  in IsEmptyTuple()
    495  /* static */ int64 ShapeUtil::TupleElementCount(const Shape& shape) {  in TupleElementCount() (definition in xla::ShapeUtil)
    503  CHECK_GT(TupleElementCount(shape), index);  in GetTupleElementShape()
    519  CHECK_LE(start, TupleElementCount(tuple));  in SliceTuple()
    520  CHECK_LE(limit, TupleElementCount(tuple));  in SliceTuple()
    990  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in ForEachSubshapeHelper()
    1007  for (int64 i = 0; i < ShapeUtil::TupleElementCount(*shape); ++i) {  in ForEachMutableSubshapeHelper()
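Line 495 above is the definition itself: TupleElementCount CHECK-fails unless the shape is a tuple, then returns the number of top-level elements; it does not recurse into nested tuples. A minimal usage sketch of the call pattern the rest of this index relies on (the shapes here are illustrative, not from the indexed code):

    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch only: count and visit the top-level elements of a tuple shape.
    void VisitTopLevelElements() {
      xla::Shape tuple = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {2, 3}),
           xla::ShapeUtil::MakeShape(xla::S32, {})});
      // Guard with IsTuple(): TupleElementCount CHECK-fails on array shapes.
      if (tuple.IsTuple()) {
        for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(tuple); ++i) {
          const xla::Shape& element =
              xla::ShapeUtil::GetTupleElementShape(tuple, i);
          // A nested tuple element counts as a single element here.
          (void)element;
        }
      }
    }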
  literal.cc
    146  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in SetPiece()
    378  ShapeUtil::TupleElementCount(piece->subshape())) {  in CreateFromProto()
    381  ShapeUtil::TupleElementCount(piece->subshape()),  in CreateFromProto()
    408  for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {  in DecomposeTuple()
    1174  for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) {  in TupleToStringHelper()
    1540  for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) {  in ConvertToShape()
    2278  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in CopyPieceSubtree()
    2371  CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));  in MutableBorrowingLiteral()
    2401  for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in BuildPieceSubtree()
    2430  CHECK_EQ(src_buf_ptrs.size(), ShapeUtil::TupleElementCount(*shape_));  in BorrowingLiteral()
  shape_tree.h
    441  int64 count = ShapeUtil::TupleElementCount(shape);  in CountSubshapes()
    453  const int64 size = ShapeUtil::TupleElementCount(shape);  in InitChildren()
    489  const int64 size = ShapeUtil::TupleElementCount(shape);  in InitChildren()
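ShapeTree pre-sizes its node storage by counting subshapes, which is where the CountSubshapes hit at line 441 comes from. A sketch of the shape of that recursion, counting a shape plus all of its transitively nested tuple elements:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch only: a shape contributes one node, and a tuple additionally
    // contributes the nodes of each of its element shapes, recursively.
    int64_t CountSubshapes(const xla::Shape& shape) {
      int64_t count = 1;  // this shape itself
      if (shape.IsTuple()) {
        for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(shape); ++i) {
          count += CountSubshapes(xla::ShapeUtil::GetTupleElementShape(shape, i));
        }
      }
      return count;
    }

For ((f32[2], s32[]), f32[3]) this yields five nodes: the outer tuple, the inner tuple, and the three leaf arrays.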
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/
  tensor_list_utils.cc
    128  *is_nested_list = (xla::ShapeUtil::TupleElementCount(list_shape) > 2);  in IsNestedTensorList()
    167  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in GetTensorListPushIndex()
    180  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in SetTensorListPushIndex()
    229  int tuple_size = xla::ShapeUtil::TupleElementCount(element_tensor_list_shape);  in GetTensorListShapeFromElementTensorListShape()
    273  int tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in CreateZerosTensorListWithShape()
    363  int list_tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in ExecuteTensorListPushBack()
    370  int element_tuple_size = xla::ShapeUtil::TupleElementCount(element_shape);  in ExecuteTensorListPushBack()
    428  int list_tuple_size = xla::ShapeUtil::TupleElementCount(list_shape);  in ExecuteTensorListPopBack()
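The IsNestedTensorList hit at line 128 above encodes this file's TensorList layout assumption: a flat TensorList is a two-element tuple of (element buffer, push index), so a tuple with more than two elements must carry nested lists. Stripped of the surrounding Status plumbing, the check reduces to:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Sketch only: flat TensorList == (buffer, push_index); anything larger
    // carries nested list buffers as extra tuple elements.
    bool IsNestedTensorList(const xla::Shape& list_shape) {
      return xla::ShapeUtil::TupleElementCount(list_shape) > 2;
    }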
/external/tensorflow/tensorflow/compiler/tf2xla/
  shape_util.cc
    31  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(shape);  in PopulateInfeedLayoutVector()
    178  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(input_shape);  in GetShapeWithLayout()
  xla_jit_compiled_cpu_function_test.cc
    227  ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1);  in TEST()
    278  ASSERT_EQ(ShapeUtil::TupleElementCount(result), 2);  in TEST()
/external/tensorflow/tensorflow/compiler/xla/service/cpu/
  cpu_transfer_manager.cc
    114  buffers.reserve(ShapeUtil::TupleElementCount(shape));  in TransferLiteralToInfeed()
    121  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in TransferLiteralToInfeed()
/external/tensorflow/tensorflow/compiler/xla/tests/
  client_test.cc
    100  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.shape()));  in XLA_TEST_F()
  local_client_execute_test.cc
    212  EXPECT_EQ(3, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    240  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
    318  EXPECT_EQ(2, ShapeUtil::TupleElementCount(result.on_host_shape()));  in XLA_TEST_F()
/external/tensorflow/tensorflow/core/tpu/kernels/
  tpu_execute_op.cc
    204  if (arg_list.size() != xla::ShapeUtil::TupleElementCount(input_host_shape)) {  in BuildComputationInputs()
    208  xla::ShapeUtil::TupleElementCount(input_host_shape));  in BuildComputationInputs()
    405  xla::ShapeUtil::TupleElementCount(scoped_buffers.on_host_shape());  in AllocateOutputTensors()
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/
  fused_ir_emitter.cc
    134  for (size_t i = 0; i < ShapeUtil::TupleElementCount(tuple->shape()); ++i) {  in HandleTuple()
/external/tensorflow/tensorflow/compiler/xla/service/spmd/
  spmd_partitioner_util.cc
    58  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in CreateConstant()
    74  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in CreateZero()
    95  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in CreateOne()
    129  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in EvenlyPartitions()
    151  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in MakePartitionedShape()
    171  for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {  in MakeNonPaddedShapeForGivenPartition()
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
  horizontal_loop_fusion.cc
    51  return ShapeUtil::TupleElementCount(root->shape());  in GetOutputSizeOfFusion()
/external/tensorflow/tensorflow/compiler/xrt/
  xrt_util.cc
    351  xla::ShapeUtil::TupleElementCount(output_tuple->on_device_shape());  in CreateExecuteOutput()