/external/tensorflow/tensorflow/compiler/xla/ |
D | layout_util.cc |
      392   if (ShapeUtil::TupleElementCount(src) != in CopyLayoutInternal()
      393       ShapeUtil::TupleElementCount(*dst)) { in CopyLayoutInternal()
      397   for (int64 i = 0; i < ShapeUtil::TupleElementCount(src); ++i) { in CopyLayoutInternal()
      430   if (ShapeUtil::TupleElementCount(lhs) != in LayoutsInShapesEqual()
      431       ShapeUtil::TupleElementCount(rhs)) { in LayoutsInShapesEqual()
      434   for (int i = 0; i < ShapeUtil::TupleElementCount(lhs); ++i) { in LayoutsInShapesEqual()
|
D | shape_util.cc |
      326   return IsTuple(shape) && TupleElementCount(shape) == 0; in IsEmptyTuple()
      333   /* static */ int64 ShapeUtil::TupleElementCount(const Shape& shape) { in TupleElementCount() function in xla::ShapeUtil
      341   CHECK_GT(TupleElementCount(shape), index); in GetTupleElementShape()
      350   CHECK_LE(start, TupleElementCount(tuple)); in SliceTuple()
      351   CHECK_LE(limit, TupleElementCount(tuple)); in SliceTuple()
      904   for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in ForEachSubshapeHelper()
      921   for (int64 i = 0; i < ShapeUtil::TupleElementCount(*shape); ++i) { in ForEachMutableSubshapeHelper()
|
D | shape_tree.h |
      474   for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in InitChildren()
      485   for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in InitChildren()
|
D | shape_util.h | 412 static int64 TupleElementCount(const Shape& shape);
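
The call sites in this listing all go through the declaration above, and nearly all follow the same pattern: guard on the shape being a tuple, then iterate its elements by index. A minimal sketch of that pattern, assuming only the ShapeUtil API visible in this listing (IsTuple, TupleElementCount, GetTupleElementShape); the helper VisitTupleElements is hypothetical, not part of XLA:

    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Hypothetical helper illustrating the dominant call pattern in this
    // listing: guard with IsTuple(), then index over [0, TupleElementCount).
    void VisitTupleElements(const Shape& shape) {
      if (!ShapeUtil::IsTuple(shape)) {
        return;  // TupleElementCount() is only meaningful for tuple shapes.
      }
      for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) {
        // GetTupleElementShape bounds-checks the index (see shape_util.cc:341).
        const Shape& element = ShapeUtil::GetTupleElementShape(shape, i);
        (void)element;  // ... inspect or transform each element shape ...
      }
    }

    }  // namespace xla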
|
D | literal_util.cc |
      254   for (int i = 0; i < ShapeUtil::TupleElementCount(shape()); ++i) { in DecomposeTuple()
     1091   for (int i = 0; i < ShapeUtil::TupleElementCount(subshape); ++i) { in ToStringHelper()
     1814   ShapeUtil::TupleElementCount(piece.subshape())) { in CreateFromProto()
     1817   ShapeUtil::TupleElementCount(piece.subshape()), in CreateFromProto()
|
D | literal_util_test.cc |
     1422   ASSERT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 3); in TEST_F()
     1439   ASSERT_EQ(ShapeUtil::TupleElementCount(literal.shape()), 0); in TEST_F()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_transfer_manager.cc |
       70   buffers.reserve(ShapeUtil::TupleElementCount(shape)); in TransferLiteralToInfeed()
       77   for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in TransferLiteralToInfeed()
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | literal_test_util.cc |
       50   if (ShapeUtil::TupleElementCount(expected) != in EqualShapes()
       51       ShapeUtil::TupleElementCount(actual)) { in EqualShapes()
       54   << ShapeUtil::TupleElementCount(expected) in EqualShapes()
       56   << ShapeUtil::TupleElementCount(actual); in EqualShapes()
      318   for (int i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) { in Equal()
      589   const int64 tuple_elements = ShapeUtil::TupleElementCount(shape); in RecursiveElementCount()
      697   for (int64 i = 0; i < ShapeUtil::TupleElementCount(expected.shape()); ++i) { in Near()
|
D | client_test.cc | 98 EXPECT_EQ(2, ShapeUtil::TupleElementCount(result->shape())); in XLA_TEST_F()
|
D | local_client_execute_test.cc |
      217   EXPECT_EQ(3, ShapeUtil::TupleElementCount(result->on_host_shape())); in XLA_TEST_F()
      246   EXPECT_EQ(2, ShapeUtil::TupleElementCount(result->on_host_shape())); in XLA_TEST_F()
      327   EXPECT_EQ(2, ShapeUtil::TupleElementCount(result->on_host_shape())); in XLA_TEST_F()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | hlo_element_type_converter.cc |
       69   for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in GetConvertedTupleShape()
       90   for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in ConvertTupleElements()
|
D | generic_transfer_manager.cc | 58 TF_RET_CHECK(elements.size() == ShapeUtil::TupleElementCount(shape)); in WriteSingleTupleIndexTable()
|
D | allocation_tracker.cc | 117 i < ShapeUtil::TupleElementCount(shaped_buffer->on_device_shape()); in DeconstructTuple()
|
D | hlo_sharding.h | 90 CHECK_EQ(flattened_list.size(), ShapeUtil::TupleElementCount(tuple_shape)); in Tuple()
|
D | transfer_manager.cc | 131 for (int64 i = 0; i < ShapeUtil::TupleElementCount(device_subshape); in WriteTupleIndexTables()
|
D | while_loop_simplifier.cc | 332 const int64 tuple_size = ShapeUtil::TupleElementCount(while_init->shape()); in TryRemoveDeadWhileParams()
|
D | hlo_computation.cc | 483 for (int64 i = 0; i < ShapeUtil::TupleElementCount(instruction->shape()); in DeepCopyHelper()
|
D | algebraic_simplifier.cc |
      500   elems.reserve(ShapeUtil::TupleElementCount(literal.shape())); in BuildTupleConstant()
      501   for (int i = 0; i < ShapeUtil::TupleElementCount(literal.shape()); ++i) { in BuildTupleConstant()
|
/external/tensorflow/tensorflow/compiler/xla/python/ |
D | numpy_bridge.cc |
      119   int num_elements = ShapeUtil::TupleElementCount(shape); in PyShapeInfoFromXlaShape()
      120   dimensions = PyTuple_New(ShapeUtil::TupleElementCount(shape)); in PyShapeInfoFromXlaShape()
      352   int num_elements = ShapeUtil::TupleElementCount(literal.shape()); in PyObjectFromXlaLiteral()
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_transfer_manager.cc |
      114   buffers.reserve(ShapeUtil::TupleElementCount(shape)); in TransferLiteralToInfeed()
      121   for (int64 i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in TransferLiteralToInfeed()
|
D | parallel_cpu_executable.cc | 91 for (int i = 0; i < ShapeUtil::TupleElementCount(shape); ++i) { in MarkLiveAddressesInOutput()
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | tuple_ops.cc | 49 for (int i = 0; i < ShapeUtil::TupleElementCount(select.GetShape()); ++i) { in EmitTupleSelect()
|
D | fused_ir_emitter.cc | 143 for (size_t i = 0; i < ShapeUtil::TupleElementCount(tuple->shape()); ++i) { in HandleTuple()
|
/external/tensorflow/tensorflow/compiler/tf2xla/ |
D | xla_jit_compiled_cpu_function_test.cc | 127 ASSERT_EQ(ShapeUtil::TupleElementCount(result), 1); in TEST()
|
/external/tensorflow/tensorflow/compiler/aot/tests/ |
D | tfcompile_test.cc | 460 ASSERT_EQ(ShapeUtil::TupleElementCount(muladd_result), 2); in TEST()
|