/external/tensorflow/tensorflow/compiler/xla/service/

tuple_util.cc
    29   std::vector<HloInstruction*> tuple_elements;  in ExtractPrefix() local
    30   tuple_elements.reserve(elements);  in ExtractPrefix()
    32   tuple_elements.push_back(  in ExtractPrefix()
    38   HloInstruction::CreateTuple(tuple_elements));  in ExtractPrefix()
    48   std::vector<HloInstruction*> tuple_elements;  in AppendSuffix() local
    49   tuple_elements.reserve(input_shape.tuple_shapes_size());  in AppendSuffix()
    51   tuple_elements.push_back(  in AppendSuffix()
    55   tuple_elements.insert(tuple_elements.end(), trailing_values.begin(),  in AppendSuffix()
    58   HloInstruction::CreateTuple(tuple_elements));  in AppendSuffix()
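The tuple_util.cc hits above all follow one pattern: collect per-element HloInstruction pointers into a vector, then wrap them with HloInstruction::CreateTuple. A minimal sketch of that pattern follows; the helper name TuplePrefix and its exact contract are illustrative, not the actual tuple_util API.

    #include <cstdint>
    #include <vector>

    #include "tensorflow/compiler/xla/service/hlo_computation.h"
    #include "tensorflow/compiler/xla/service/hlo_instruction.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Builds a new tuple holding the first `count` elements of `input_tuple`.
    // `input_tuple` must be tuple-shaped with at least `count` elements.
    HloInstruction* TuplePrefix(HloInstruction* input_tuple, int64_t count) {
      HloComputation* computation = input_tuple->parent();
      const Shape& shape = input_tuple->shape();

      // Extract each element with a get-tuple-element instruction ...
      std::vector<HloInstruction*> tuple_elements;
      tuple_elements.reserve(count);
      for (int64_t i = 0; i < count; ++i) {
        tuple_elements.push_back(
            computation->AddInstruction(HloInstruction::CreateGetTupleElement(
                ShapeUtil::GetTupleElementShape(shape, i), input_tuple, i)));
      }
      // ... then re-wrap the extracted elements in a fresh tuple.
      return computation->AddInstruction(
          HloInstruction::CreateTuple(tuple_elements));
    }

    }  // namespace xla

The same gather-then-CreateTuple shape shows up again in stable_sort_expander.cc, hlo_element_type_converter.cc, and hlo_instructions.cc below.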
stable_sort_expander.cc
    113  std::vector<HloInstruction*> tuple_elements;  in ExpandInstruction() local
    114  tuple_elements.reserve(sort->operand_count());  in ExpandInstruction()
    116  tuple_elements.push_back(  in ExpandInstruction()
    120  expanded_sort = tuple_elements[0];  in ExpandInstruction()
    121  if (tuple_elements.size() > 1) {  in ExpandInstruction()
    123  HloInstruction::CreateTuple(tuple_elements));  in ExpandInstruction()
hlo_sharding_test.cc
    469  ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);  in TEST_F()
    471  ASSERT_EQ(sharding_new_metadata.tuple_elements()[0].metadata().size(), 1);  in TEST_F()
    473  sharding_new_metadata.tuple_elements()[0].metadata().front(),  in TEST_F()
    476  ASSERT_EQ(sharding_new_metadata.tuple_elements()[1].metadata().size(), 2);  in TEST_F()
    479  sharding_new_metadata.tuple_elements()[1].metadata()[i],  in TEST_F()
    483  ASSERT_EQ(sharding_new_metadata.tuple_elements()[2].metadata().size(), 1);  in TEST_F()
    485  sharding_new_metadata.tuple_elements()[2].metadata().front(),  in TEST_F()
    523  ASSERT_EQ(sharding_new_metadata.tuple_elements().size(), 3);  in TEST_F()
    525  for (const auto& sub_sharding : sharding_new_metadata.tuple_elements()) {  in TEST_F()
    559  EXPECT_EQ(sharding_no_metadata.tuple_elements().size(), 3);  in TEST_F()
    [all …]
hlo_element_type_converter.cc
    89   std::vector<HloInstruction*> tuple_elements;  in ConvertTupleElements() local
    100  tuple_elements.push_back(element);  in ConvertTupleElements()
    103  HloInstruction::CreateTuple(tuple_elements));  in ConvertTupleElements()
sharding_propagation_test.cc
    719  {copy_param0->sharding(), reduce->sharding().tuple_elements()[0],  in TEST_P()
    720  reduce->sharding().tuple_elements()[1]}) {  in TEST_P()
    769  EXPECT_THAT(gte1->sharding().tuple_elements()[0],  in TEST_P()
    771  EXPECT_THAT(gte1->sharding().tuple_elements()[1],  in TEST_P()
    776  {gte->sharding(), gte1->sharding().tuple_elements()[0],  in TEST_P()
    777  gte1->sharding().tuple_elements()[1], gte2->sharding()}) {  in TEST_P()
    821  EXPECT_THAT(tuple->sharding().tuple_elements()[0],  in TEST_P()
    823  EXPECT_THAT(tuple->sharding().tuple_elements()[1], ShardingMetadata({}));  in TEST_P()
    824  EXPECT_THAT(tuple1->sharding().tuple_elements()[0], ShardingMetadata({}));  in TEST_P()
    825  EXPECT_THAT(tuple1->sharding().tuple_elements()[1],  in TEST_P()
    [all …]
sharding_propagation.cc
    60    return absl::c_any_of(sharding.tuple_elements(), IsSpatiallyPartitioned);  in IsSpatiallyPartitioned()
    651   instruction->sharding().tuple_elements();  in InferShardingFromOperands()
    660   operand->sharding().tuple_elements()[i],  in InferShardingFromOperands()
    663   operand->sharding().tuple_elements()[i];  in InferShardingFromOperands()
    1295  new_sharding.tuple_elements()[sharding_index] = user.sharding();  in GetShardingFromUser()
    1297  for (int64 i = 0; i < user.sharding().tuple_elements().size(); ++i) {  in GetShardingFromUser()
    1298  new_sharding.tuple_elements()[sharding_index + i] =  in GetShardingFromUser()
    1299  user.sharding().tuple_elements()[i];  in GetShardingFromUser()
    1460  for (int i = 0, e = a.tuple_elements().size(); i < e; ++i) {  in SameShardingMetadata()
    1461  if (!same_metadata(a.tuple_elements()[i].metadata(),  in SameShardingMetadata()
    [all …]
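The IsSpatiallyPartitioned hit at line 60 (and HasReplicatedSharding in spmd_partitioner_util.cc further down) show the usual way a per-leaf predicate is lifted to a tuple sharding: recurse through tuple_elements() with absl::c_any_of. A minimal sketch of that idiom; AnyLeafIsTileMaximal is a made-up example predicate, not a function in this file.

    #include "absl/algorithm/container.h"
    #include "tensorflow/compiler/xla/service/hlo_sharding.h"

    namespace xla {

    // Returns true if any leaf of `sharding` (or the sharding itself, when it
    // is not a tuple) is tile-maximal.
    bool AnyLeafIsTileMaximal(const HloSharding& sharding) {
      if (sharding.IsTuple()) {
        // Recurse into the per-element sub-shardings.
        return absl::c_any_of(sharding.tuple_elements(), AnyLeafIsTileMaximal);
      }
      return sharding.IsTileMaximal();
    }

    }  // namespace xla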
hlo_sharding.h
    267  std::vector<HloSharding>& tuple_elements() { return tuple_elements_; }  in tuple_elements() function
    268  const std::vector<HloSharding>& tuple_elements() const {  in tuple_elements() function
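These two declarations are the mutable and const accessors that most hits in this section go through; for a tuple sharding, tuple_elements() holds one sub-sharding per leaf of the tuple shape, in flattened order. A minimal usage sketch; CountReplicatedLeaves is illustrative and not part of hlo_sharding.h.

    #include <cstdint>

    #include "tensorflow/compiler/xla/service/hlo_sharding.h"

    namespace xla {

    // Counts how many leaf sub-shardings of `sharding` are replicated.
    int64_t CountReplicatedLeaves(const HloSharding& sharding) {
      if (!sharding.IsTuple()) {
        return sharding.IsReplicated() ? 1 : 0;
      }
      int64_t count = 0;
      for (const HloSharding& sub_sharding : sharding.tuple_elements()) {
        if (sub_sharding.IsReplicated()) {
          ++count;
        }
      }
      return count;
    }

    }  // namespace xla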
hlo_sharding.cc
    211  for (auto& tuple_element_sharding : tuple_elements()) {  in UsedDevices()
    217  element_count = tuple_elements().size();  in UsedDevices()
    672  for (HloSharding& sub_sharding : sharding.tuple_elements()) {  in WithMetadata()
    684  for (HloSharding& sub_sharding : sharding.tuple_elements()) {  in WithoutMetadata()
hlo_sharding_util.cc
    43    const auto& lhs_shardings = lhs.tuple_elements();  in IsShardingMoreSpecific()
    44    const auto& rhs_shardings = rhs.tuple_elements();  in IsShardingMoreSpecific()
    74    for (int64 i = 0; i < old.tuple_elements().size(); ++i) {  in MergeSharding()
    76    MergeSharding(old.tuple_elements()[i], &to_merge->tuple_elements()[i],  in MergeSharding()
    1101  for (const auto& subsharding : sharding.tuple_elements()) {  in DevicesForShardingInternal()
hlo_instructions.cc
    1745  HloInstruction::InstructionVector tuple_elements;  in CloneAndFuseInternal() local
    1748  tuple_elements = fused_root->operands();  in CloneAndFuseInternal()
    1750  tuple_elements.push_back(fused_root);  in CloneAndFuseInternal()
    1755  tuple_elements.push_back(inst);  in CloneAndFuseInternal()
    1758  tuple_elements.push_back(clone);  in CloneAndFuseInternal()
    1761  HloInstruction::CreateTuple(tuple_elements));  in CloneAndFuseInternal()
    1776  int64 index = tuple_elements.size();  in CloneAndFuseInternal()
hlo_sharding_metadata.cc
    239  domain_sharding.tuple_elements().size());  in ApplyShardingFromUsers()
/external/tensorflow/tensorflow/compiler/tf2xla/

shape_util.cc
    31   int64 tuple_elements = xla::ShapeUtil::TupleElementCount(shape);  in PopulateInfeedLayoutVector() local
    32   for (int64 i = 0; i < tuple_elements; ++i) {  in PopulateInfeedLayoutVector()
    178  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(input_shape);  in GetShapeWithLayout() local
    180  shapes.reserve(tuple_elements);  in GetShapeWithLayout()
    182  for (int64 i = 0; i < tuple_elements; ++i) {  in GetShapeWithLayout()
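Both shape_util.cc hits iterate a tuple shape by element count, the same loop used again in infeed_ops.cc and literal_comparison.cc below. A minimal sketch of that loop, assuming only xla::ShapeUtil; the TupleElementShapes helper name is illustrative.

    #include <cstdint>
    #include <vector>

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace tensorflow {

    // Returns the element shapes of `shape` if it is a tuple, or `shape`
    // itself as a single-element vector otherwise.
    std::vector<xla::Shape> TupleElementShapes(const xla::Shape& shape) {
      std::vector<xla::Shape> shapes;
      if (!shape.IsTuple()) {
        shapes.push_back(shape);
        return shapes;
      }
      const int64_t tuple_elements = xla::ShapeUtil::TupleElementCount(shape);
      shapes.reserve(tuple_elements);
      for (int64_t i = 0; i < tuple_elements; ++i) {
        shapes.push_back(xla::ShapeUtil::GetTupleElementShape(shape, i));
      }
      return shapes;
    }

    }  // namespace tensorflow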
xla_compiler.cc
    380  sub_sharding.tuple_elements();  in BuildComputation()
/external/tensorflow/tensorflow/compiler/xla/tests/

transfer_manager_test.cc
    364  std::vector<Literal> tuple_elements;  in Run() local
    366  tuple_elements.push_back(  in Run()
    369  Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));  in Run()
    394  std::vector<Literal> tuple_elements;  in Run() local
    396  tuple_elements.push_back(  in Run()
    399  Literal literal = LiteralUtil::MakeTupleOwned(std::move(tuple_elements));  in Run()
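Both Run() hits build a tuple literal the same way: fill a std::vector<Literal> and hand ownership to LiteralUtil::MakeTupleOwned. A minimal sketch with made-up element values; MakeExampleTuple is illustrative, not part of the test above.

    #include <cstdint>
    #include <utility>
    #include <vector>

    #include "tensorflow/compiler/xla/literal.h"
    #include "tensorflow/compiler/xla/literal_util.h"

    namespace xla {

    // Builds a two-element tuple literal: (f32[3], s32[]).
    Literal MakeExampleTuple() {
      std::vector<Literal> tuple_elements;
      tuple_elements.push_back(LiteralUtil::CreateR1<float>({1.0f, 2.0f, 3.0f}));
      tuple_elements.push_back(LiteralUtil::CreateR0<int32_t>(42));
      // MakeTupleOwned moves the element literals into the resulting tuple.
      return LiteralUtil::MakeTupleOwned(std::move(tuple_elements));
    }

    }  // namespace xla

The literal_test.cc hit further down uses the non-owning counterpart, LiteralUtil::MakeTuple, which takes pointers to existing literals instead of moving them.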
/external/tensorflow/tensorflow/compiler/xla/service/g3doc/

hlo_parser.md
    84   | '(' tuple_elements ')'
    86   tuple_elements
/external/tensorflow/tensorflow/core/tpu/kernels/

infeed_ops.cc
    134  int64 tuple_elements = xla::ShapeUtil::TupleElementCount(*output_shape);  in GetInfeedShapeWithLayout() local
    135  for (int64 i = 0; i < tuple_elements; ++i) {  in GetInfeedShapeWithLayout()
/external/tensorflow/tensorflow/compiler/xla/

literal_comparison.cc
    205  const int64 tuple_elements = ShapeUtil::TupleElementCount(shape);  in RecursiveElementCount() local
    207  for (int64 i = 0; i < tuple_elements; ++i) {  in RecursiveElementCount()
literal_test.cc
    1731  Literal tuple_elements[] = {  in TEST_F() local
    1737  {&tuple_elements[0], &tuple_elements[1], &nil_literal});  in TEST_F()
/external/tensorflow/tensorflow/compiler/xla/service/spmd/

spmd_partitioner.cc
    1417  std::vector<HloSharding> subshardings = sharding.tuple_elements();  in Preprocess()
    1434  hlo->sharding().tuple_elements(),  in Preprocess()
spmd_partitioner_util.cc
    49   return absl::c_any_of(sharding.tuple_elements(), HasReplicatedSharding);  in HasReplicatedSharding()