/external/tensorflow/tensorflow/compiler/xla/
D | literal.cc
    147  const Shape& subshape = shape.tuple_shapes(i);  in SetPiece() [local]
    150  child_piece.set_subshape(&subshape);  in SetPiece()
    152  SetPiece(subshape, &child_piece, allocate_arrays);  in SetPiece()
    180  CHECK(&root_piece_->subshape() == shape_.get());  in Literal()
    209  DCHECK(&other.root_piece_->subshape() == other.shape_.get());  in operator=()
    213  DCHECK(&root_piece_->subshape() == shape_.get());  in operator=()
    222  if (piece->subshape().IsArray()) {  in CreateFromShape()
    376  if (piece->subshape().IsTuple()) {  in CreateFromProto()
    378  ShapeUtil::TupleElementCount(piece->subshape())) {  in CreateFromProto()
    381  ShapeUtil::TupleElementCount(piece->subshape()),  in CreateFromProto()
    [all …]
|
D | literal.h
     68  const Shape& shape() const { return root_piece().subshape(); }  in shape()
    394  return subshape().dimensions_size() * sizeof(int32);  in dynamic_size_buffer_bytes()
    399  const Shape& subshape() const { return *subshape_; }  in subshape() [function]
    400  void set_subshape(const Shape* subshape) { subshape_ = subshape; }  in set_subshape() [argument]
    403  int64 size_bytes() const { return ShapeUtil::ByteSizeOf(subshape()); }  in size_bytes()
    406  int64 element_count() const { return ShapeUtil::ElementsIn(subshape()); }  in element_count()
    895  DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());  in data()
    896  DCHECK_EQ(subshape().element_type(),  in data()
    901  << PrimitiveType_Name(subshape().element_type());  in data()
    908  DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());  in data()
    [all …]
|
D | shape_util.cc
    898  const Shape* subshape = &shape;  in IndexIsValid() [local]
    900  if (!subshape->IsTuple() || i >= subshape->tuple_shapes_size() || i < 0) {  in IndexIsValid()
    903  subshape = &subshape->tuple_shapes(i);  in IndexIsValid()
    954  for (const Shape& subshape : shape.tuple_shapes()) {  in GetLeafCount() [local]
    955  count += GetLeafCount(subshape);  in GetLeafCount()
   1024  [&func](const Shape& subshape, const ShapeIndex& index) {  in ForEachSubshape() [argument]
   1025  func(subshape, index);  in ForEachSubshape()
   1037  [&func](Shape* subshape, const ShapeIndex& index) {  in ForEachMutableSubshape() [argument]
   1038  func(subshape, index);  in ForEachMutableSubshape()
   1650  for (const Shape& subshape : shape.tuple_shapes()) {  in Hash() [local]
    [all …]
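The ForEachSubshape / ForEachMutableSubshape hits above are the traversal primitives most other results in this listing lean on: visit every subshape of a (possibly nested) tuple shape together with its ShapeIndex. A minimal sketch of the read-only idiom, assuming only the XLA header shown in these results (CountArrayLeaves is a hypothetical name, not a function from the hits):

    #include <cstdint>
    #include "tensorflow/compiler/xla/shape_util.h"

    // Count the array (non-tuple) leaves of a possibly nested tuple shape.
    // The traversal call mirrors the ForEachSubshape hits above.
    int64_t CountArrayLeaves(const xla::Shape& shape) {
      int64_t leaves = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape,
          [&leaves](const xla::Shape& subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape.IsArray()) {
              ++leaves;
            }
          });
      return leaves;
    }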
|
D | literal_util.cc
     51  &result_shape, [](Shape* subshape, const ShapeIndex&) {  in ConvertType() [argument]
     52  if (subshape->element_type() ==  in ConvertType()
     54  subshape->set_element_type(  in ConvertType()
     64  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in ConvertType() [argument]
     65  if (subshape.IsArray()) {  in ConvertType()
     66  if (subshape.element_type() ==  in ConvertType()
|
D | shape.cc
     88  for (const Shape& subshape : tuple_shapes_) {  in is_static() [local]
     89  if (!subshape.is_static()) {  in is_static()
|
D | shape_util_test.cc
    503  shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  in TEST() [argument]
    504  EXPECT_EQ(&shape, &subshape);  in TEST()
    518  shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  in TEST() [argument]
    520  ShapeUtil::Equal(subshape, ShapeUtil::GetSubshape(shape, index)));  in TEST()
    526  EXPECT_EQ(33, ShapeUtil::ElementsIn(subshape));  in TEST()
    540  &shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {  in TEST() [argument]
    542  EXPECT_EQ(subshape, ShapeUtil::GetMutableSubshape(&shape, index));  in TEST()
    548  EXPECT_EQ(33, ShapeUtil::ElementsIn(*subshape));  in TEST()
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | optimize_input_output_buffer_alias.cc
     57  input_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build() [argument]
     58  if (subshape.IsTuple()) {  in Build()
     61  parameter_entries.emplace_back(Entry{subshape, index, false});  in Build()
     67  output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build() [argument]
     68  if (subshape.IsTuple()) {  in Build()
     72  if (Shape::Equal()(entry.shape, subshape) && !entry.used) {  in Build()
|
D | bfloat16_propagation.cc
     55  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineFusionComputationPrecision() [argument]
     56  if (subshape.element_type() != F32) {  in DetermineFusionComputationPrecision()
    103  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in RevertIfFusionInternalBF16Changes() [argument]
    110  if (subshape.element_type() != F32) {  in RevertIfFusionInternalBF16Changes()
    180  const Shape& subshape, const ShapeIndex& index) {  in DetermineWhileComputationsPrecision() [argument]
    181  if (subshape.element_type() != F32) {  in DetermineWhileComputationsPrecision()
    215  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineConditionalComputationsPrecision() [argument]
    216  if (subshape.element_type() != F32) {  in DetermineConditionalComputationsPrecision()
    238  const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  in AllUsersConsumeBF16() [local]
    239  if (subshape.element_type() != BF16 && subshape.element_type() != F32) {  in AllUsersConsumeBF16()
    [all …]
|
D | hlo_element_type_converter.cc
     70  Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  in GetConvertedTupleShape() [local]
     71  CHECK(!subshape.IsTuple());  in GetConvertedTupleShape()
     72  if (subshape.element_type() == from_type) {  in GetConvertedTupleShape()
     73  subshape = ShapeUtil::ChangeElementType(subshape, to_type);  in GetConvertedTupleShape()
     75  new_tuple_subshapes.push_back(subshape);  in GetConvertedTupleShape()
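GetConvertedTupleShape above rebuilds a flat tuple shape, retyping matching leaves. A self-contained sketch of that pattern, assuming a non-nested tuple as the CHECK at line 71 enforces (ConvertTupleElementTypes is a hypothetical name; the ShapeUtil calls are the ones in the hits):

    #include <cstdint>
    #include <vector>
    #include "tensorflow/compiler/xla/shape_util.h"

    // Rebuild a flat (non-nested) tuple shape, converting from_type leaves
    // to to_type; follows the GetConvertedTupleShape hits above.
    xla::Shape ConvertTupleElementTypes(const xla::Shape& shape,
                                        xla::PrimitiveType from_type,
                                        xla::PrimitiveType to_type) {
      std::vector<xla::Shape> new_subshapes;
      for (int64_t i = 0; i < xla::ShapeUtil::TupleElementCount(shape); ++i) {
        xla::Shape subshape = xla::ShapeUtil::GetTupleElementShape(shape, i);
        CHECK(!subshape.IsTuple());  // flat tuples only, as in the original
        if (subshape.element_type() == from_type) {
          subshape = xla::ShapeUtil::ChangeElementType(subshape, to_type);
        }
        new_subshapes.push_back(subshape);
      }
      return xla::ShapeUtil::MakeTupleShape(new_subshapes);
    }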
|
D | while_loop_invariant_code_motion.cc
    222  operand->shape(), [&input_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
    224  if (subshape.IsArray()) {  in TryHoistingInvariantInstructionsFromWhileBody()
    225  input_size += shape_size_function_(subshape);  in TryHoistingInvariantInstructionsFromWhileBody()
    231  [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody() [argument]
    233  if (subshape.IsArray()) {  in TryHoistingInvariantInstructionsFromWhileBody()
    234  output_size += shape_size_function_(subshape);  in TryHoistingInvariantInstructionsFromWhileBody()
|
D | bfloat16_normalization.cc
     98  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in CountSubshapesWithMatchingType() [argument]
     99  if (subshape.element_type() == type) {  in CountSubshapesWithMatchingType()
    109  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in ShapeLeafCount() [argument]
    178  hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  in ChangeOutputTypeThenInsertConvertBack() [argument]
    179  if (subshape->element_type() == from) {  in ChangeOutputTypeThenInsertConvertBack()
    180  subshape->set_element_type(to);  in ChangeOutputTypeThenInsertConvertBack()
    351  auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  in HandleMultipleOutputs() [local]
    354  HloInstruction::CreateGetTupleElement(*subshape, hlo, i));  in HandleMultipleOutputs()
    357  subshape->set_element_type(F32);  in HandleMultipleOutputs()
    358  bfloat16_normalization_->UpdateLayout(subshape);  in HandleMultipleOutputs()
    [all …]
|
D | layout_assignment.cc
    314  [this, instruction, mandatory](const Shape& subshape,  in SetInstructionLayout()
    323  if (subshape.IsArray() && subshape.has_layout()) {  in SetInstructionLayout()
    324  return SetBufferLayout(subshape.layout(), *buffers[0], mandatory);  in SetInstructionLayout()
    769  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in CheckParameterLayout() [argument]
    771  !subshape.has_layout()) {  in CheckParameterLayout()
    775  subshape,  in CheckParameterLayout()
   1410  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint() [argument]
   1411  if (subshape.IsTuple()) {  in PropagateOperandConstraint()
   1414  if (subshape.rank() <= 1) {  in PropagateOperandConstraint()
   1420  if (subshape.rank() != operand->shape().rank()) {  in PropagateOperandConstraint()
    [all …]
|
D | generic_transfer_manager.cc
     74  [&](const Shape& subshape, const ShapeIndex& index) -> Status {  in TransferLiteralFromDevice() [argument]
     75  if (subshape.IsArray()) {  in TransferLiteralFromDevice()
     79  /*size=*/GetByteSizeRequirement(subshape));  in TransferLiteralFromDevice()
|
D | shaped_buffer.cc
     96  [this, &s](const Shape& subshape, const ShapeIndex& index) {  in ToString() [argument]
     98  if (subshape.IsTuple()) {  in ToString()
    101  shape_str = ShapeUtil::HumanStringWithLayout(subshape);  in ToString()
|
D | instruction_fusion.cc
    205  [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  in EffectivelyAtMostUnary() [argument]
    206  if (subshape.IsArray()) {  in EffectivelyAtMostUnary()
    207  output_rank = std::max(output_rank, ShapeUtil::TrueRank(subshape));  in EffectivelyAtMostUnary()
    299  shape, [&size](const Shape& subshape, const ShapeIndex& shape_index) {  in ComputeGloballyUnfusible() [argument]
    300  if (subshape.IsArray()) {  in ComputeGloballyUnfusible()
    301  size += ShapeUtil::ElementsIn(subshape);  in ComputeGloballyUnfusible()
|
D | while_loop_expensive_invariant_code_motion.cc
    281  [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
    283  if (subshape.IsArray()) {  in TryHoistingInvariantInstructionsFromWhileBody()
    284  output_size += shape_size_function_(subshape);  in TryHoistingInvariantInstructionsFromWhileBody()
|
D | hlo_cost_analysis.cc
    756  [&](const Shape& subshape, const ShapeIndex&) {  in HandleAllReduce() [argument]
    757  if (subshape.IsArray()) {  in HandleAllReduce()
    758  flops += ShapeUtil::ElementsIn(subshape);  in HandleAllReduce()
    836  [this, fusion](const Shape& subshape, const ShapeIndex& shape_index) {  in HandleFusion() [argument]
    837  if (!subshape.IsArray()) {  in HandleFusion()
    863  current_properties_[kBytesAccessedKey] += GetShapeSize(subshape);  in HandleFusion()
    864  SetOutputBytesAccessed(shape_index, GetShapeSize(subshape));  in HandleFusion()
    883  const Shape& subshape = shape.tuple_shapes(i);  in HandleFusion() [local]
    887  propagate_output_size_to_parent(subshape, subshape_index);  in HandleFusion()
|
/external/tensorflow/tensorflow/compiler/xla/pjrt/ |
D | utils.cc
    173  dst_shape, [&](Shape* subshape, const ShapeIndex& idx) {  in DetermineArgumentLayoutsFromCompileOptions() [argument]
    174  if (subshape->IsArray() && !subshape->has_layout()) {  in DetermineArgumentLayoutsFromCompileOptions()
    178  LayoutUtil::SetToDefaultLayout(subshape);  in DetermineArgumentLayoutsFromCompileOptions()
    182  *subshape->mutable_layout() = layout.layout();  in DetermineArgumentLayoutsFromCompileOptions()
|
D | tracked_device_buffer_test.cc
     35  [&](const Shape& subshape, const ShapeIndex&) -> Status {  in MakeArray() [argument]
     41  subshape)));  in MakeArray()
|
/external/tensorflow/tensorflow/compiler/xla/tools/ |
D | driver.cc
    171  std::string subshape;  in TupleShapeFromString() [local]
    173  while (std::getline(sstream, subshape, ' ')) {  in TupleShapeFromString()
    174  if (subshape[subshape.size() - 1] == ',') {  in TupleShapeFromString()
    175  subshape = subshape.substr(0, subshape.size() - 1);  in TupleShapeFromString()
    177  out.push_back(ArrayShapeFromString(subshape));  in TupleShapeFromString()
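TupleShapeFromString above tokenizes a tuple-shape string such as "f32[2,3], s32[4]" by splitting on spaces and trimming each token's trailing comma. A standalone sketch of just that loop, with an added empty-token guard (SplitTupleTokens is a hypothetical name; ArrayShapeFromString from the hits is elided):

    #include <sstream>
    #include <string>
    #include <vector>

    // Split a tuple-shape string on spaces and strip trailing commas,
    // mirroring the std::getline loop in TupleShapeFromString above.
    std::vector<std::string> SplitTupleTokens(const std::string& text) {
      std::vector<std::string> tokens;
      std::istringstream sstream(text);
      std::string subshape;
      while (std::getline(sstream, subshape, ' ')) {
        if (!subshape.empty() && subshape.back() == ',') {
          subshape.pop_back();  // drop the separator left after "f32[2,3],"
        }
        tokens.push_back(subshape);
      }
      return tokens;
    }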
|
/external/tensorflow/tensorflow/compiler/jit/ |
D | xla_tensor.cc
     54  xla::Shape subshape =  in AllocateShapedBuffer() [local]
     57  client->backend().transfer_manager()->GetByteSizeRequirement(subshape);  in AllocateShapedBuffer()
     61  subshape.layout().memory_space()));  in AllocateShapedBuffer()
|
/external/tensorflow/tensorflow/compiler/xrt/ |
D | xrt_state.cc
     98  const xla::Shape& subshape =  in AllocateScopedShapedBuffer() [local]
    100  uint64 size = transfer_manager->GetByteSizeRequirement(subshape);  in AllocateScopedShapedBuffer()
    363  XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  in MakeSubBuffer() [argument]
    367  xla::ShapeUtil::TryGetSubshape(parent->on_host_shape(), subshape));  in MakeSubBuffer()
    370  xla::ShapeUtil::TryGetSubshape(parent->on_device_shape(), subshape));  in MakeSubBuffer()
    377  (*allocation)->buffers_.CopySubtreeFrom(parent->buffers_, subshape, {});  in MakeSubBuffer()
    391  xla::ShapeIndex parent_index = subshape;  in MakeSubBuffer()
    485  const xla::Shape& subshape =  in MakeTuple() [local]
    487  uint64 size = transfer_manager->GetByteSizeRequirement(subshape);  in MakeTuple()
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | client_library_test_base.cc
    332  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus() [argument]
    333  if (subshape->element_type() == F32) {  in ComputeAndCompareLiteralWithStatus()
    334  subshape->set_element_type(BF16);  in ComputeAndCompareLiteralWithStatus()
    389  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus() [argument]
    390  if (subshape->element_type() == F32) {  in ComputeAndCompareLiteralWithStatus()
    391  subshape->set_element_type(BF16);  in ComputeAndCompareLiteralWithStatus()
    624  [](Shape* subshape, const ShapeIndex&) {  in MaybeConvertShapeToBfloat16() [argument]
    625  if (subshape->element_type() == F32) {  in MaybeConvertShapeToBfloat16()
    626  subshape->set_element_type(BF16);  in MaybeConvertShapeToBfloat16()
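All three hits in this file apply the same in-place rewrite: walk every mutable subshape and retype F32 leaves to BF16. A minimal sketch of that rewrite (ConvertShapeToBfloat16 is a hypothetical name; the ForEachMutableSubshape call matches the hits):

    #include "tensorflow/compiler/xla/shape_util.h"

    // Retype every F32 leaf of a shape tree to BF16 in place, following
    // the ForEachMutableSubshape pattern in the hits above.
    void ConvertShapeToBfloat16(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape->element_type() == xla::F32) {
              subshape->set_element_type(xla::BF16);
            }
          });
    }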
|
/external/tensorflow/tensorflow/compiler/xla/python_api/ |
D | xla_shape.py
     57  if not all(isinstance(subshape, Shape) for subshape in dimensions):
|
/external/tensorflow/tensorflow/core/ops/ |
D | math_ops.cc
   1128  ShapeHandle subshape;  in SegmentReductionShapeFn() [local]
   1129  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SegmentReductionShapeFn()
   1133  c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));  in SegmentReductionShapeFn()
   1152  ShapeHandle subshape;  in SparseSegmentReductionShapeFn() [local]
   1153  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SparseSegmentReductionShapeFn()
   1157  c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));  in SparseSegmentReductionShapeFn()
   1176  ShapeHandle subshape;  in SparseSegmentReductionGradShapeFn() [local]
   1177  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SparseSegmentReductionGradShapeFn()
   1195  TF_RETURN_IF_ERROR(c->Concatenate(dim0_shape, subshape, &out));  in SparseSegmentReductionGradShapeFn()
   1217  ShapeHandle subshape;  in SparseSegmentReductionWithNumSegmentsShapeFn() [local]
    [all …]
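The four segment-reduction shape functions above share one step: strip dimension 0 from the data shape with InferenceContext::Subshape, then prepend a segment-count dimension with Concatenate. A simplified sketch of that shared step (SegmentReductionLikeShapeFn is a hypothetical name; the rank and index-shape validation of the real shape functions is elided):

    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/lib/core/errors.h"

    using ::tensorflow::shape_inference::InferenceContext;
    using ::tensorflow::shape_inference::ShapeHandle;

    // output = [num_segments (unknown)] ++ data_shape[1:], as in the
    // SegmentReductionShapeFn hits above; input validation elided.
    tensorflow::Status SegmentReductionLikeShapeFn(InferenceContext* c) {
      ShapeHandle data_shape = c->input(0);
      ShapeHandle subshape;
      TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return tensorflow::Status::OK();
    }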
|