/external/tensorflow/tensorflow/compiler/xla/
D | literal.cc |
    124   const Shape& subshape = shape.tuple_shapes(i);  in SetPiece() local
    127   child_piece.set_subshape(&subshape);  in SetPiece()
    129   SetPiece(subshape, &child_piece, allocate_arrays);  in SetPiece()
    154   CHECK(&root_piece_->subshape() == shape_.get());  in Literal()
    180   DCHECK(&other.root_piece_->subshape() == other.shape_.get());  in operator =()
    184   DCHECK(&root_piece_->subshape() == shape_.get());  in operator =()
    193   if (piece->subshape().IsArray()) {  in CreateFromShape()
    308   if (piece->subshape().IsTuple()) {  in CreateFromProto()
    310   ShapeUtil::TupleElementCount(piece->subshape())) {  in CreateFromProto()
    313   ShapeUtil::TupleElementCount(piece->subshape()),  in CreateFromProto()
    [all …]

D | literal.h |
    67    const Shape& shape() const { return root_piece().subshape(); }  in shape()
    359   const Shape& subshape() const { return *subshape_; }  in subshape() function
    360   void set_subshape(const Shape* subshape) { subshape_ = subshape; }  in set_subshape() argument
    363   int64 size_bytes() const { return ShapeUtil::ByteSizeOf(subshape()); }  in size_bytes()
    366   int64 element_count() const { return ShapeUtil::ElementsIn(subshape()); }  in element_count()
    832   DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());  in data()
    833   DCHECK_EQ(subshape().element_type(),  in data()
    838   << PrimitiveType_Name(subshape().element_type());  in data()
    845   DCHECK(subshape().IsArray()) << ShapeUtil::HumanString(subshape());  in data()
    846   DCHECK_EQ(subshape().element_type(),  in data()
    [all …]

D | literal_util.cc |
    51    &result_shape, [](Shape* subshape, const ShapeIndex&) {  in ConvertType() argument
    52    if (subshape->element_type() ==  in ConvertType()
    54    subshape->set_element_type(  in ConvertType()
    64    [&](const Shape& subshape, const ShapeIndex& shape_index) {  in ConvertType() argument
    65    if (subshape.IsArray()) {  in ConvertType()
    66    if (subshape.element_type() ==  in ConvertType()

D | shape_util.cc |
    771   const Shape* subshape = &shape;  in IndexIsValid() local
    773   if (!subshape->IsTuple() || i >= subshape->tuple_shapes_size() || i < 0) {  in IndexIsValid()
    776   subshape = &subshape->tuple_shapes(i);  in IndexIsValid()
    827   for (const Shape& subshape : shape.tuple_shapes()) {  in GetLeafCount() local
    828   count += GetLeafCount(subshape);  in GetLeafCount()
    897   [&func](const Shape& subshape, const ShapeIndex& index) {  in ForEachSubshape() argument
    898   func(subshape, index);  in ForEachSubshape()
    910   [&func](Shape* subshape, const ShapeIndex& index) {  in ForEachMutableSubshape() argument
    911   func(subshape, index);  in ForEachMutableSubshape()
    1476  for (const Shape& subshape : shape.tuple_shapes()) {  in Hash() local
    [all …]

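Most of the matches in this listing follow the visitor pattern defined here in shape_util.cc: ShapeUtil::ForEachSubshape walks a (possibly nested) tuple shape and invokes a caller-supplied callback with each subshape and its ShapeIndex. A minimal sketch of that pattern, counting array leaves roughly the way GetLeafCount does; CountArrayLeaves is an illustrative name, not a function from the file:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape_util.h"

    // Counts the array leaves of `shape`. ForEachSubshape also visits the
    // tuple nodes themselves, so IsArray() filters the traversal to leaves.
    int64_t CountArrayLeaves(const xla::Shape& shape) {
      int64_t count = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape,
          [&count](const xla::Shape& subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape.IsArray()) {
              ++count;
            }
          });
      return count;
    }
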
/external/tensorflow/tensorflow/compiler/xla/service/
D | optimize_input_output_buffer_alias.cc |
    61    *input_shapes[i], [&](const Shape& subshape, const ShapeIndex& index) {  in Build() argument
    62    if (subshape.IsTuple()) {  in Build()
    65    parameter_entries.emplace_back(Entry{i, subshape, index, false});  in Build()
    72    output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build() argument
    73    if (subshape.IsTuple()) {  in Build()
    77    if (Shape::Equal()(entry.shape, subshape) && !entry.used) {  in Build()

D | bfloat16_propagation.cc |
    53    root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineFusionComputationPrecision() argument
    54    if (subshape.element_type() != F32) {  in DetermineFusionComputationPrecision()
    101   inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in RevertIfFusionInternalBF16Changes() argument
    108   if (subshape.element_type() != F32) {  in RevertIfFusionInternalBF16Changes()
    178   const Shape& subshape, const ShapeIndex& index) {  in DetermineWhileComputationsPrecision() argument
    179   if (subshape.element_type() != F32) {  in DetermineWhileComputationsPrecision()
    209   const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  in AllUsersConsumeBF16() local
    210   if (subshape.element_type() != BF16 && subshape.element_type() != F32) {  in AllUsersConsumeBF16()
    848   auto subshape = entry.first;  in Run() local
    849   CHECK_EQ(subshape->element_type(), F32);  in Run()
    [all …]

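AllUsersConsumeBF16 above goes the other way: given a ShapeIndex, ShapeUtil::GetSubshape resolves it back to the concrete subshape. A hedged sketch of that lookup; LeafIsF32 and the index {1, 0} are made up for illustration:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Resolves a ShapeIndex inside a nested tuple shape; {1, 0} names element 0
    // of tuple element 1. GetSubshape CHECK-fails on an out-of-range index.
    bool LeafIsF32(const xla::Shape& tuple_shape) {
      const xla::Shape& subshape =
          xla::ShapeUtil::GetSubshape(tuple_shape, xla::ShapeIndex({1, 0}));
      return subshape.element_type() == xla::F32;
    }
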
D | while_loop_invariant_code_motion.cc |
    222   operand->shape(), [&input_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
    224   if (subshape.IsArray()) {  in TryHoistingInvariantInstructionsFromWhileBody()
    225   input_size += shape_size_function_(subshape);  in TryHoistingInvariantInstructionsFromWhileBody()
    231   [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody() argument
    233   if (subshape.IsArray()) {  in TryHoistingInvariantInstructionsFromWhileBody()
    234   output_size += shape_size_function_(subshape);  in TryHoistingInvariantInstructionsFromWhileBody()

D | hlo_element_type_converter.cc |
    70    Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  in GetConvertedTupleShape() local
    71    CHECK(!subshape.IsTuple());  in GetConvertedTupleShape()
    72    if (subshape.element_type() == from_type) {  in GetConvertedTupleShape()
    73    subshape = ShapeUtil::ChangeElementType(subshape, to_type);  in GetConvertedTupleShape()
    75    new_tuple_subshapes.push_back(subshape);  in GetConvertedTupleShape()

D | bfloat16_normalization.cc |
    98    shape, [&](const Shape& subshape, const ShapeIndex& index) {  in CountSubshapesWithMatchingType() argument
    99    if (subshape.element_type() == type) {  in CountSubshapesWithMatchingType()
    109   shape, [&](const Shape& subshape, const ShapeIndex& index) {  in ShapeLeafCount() argument
    178   hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  in ChangeOutputTypeThenInsertConvertBack() argument
    179   if (subshape->element_type() == from) {  in ChangeOutputTypeThenInsertConvertBack()
    180   subshape->set_element_type(to);  in ChangeOutputTypeThenInsertConvertBack()
    351   auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  in HandleMultipleOutputs() local
    354   HloInstruction::CreateGetTupleElement(*subshape, hlo, i));  in HandleMultipleOutputs()
    357   subshape->set_element_type(F32);  in HandleMultipleOutputs()
    358   bfloat16_normalization_->UpdateLayout(subshape);  in HandleMultipleOutputs()
    [all …]

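ChangeOutputTypeThenInsertConvertBack above uses the mutable flavor of the visitor to rewrite element types in place. A minimal sketch of that rewrite, assuming the same F32-to-BF16 direction as the pass; ConvertF32ToBF16InPlace is an illustrative name, not the pass's API:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Rewrites every F32 subshape of `shape` to BF16 in place; the mutable
    // visitor hands the callback a Shape* instead of a const Shape&.
    void ConvertF32ToBF16InPlace(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape->element_type() == xla::F32) {
              subshape->set_element_type(xla::BF16);
            }
          });
    }
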
D | dynamic_dimension_inference.h |
    178   Shape subshape = ShapeUtil::GetSubshape(inst->shape(), index);  in SetDynamicSize() local
    179   CHECK(!subshape.IsTuple())  in SetDynamicSize()
    181   CHECK(dim < subshape.rank() && dim >= 0)  in SetDynamicSize()
    183   << subshape.ToString() << ", Dimension: " << dim;  in SetDynamicSize()

D | layout_assignment.cc |
    313   [this, instruction, mandatory](const Shape& subshape,  in SetInstructionLayout()
    322   if (subshape.IsArray() && subshape.has_layout()) {  in SetInstructionLayout()
    323   return SetBufferLayout(subshape.layout(), *buffers[0], mandatory);  in SetInstructionLayout()
    768   [&](const Shape& subshape, const ShapeIndex& shape_index) {  in CheckParameterLayout() argument
    770   !subshape.has_layout()) {  in CheckParameterLayout()
    774   subshape,  in CheckParameterLayout()
    1395  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint() argument
    1396  if (subshape.IsTuple()) {  in PropagateOperandConstraint()
    1399  if (subshape.rank() <= 1) {  in PropagateOperandConstraint()
    1405  if (subshape.rank() != operand->shape().rank()) {  in PropagateOperandConstraint()
    [all …]

D | generic_transfer_manager.cc |
    79    [&](const Shape& subshape, const ShapeIndex& index) -> Status {  in TransferLiteralFromDevice() argument
    80    if (subshape.IsArray()) {  in TransferLiteralFromDevice()
    84    /*size=*/GetByteSizeRequirement(subshape));  in TransferLiteralFromDevice()

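TransferLiteralFromDevice above uses the Status-returning variant of the visitor, where a non-OK return from the callback aborts the walk and propagates the error. A sketch of that control flow, with ShapeUtil::ByteSizeOf standing in for the transfer manager's GetByteSizeRequirement; TotalArrayBytes is an illustrative name:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape_util.h"

    // Sums the byte sizes of all array leaves. Returning a non-OK Status from
    // the callback would stop the traversal early and be returned here.
    xla::Status TotalArrayBytes(const xla::Shape& shape, int64_t* total) {
      *total = 0;
      return xla::ShapeUtil::ForEachSubshapeWithStatus(
          shape, [&](const xla::Shape& subshape,
                     const xla::ShapeIndex& /*index*/) -> xla::Status {
            if (subshape.IsArray()) {
              *total += xla::ShapeUtil::ByteSizeOf(subshape);
            }
            return xla::Status::OK();
          });
    }
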
D | shaped_buffer.cc |
    99    [this, &s](const Shape& subshape, const ShapeIndex& index) {  in ToString() argument
    101   if (subshape.IsTuple()) {  in ToString()
    104   shape_str = ShapeUtil::HumanStringWithLayout(subshape);  in ToString()

D | instruction_fusion.cc |
    197   [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  in EffectivelyAtMostUnary() argument
    198   if (subshape.IsArray()) {  in EffectivelyAtMostUnary()
    199   output_rank = std::max(output_rank, ShapeUtil::TrueRank(subshape));  in EffectivelyAtMostUnary()
    291   shape, [&size](const Shape& subshape, const ShapeIndex& shape_index) {  in ComputeGloballyUnfusible() argument
    292   if (subshape.IsArray()) {  in ComputeGloballyUnfusible()
    293   size += ShapeUtil::ElementsIn(subshape);  in ComputeGloballyUnfusible()

D | dynamic_padder.cc |
    822   root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in InsertSliceToDynamicBeforeModuleOutputs() argument
    823   if (subshape.IsArray()) {  in InsertSliceToDynamicBeforeModuleOutputs()
    825   for (int64 dim = 0; dim < subshape.rank(); ++dim) {  in InsertSliceToDynamicBeforeModuleOutputs()
    841   ShapeUtil::ForEachSubshape(root->shape(), [&](const Shape& subshape,  in InsertSliceToDynamicBeforeModuleOutputs()
    843   if (!subshape.IsArray()) {  in InsertSliceToDynamicBeforeModuleOutputs()
    849   ShapeUtil::MakeShapeWithStaticDimensions(subshape), root,  in InsertSliceToDynamicBeforeModuleOutputs()
    861   Shape dynamic_subshape = subshape;  in InsertSliceToDynamicBeforeModuleOutputs()
    862   for (int64 dim = 0; dim < subshape.rank(); ++dim) {  in InsertSliceToDynamicBeforeModuleOutputs()
    869   LiteralUtil::CreateR0<int32>(subshape.dimensions(dim)));  in InsertSliceToDynamicBeforeModuleOutputs()

D | memory_space_assignment.cc |
    537   [&](const Shape& subshape, const ShapeIndex& index) {  in AddInputAndOutputRequiredAssignments() argument
    539   if (subshape.has_layout() && subshape.layout().memory_space() ==  in AddInputAndOutputRequiredAssignments()
    561   [&](const Shape& subshape, const ShapeIndex& index) {  in AddInputAndOutputRequiredAssignments() argument
    563   if (subshape.has_layout() && subshape.layout().memory_space() ==  in AddInputAndOutputRequiredAssignments()
    1257  const Shape& subshape = tuple_shape.tuple_shapes(i);  in ReplaceTupleWith() local
    1261  if (subshape.IsTuple()) {  in ReplaceTupleWith()
    1263  HloInstruction::CreateGetTupleElement(subshape, tuple, i));  in ReplaceTupleWith()
    1269  if (subshape != new_instruction->shape()) {  in ReplaceTupleWith()
    1270  VLOG(4) << "Old shape = " << subshape.ToString()  in ReplaceTupleWith()
    1274  HloInstruction::CreateBitcast(subshape, new_instruction));  in ReplaceTupleWith()
    [all …]

D | hlo_cost_analysis.cc |
    715   [&](const Shape& subshape, const ShapeIndex&) {  in HandleAllReduce() argument
    716   if (subshape.IsArray()) {  in HandleAllReduce()
    717   flops += ShapeUtil::ElementsIn(subshape);  in HandleAllReduce()
    776   [this, fusion](const Shape& subshape, const ShapeIndex& shape_index) {  in HandleFusion() argument
    777   if (!subshape.IsArray()) {  in HandleFusion()
    803   current_properties_[kBytesAccessedKey] += GetShapeSize(subshape);  in HandleFusion()
    804   SetOutputBytesAccessed(shape_index, GetShapeSize(subshape));  in HandleFusion()
    823   const Shape& subshape = shape.tuple_shapes(i);  in HandleFusion() local
    827   propagate_output_size_to_parent(subshape, subshape_index);  in HandleFusion()

D | hlo_replication_analysis.cc |
    264   inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplicationOnComputation() argument
    289   param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication() argument
    301   param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication() argument

/external/tensorflow/tensorflow/compiler/xla/tools/
D | driver.cc |
    169   std::string subshape;  in TupleShapeFromString() local
    171   while (std::getline(sstream, subshape, ' ')) {  in TupleShapeFromString()
    172   if (subshape[subshape.size() - 1] == ',') {  in TupleShapeFromString()
    173   subshape = subshape.substr(0, subshape.size() - 1);  in TupleShapeFromString()
    175   out.push_back(ArrayShapeFromString(subshape));  in TupleShapeFromString()

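TupleShapeFromString above tokenizes a textual tuple shape such as "(f32[10,20], u8[1])" by splitting on spaces and stripping trailing commas before handing each token to ArrayShapeFromString. A self-contained sketch of just that tokenizing loop, assuming the surrounding parentheses are already removed; SplitTupleBody is an illustrative stand-in, and it adds an empty-token guard that the original indexing lacks:

    #include <iostream>
    #include <sstream>
    #include <string>
    #include <vector>

    // Splits a tuple body on ' ' and drops a trailing ',' from each token,
    // mirroring the getline/substr loop in TupleShapeFromString.
    std::vector<std::string> SplitTupleBody(const std::string& body) {
      std::vector<std::string> tokens;
      std::istringstream sstream(body);
      std::string subshape;
      while (std::getline(sstream, subshape, ' ')) {
        if (!subshape.empty() && subshape.back() == ',') {
          subshape = subshape.substr(0, subshape.size() - 1);
        }
        tokens.push_back(subshape);
      }
      return tokens;
    }

    int main() {
      for (const std::string& token : SplitTupleBody("f32[10,20], u8[1]")) {
        std::cout << token << "\n";  // prints "f32[10,20]" then "u8[1]"
      }
      return 0;
    }
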
/external/tensorflow/tensorflow/compiler/jit/
D | xla_tensor.cc |
    58    xla::Shape subshape =  in AllocateShapedBuffer() local
    61    client->backend().transfer_manager()->GetByteSizeRequirement(subshape);  in AllocateShapedBuffer()
    65    subshape.layout().memory_space()));  in AllocateShapedBuffer()

/external/tensorflow/tensorflow/compiler/xrt/
D | xrt_state.cc |
    98    const xla::Shape& subshape =  in AllocateScopedShapedBuffer() local
    100   uint64 size = transfer_manager->GetByteSizeRequirement(subshape);  in AllocateScopedShapedBuffer()
    363   XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  in MakeSubBuffer() argument
    367   xla::ShapeUtil::TryGetSubshape(parent->on_host_shape(), subshape));  in MakeSubBuffer()
    370   xla::ShapeUtil::TryGetSubshape(parent->on_device_shape(), subshape));  in MakeSubBuffer()
    377   (*allocation)->buffers_.CopySubtreeFrom(parent->buffers_, subshape, {});  in MakeSubBuffer()
    391   xla::ShapeIndex parent_index = subshape;  in MakeSubBuffer()
    485   const xla::Shape& subshape =  in MakeTuple() local
    487   uint64 size = transfer_manager->GetByteSizeRequirement(subshape);  in MakeTuple()

/external/tensorflow/tensorflow/compiler/xla/tests/
D | client_library_test_base.cc |
    332   &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus() argument
    333   if (subshape->element_type() == F32) {  in ComputeAndCompareLiteralWithStatus()
    334   subshape->set_element_type(BF16);  in ComputeAndCompareLiteralWithStatus()
    389   &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus() argument
    390   if (subshape->element_type() == F32) {  in ComputeAndCompareLiteralWithStatus()
    391   subshape->set_element_type(BF16);  in ComputeAndCompareLiteralWithStatus()
    624   [](Shape* subshape, const ShapeIndex&) {  in MaybeConvertShapeToBfloat16() argument
    625   if (subshape->element_type() == F32) {  in MaybeConvertShapeToBfloat16()
    626   subshape->set_element_type(BF16);  in MaybeConvertShapeToBfloat16()

/external/tensorflow/tensorflow/compiler/xla/service/gpu/
D | custom_call_thunk.cc |
    147   const Shape& subshape =  in ExecuteOnStream() local
    149   auto n = subshape.tuple_shapes_size();  in ExecuteOnStream()
    150   if (!subshape.IsTuple() || n == 0) {  in ExecuteOnStream()

/external/tensorflow/tensorflow/compiler/xla/python_api/
D | xla_shape.py |
    57    if not all(isinstance(subshape, Shape) for subshape in dimensions):

/external/tensorflow/tensorflow/core/ops/
D | math_ops.cc |
    1111  ShapeHandle subshape;  in SegmentReductionShapeFn() local
    1112  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SegmentReductionShapeFn()
    1116  c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));  in SegmentReductionShapeFn()
    1135  ShapeHandle subshape;  in SparseSegmentReductionShapeFn() local
    1136  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SparseSegmentReductionShapeFn()
    1140  c->Concatenate(c->Vector(InferenceContext::kUnknownDim), subshape, &out));  in SparseSegmentReductionShapeFn()
    1159  ShapeHandle subshape;  in SparseSegmentReductionGradShapeFn() local
    1160  TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  in SparseSegmentReductionGradShapeFn()
    1178  TF_RETURN_IF_ERROR(c->Concatenate(dim0_shape, subshape, &out));  in SparseSegmentReductionGradShapeFn()
    1200  ShapeHandle subshape;  in SparseSegmentReductionWithNumSegmentsShapeFn() local
    [all …]

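The segment-reduction shape functions above share one recipe: drop dimension 0 of the data shape with InferenceContext::Subshape, then prepend a segment-count dimension with Concatenate (the grad variant prepends a concrete dim0_shape instead of an unknown dimension). A hedged sketch of that recipe as a standalone shape function; SegmentReductionLikeShapeFn is an illustrative name and the op-registration boilerplate is omitted:

    #include "tensorflow/core/framework/shape_inference.h"

    using tensorflow::Status;
    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    // Output shape = [num_segments (unknown at graph-build time)] + data[1:],
    // the pattern shared by SegmentReductionShapeFn and its sparse variants.
    Status SegmentReductionLikeShapeFn(InferenceContext* c) {
      ShapeHandle data_shape = c->input(0);
      ShapeHandle subshape;
      TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));  // data[1:]
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return Status::OK();
    }
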