/external/tensorflow/tensorflow/compiler/xla/service/

optimize_input_output_buffer_alias.cc
  57  input_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()
  67  output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()
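
Most of the service-pass hits in this section share one read-only traversal idiom: ShapeUtil::ForEachSubshape visits a shape and every nested tuple element, handing the callback each subshape together with its ShapeIndex. A minimal sketch of that idiom, assuming only the XLA headers from this tree; CountArrayLeaves is a hypothetical helper, not one of the functions listed here:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Counts the array (non-tuple) leaves of `shape` by visiting every subshape,
    // including the subshapes nested inside tuples.
    int64_t CountArrayLeaves(const Shape& shape) {
      int64_t leaves = 0;
      ShapeUtil::ForEachSubshape(
          shape, [&](const Shape& subshape, const ShapeIndex& index) {
            if (!subshape.IsTuple()) {
              ++leaves;
            }
          });
      return leaves;
    }

    }  // namespace xla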

bfloat16_propagation.cc
  55  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineFusionComputationPrecision()
  103  const ShapeIndex& index) {  in RevertIfFusionInternalBF16Changes()
  176  const Shape& subshape, const ShapeIndex& index) {  in DetermineWhileComputationsPrecision()
  211  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineConditionalComputationsPrecision()
  234  const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  in AllUsersConsumeBF16()  local
  951  auto subshape = entry.first;  in Run()  local
  986  Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);  in OutputTypeAfterChange()  local

bfloat16_normalization.cc
  97  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in CountSubshapesWithMatchingType()
  108  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in ShapeLeafCount()
  177  hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  in ChangeOutputTypeThenInsertConvertBack()
  350  auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  in HandleMultipleOutputs()  local
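
The bfloat16 passes also rewrite shapes in place, which is what the mutable counterparts are for: ShapeUtil::ForEachMutableSubshape hands the visitor a Shape* instead of a const reference, and ShapeUtil::GetMutableSubshape resolves a single ShapeIndex to a mutable subshape. A minimal sketch, again assuming the XLA headers from this tree; DowngradeF32LeavesToBF16 is a hypothetical helper, not the actual logic of the pass above:

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/xla_data.pb.h"

    namespace xla {

    // Rewrites every F32 leaf of `shape` to BF16 in place, leaving the tuple
    // structure and all other element types untouched.
    void DowngradeF32LeavesToBF16(Shape* shape) {
      ShapeUtil::ForEachMutableSubshape(
          shape, [](Shape* subshape, const ShapeIndex& index) {
            if (subshape->element_type() == F32) {
              subshape->set_element_type(BF16);
            }
          });
    }

    }  // namespace xla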

while_loop_invariant_code_motion.cc
  248  [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
  249  const ShapeIndex& /*index*/) {  in TryHoistingInvariantInstructionsFromWhileBody()

layout_assignment.cc
  378  const Shape& subshape, const ShapeIndex& index) -> Status {  in SetInstructionLayout()
  416  const Shape& subshape, const ShapeIndex& index) -> Status {  in SetInstructionLayout()
  931  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in CheckParameterLayout()
  1645  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
  1680  user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
  1999  [instruction, this](Shape* subshape, const ShapeIndex& index) {  in AssignLayouts()
  2055  const xla::ShapeIndex& index) {  in AssignLayouts()
  2087  const Shape& subshape, const ShapeIndex& index) {  in CalculateComputationLayout()
  2372  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateComputationLayouts()
  2672  return absl::c_all_of(shape.tuple_shapes(), [](const Shape& subshape) {  in IsAtMostRank1()
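
The `-> Status` lambdas in SetInstructionLayout above (and in generic_transfer_manager.cc further down) use the error-propagating variant, ShapeUtil::ForEachSubshapeWithStatus, which stops at the first visitor error and returns it. A minimal sketch, assuming the XLA headers from this tree; ValidateLeafLayouts is a hypothetical helper:

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/status.h"
    #include "tensorflow/compiler/xla/util.h"

    namespace xla {

    // Returns InvalidArgument for the first array leaf that is missing a layout;
    // otherwise returns OK.
    Status ValidateLeafLayouts(const Shape& shape) {
      return ShapeUtil::ForEachSubshapeWithStatus(
          shape, [](const Shape& subshape, const ShapeIndex& index) -> Status {
            if (subshape.IsArray() && !subshape.has_layout()) {
              return InvalidArgument("no layout at shape index %s",
                                     index.ToString());
            }
            return Status::OK();
          });
    }

    }  // namespace xla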

compile_only_service.cc
  104  const xla::ShapeIndex& index) {  in CompileAheadOfTime()

hlo_module_util.cc
  157  Shape* subshape, const ShapeIndex& index) {  in UpdateEntryComputationLayout()

tuple_util.cc
  94  const Shape& subshape = tuple_shape.tuple_shapes(i);  in ReplaceTupleWith()  local

hlo_element_type_converter.cc
  72  Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  in GetConvertedTupleShape()  local

dynamic_dimension_inference.cc
  470  [&](const Shape& subshape, ShapeIndex reduce_result_index) {  in HandleReduce()
  1258  [&](const Shape& subshape, ShapeIndex reduce_window_result_index) {  in HandleReduceWindow()
  1527  hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in HandleConditional()
  1551  hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in HandleConditional()
  1707  [&](const Shape& subshape, const ShapeIndex& index) {  in HandleWhile()
  1918  Shape subshape = ShapeUtil::GetSubshape(inst->shape(), index);  in SetDynamicSize()  local
  2020  const ShapeIndex& subindex) {  in HasDynamicDimension()
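
Alongside the visitors, several hits (SetDynamicSize above, plus tuple_util.cc, literal.cc, and value_inference.cc) use the point-lookup form: ShapeUtil::GetSubshape resolves one ShapeIndex directly instead of walking the whole shape. A minimal sketch, assuming the XLA headers from this tree; RankAtIndex is a hypothetical helper:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    namespace xla {

    // Returns the rank of the array nested at `index`, e.g. {1, 0} selects the
    // first element of the second tuple field of `shape`.
    int64_t RankAtIndex(const Shape& shape, const ShapeIndex& index) {
      const Shape& subshape = ShapeUtil::GetSubshape(shape, index);
      return subshape.rank();
    }

    }  // namespace xla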

generic_transfer_manager.cc
  74  [&](const Shape& subshape, const ShapeIndex& index) -> Status {  in TransferLiteralFromDevice()

hlo_replication_analysis.cc
  382  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplicationOnComputation()
  408  param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication()

shaped_buffer.cc
  96  [this, &s](const Shape& subshape, const ShapeIndex& index) {  in ToString()

/external/tensorflow/tensorflow/compiler/xla/

literal.cc
  233  const Shape& subshape = shape.tuple_shapes(i);  in mutable_shape_do_not_use()  local
  251  for (const Shape& subshape : shape.tuple_shapes()) {  in SetPiece()  local
  676  Shape* subshape =  in SetDynamicSize()  local
  853  Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);  in Relayout()  local
  869  [this, &result](const Shape& subshape, const ShapeIndex& index) {  in Relayout()
  883  shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ToBoundedDynamic()
  900  &new_shape, [this](Shape* subshape, const ShapeIndex& index) {  in ToStatic()
  1128  const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);  in GetAsString()  local
  1304  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in TupleToStringHelper()  local
  1325  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in DenseArrayToStringHelper()  local
  [all …]

shape_layout.cc
  56  const xla::ShapeIndex& index) {  in MatchesLayoutInShape()

shape.cc
  113  for (const Shape& subshape : tuple_shapes_) {  in is_static()  local

literal_util.cc
  50  &result_shape, [](Shape* subshape, const ShapeIndex&) {  in ConvertType()
  63  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in ConvertType()

/external/tensorflow/tensorflow/core/ops/

math_ops.cc
  1161  ShapeHandle subshape;  in SegmentReductionShapeFn()  local
  1185  ShapeHandle subshape;  in SparseSegmentReductionShapeFn()  local
  1209  ShapeHandle subshape;  in SparseSegmentReductionGradShapeFn()  local
  1250  ShapeHandle subshape;  in SparseSegmentReductionWithNumSegmentsShapeFn()  local
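
The math_ops.cc hits are a different API entirely: inside TensorFlow op shape functions, `subshape` is a shape_inference::ShapeHandle, typically produced by InferenceContext::Subshape to drop the leading segment dimension before the output shape is assembled. A minimal, self-contained sketch of that pattern with a hypothetical op name (ExampleSegmentSum), not the actual registrations from math_ops.cc:

    #include "tensorflow/core/framework/op.h"
    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/platform/errors.h"

    using ::tensorflow::shape_inference::InferenceContext;
    using ::tensorflow::shape_inference::ShapeHandle;

    REGISTER_OP("ExampleSegmentSum")
        .Input("data: float")
        .Input("segment_ids: int32")
        .Output("output: float")
        .SetShapeFn([](InferenceContext* c) {
          ShapeHandle data_shape;
          TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &data_shape));

          // Everything after the leading (segmented) dimension is preserved.
          ShapeHandle subshape;
          TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));

          // Output is [num_segments] + subshape; num_segments is only known at
          // runtime, so the leading dimension stays unknown here.
          ShapeHandle out;
          TF_RETURN_IF_ERROR(c->Concatenate(
              c->Vector(InferenceContext::kUnknownDim), subshape, &out));
          c->set_output(0, out);
          return tensorflow::Status::OK();
        });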

/external/tensorflow/tensorflow/compiler/xla/pjrt/

tracked_device_buffer_test.cc
  35  [&](const Shape& subshape, const ShapeIndex&) -> Status {  in MakeArray()

utils.cc
  175  dst_shape, [&](Shape* subshape, const ShapeIndex& idx) {  in DetermineArgumentLayoutsFromCompileOptions()

/external/tensorflow/tensorflow/compiler/jit/

xla_tensor.cc
  54  xla::Shape subshape =  in AllocateShapedBuffer()  local

/external/tensorflow/tensorflow/compiler/xrt/

xrt_state.cc
  99  const xla::Shape& subshape =  in AllocateScopedShapedBuffer()  local
  364  XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  in MakeSubBuffer()
  485  const xla::Shape& subshape =  in MakeTuple()  local

/external/tensorflow/tensorflow/compiler/xla/client/

value_inference.cc
  392  Shape subshape =  in IsInstructionOverLimit()  local
  477  Shape subshape =  in AnalyzeConstantValueFallback()  local
  645  Shape subshape =  in AnalyzeUpperBound()  local
  815  Shape subshape =  in AnalyzeLowerBound()  local
  905  Shape subshape =  in AnalyzeConstant()  local
  1007  Shape subshape =  in AnalyzeIsDynamic()  local

/external/tensorflow/tensorflow/compiler/xla/stream_executor/tpu/

tpu_executable_interface.cc
  64  const xla::ShapeIndex& index) {  in AllocateOutputMemoryWithInputReuse()

/external/tensorflow/tensorflow/compiler/xla/tests/

client_library_test_base.cc
  332  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
  390  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
  624  [](Shape* subshape, const ShapeIndex&) {  in MaybeConvertShapeToBfloat16()