/external/tensorflow/tensorflow/compiler/xla/service/
D | optimize_input_output_buffer_alias.cc
      57  input_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()
      67  output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()

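Both hits above are calls to XLA's ShapeUtil::ForEachSubshape, the visitor that walks a (possibly nested) tuple shape and hands the callback each subshape together with its ShapeIndex. A minimal standalone sketch of that pattern (CountArrayLeaves is a hypothetical helper, not code from the file):

    #include "tensorflow/compiler/xla/shape_util.h"

    // Count the array leaves of a (possibly nested) tuple shape. The visitor
    // also sees tuple-typed interior nodes, so we filter on IsArray().
    int64_t CountArrayLeaves(const xla::Shape& shape) {
      int64_t leaves = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape, [&](const xla::Shape& subshape, const xla::ShapeIndex& index) {
            if (subshape.IsArray()) {
              ++leaves;
            }
          });
      return leaves;
    }
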
D | bfloat16_propagation.cc
      55  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineFusionComputationPrecision()
     103  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in RevertIfFusionInternalBF16Changes()
     180  const Shape& subshape, const ShapeIndex& index) {  in DetermineWhileComputationsPrecision()
     215  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineConditionalComputationsPrecision()
     238  const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  in AllUsersConsumeBF16() (local)
     945  auto subshape = entry.first;  in Run() (local)
     979  Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);  in OutputTypeAfterChange() (local)

D | hlo_replication_analysis.cc
     314  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplicationOnComputation()
     339  param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication()
     351  param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication()

D | bfloat16_normalization.cc
      98  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in CountSubshapesWithMatchingType()
     109  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in ShapeLeafCount()
     178  hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  in ChangeOutputTypeThenInsertConvertBack()
     351  auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  in HandleMultipleOutputs() (local)

D | while_loop_invariant_code_motion.cc
     243  [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
     244  const ShapeIndex& /*index*/) {  in TryHoistingInvariantInstructionsFromWhileBody()

D | layout_assignment.cc
     353  const Shape& subshape, const ShapeIndex& index) -> Status {  in SetInstructionLayout()
     856  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in CheckParameterLayout()
    1498  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
    1539  user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
    1720  const Shape& subshape = ShapeUtil::GetSubshape(instruction->shape(), index);  in InferArrayLayout() (local)
    1851  [instruction, &constraints](Shape* subshape, const ShapeIndex& index) {  in AssignLayouts()
    2183  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateComputationLayouts()
    2475  return absl::c_all_of(shape.tuple_shapes(), [](const Shape& subshape) {  in IsAtMostRank1()

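The callback at line 353 returns Status; that is the ForEachSubshapeWithStatus flavor, which stops the walk at the first non-OK result. A hedged sketch, assuming only ShapeUtil and the InvalidArgument helper from xla/util.h (ValidateArrayLayouts is a hypothetical name, not the file's code):

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/util.h"

    // Fail on the first array subshape that is missing a layout; the
    // traversal short-circuits as soon as a non-OK Status is returned.
    xla::Status ValidateArrayLayouts(const xla::Shape& shape) {
      return xla::ShapeUtil::ForEachSubshapeWithStatus(
          shape, [](const xla::Shape& subshape,
                    const xla::ShapeIndex& index) -> xla::Status {
            if (subshape.IsArray() && !subshape.has_layout()) {
              return xla::InvalidArgument("no layout at shape index %s",
                                          index.ToString());
            }
            return xla::Status::OK();
          });
    }
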
D | instruction_fusion.cc
     210  [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  in EffectivelyAtMostUnary()
     304  shape, [&size](const Shape& subshape, const ShapeIndex& shape_index) {  in ComputeGloballyUnfusible()

D | hlo_element_type_converter.cc
      70  Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  in GetConvertedTupleShape() (local)

D | generic_transfer_manager.cc
      74  [&](const Shape& subshape, const ShapeIndex& index) -> Status {  in TransferLiteralFromDevice()

D | shaped_buffer.cc
      96  [this, &s](const Shape& subshape, const ShapeIndex& index) {  in ToString()

D | dynamic_dimension_inference.cc
     439  [&](const Shape& subshape, ShapeIndex reduce_result_index) {  in HandleReduce()
    1120  [&](const Shape& subshape, ShapeIndex reduce_window_result_index) {  in HandleReduceWindow()
    1389  hlo->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in HandleConditional()
    1413  const ShapeIndex& index) {  in HandleConditional()
    1727  Shape subshape = ShapeUtil::GetSubshape(inst->shape(), index);  in SetDynamicSize() (local)
    1822  const ShapeIndex& subindex) {  in HasDynamicDimension()

/external/tensorflow/tensorflow/compiler/xla/
D | literal.cc
     150  const Shape& subshape = shape.tuple_shapes(i);  in SetPiece() (local)
     726  Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);  in Relayout() (local)
     742  [this, &result](const Shape& subshape, const ShapeIndex& index) {  in Relayout()
     756  shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ToBoundedDynamic()
     773  &new_shape, [this](Shape* subshape, const ShapeIndex& index) {  in ToStatic()
     988  const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);  in GetAsString() (local)
    1105  shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in Hash()
    1185  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in TupleToStringHelper() (local)
    1203  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in DenseArrayToStringHelper() (local)
    1290  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in ToStringHelper() (local)
    [all …]

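literal.cc mixes the const visitor with the mutable one: Relayout and ToStatic rewrite the literal's shape in place through GetMutableSubshape and ForEachMutableSubshape. A sketch of the mutable pattern, assuming nothing beyond ShapeUtil and LayoutUtil (ResetToDefaultLayouts is a hypothetical helper):

    #include "tensorflow/compiler/xla/layout_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    // Give every array leaf the default (descending minor-to-major) layout,
    // e.g. as a first step before re-laying-out a literal.
    void ResetToDefaultLayouts(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape->IsArray()) {
              xla::LayoutUtil::SetToDefaultLayout(subshape);
            }
          });
    }
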
D | shape_layout.cc
      56  const xla::ShapeIndex& index) {  in MatchesLayoutInShape()

D | literal_util.cc
      51  &result_shape, [](Shape* subshape, const ShapeIndex&) {  in ConvertType()
      64  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in ConvertType()

D | shape.cc
     107  for (const Shape& subshape : tuple_shapes_) {  in is_static() (local)

D | shape_util.cc
     938  const Shape* subshape = &shape;  in IndexIsValid() (local)
     994  for (const Shape& subshape : shape.tuple_shapes()) {  in GetLeafCount() (local)
    1064  [&func](const Shape& subshape, const ShapeIndex& index) {  in ForEachSubshape()
    1077  [&func](Shape* subshape, const ShapeIndex& index) {  in ForEachMutableSubshape()
    1690  for (const Shape& subshape : shape.tuple_shapes()) {  in Hash() (local)
    1766  ForEachMutableSubshape(&s, [](Shape* subshape, const ShapeIndex& index) {  in DeviceShapeToHostShape()

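shape_util.cc is where ForEachSubshape and ForEachMutableSubshape are implemented, and the index they pass to the callback is exactly what GetSubshape accepts, so the two APIs compose. A small sketch of that round-trip invariant (CheckIndexRoundTrip is a hypothetical helper):

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/core/platform/logging.h"

    // Every index seen by the visitor resolves, via GetSubshape, back to
    // the subshape the visitor was given.
    void CheckIndexRoundTrip(const xla::Shape& shape) {
      xla::ShapeUtil::ForEachSubshape(
          shape, [&](const xla::Shape& subshape, const xla::ShapeIndex& index) {
            CHECK(xla::ShapeUtil::Equal(
                subshape, xla::ShapeUtil::GetSubshape(shape, index)));
          });
    }
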
/external/tensorflow/tensorflow/core/ops/
D | math_ops.cc
    1149  ShapeHandle subshape;  in SegmentReductionShapeFn() (local)
    1173  ShapeHandle subshape;  in SparseSegmentReductionShapeFn() (local)
    1197  ShapeHandle subshape;  in SparseSegmentReductionGradShapeFn() (local)
    1238  ShapeHandle subshape;  in SparseSegmentReductionWithNumSegmentsShapeFn() (local)

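The math_ops.cc hits are unrelated to XLA's ShapeUtil: there, subshape is a shape_inference::ShapeHandle produced by InferenceContext::Subshape, which slices leading dimensions off an input shape during shape inference. A hedged sketch of the segment-reduction pattern, not the file's exact code (SegmentReductionLikeShape is a hypothetical name):

    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/lib/core/errors.h"

    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    // Output shape: an unknown number of segments followed by every data
    // dimension except the segmented (leading) one.
    tensorflow::Status SegmentReductionLikeShape(InferenceContext* c) {
      ShapeHandle subshape;
      TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &subshape));
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return tensorflow::Status::OK();
    }
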
/external/tensorflow/tensorflow/compiler/xla/pjrt/
D | tracked_device_buffer_test.cc
      35  [&](const Shape& subshape, const ShapeIndex&) -> Status {  in MakeArray()

D | utils.cc
     173  dst_shape, [&](Shape* subshape, const ShapeIndex& idx) {  in DetermineArgumentLayoutsFromCompileOptions()

/external/tensorflow/tensorflow/compiler/jit/
D | xla_tensor.cc
      54  xla::Shape subshape =  in AllocateShapedBuffer() (local)

/external/tensorflow/tensorflow/compiler/xrt/
D | xrt_state.cc
      99  const xla::Shape& subshape =  in AllocateScopedShapedBuffer() (local)
     364  XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  in MakeSubBuffer()
     485  const xla::Shape& subshape =  in MakeTuple() (local)

/external/tensorflow/tensorflow/compiler/xla/tests/
D | client_library_test_base.cc
     332  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
     389  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
     622  [](Shape* subshape, const ShapeIndex&) {  in MaybeConvertShapeToBfloat16()

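The MaybeConvertShapeToBfloat16 hit suggests the usual test-utility trick: retype every F32 leaf as BF16 through the mutable visitor. A sketch under that assumption (ConvertF32LeavesToBF16 is a hypothetical helper):

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/xla_data.pb.h"

    // Rewrite every F32 array leaf to BF16 in place; other element types
    // are left untouched.
    void ConvertF32LeavesToBF16(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex&) {
            if (subshape->element_type() == xla::F32) {
              subshape->set_element_type(xla::BF16);
            }
          });
    }
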
/external/tensorflow/tensorflow/stream_executor/tpu/
D | tpu_executable_interface.cc
      64  const xla::ShapeIndex& index) {  in AllocateOutputMemoryWithInputReuse()

/external/tensorflow/tensorflow/compiler/xla/tools/
D | hlo_control_flow_flattening.cc
      85  Shape* subshape = shape->add_tuple_shapes();  in FlattenWhileLoop() (local)

/external/tensorflow/tensorflow/compiler/tf2xla/
D | shape_util.cc
      33  const xla::Shape& subshape =  in PopulateInfeedLayoutVector() (local)