/external/tensorflow/tensorflow/compiler/xla/service/
optimize_input_output_buffer_alias.cc
      57  input_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()
      67  output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  in Build()

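Most of the hits in this listing share one idiom: ShapeUtil::ForEachSubshape walks a (possibly nested) tuple shape and invokes a lambda with each subshape plus the ShapeIndex path leading to it. A minimal standalone sketch of that idiom; the shape below is made up for illustration and does not come from any of the files listed:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape_util.h"

    // Counts the array (non-tuple) leaves of a nested tuple shape.
    int64_t CountLeafSubshapes() {
      // (f32[4,3], (pred[], s32[2])) -- a two-level tuple shape.
      const xla::Shape shape = xla::ShapeUtil::MakeTupleShape(
          {xla::ShapeUtil::MakeShape(xla::F32, {4, 3}),
           xla::ShapeUtil::MakeTupleShape(
               {xla::ShapeUtil::MakeShape(xla::PRED, {}),
                xla::ShapeUtil::MakeShape(xla::S32, {2})})});
      int64_t leaves = 0;
      // Visits the root, both tuple elements, and the nested leaves;
      // `index` is the path from the root to `subshape` (e.g. {1, 0}).
      xla::ShapeUtil::ForEachSubshape(
          shape,
          [&](const xla::Shape& subshape, const xla::ShapeIndex& index) {
            if (!subshape.IsTuple()) {
              ++leaves;
            }
          });
      return leaves;  // 3: f32[4,3], pred[], s32[2].
    }
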
bfloat16_propagation.cc
      55  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineFusionComputationPrecision()
     103  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in RevertIfFusionInternalBF16Changes()
     180  const Shape& subshape, const ShapeIndex& index) {  in DetermineWhileComputationsPrecision()
     215  root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in DetermineConditionalComputationsPrecision()
     238  const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  in AllUsersConsumeBF16()  local
     950  auto subshape = entry.first;  in Run()  local
     984  Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);  in OutputTypeAfterChange()  local

hlo_replication_analysis.cc
     314  inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplicationOnComputation()
     339  param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication()
     351  param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ComputeHloReplication()

bfloat16_normalization.cc
      98  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in CountSubshapesWithMatchingType()
     109  shape, [&](const Shape& subshape, const ShapeIndex& index) {  in ShapeLeafCount()
     178  hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  in ChangeOutputTypeThenInsertConvertBack()
     351  auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  in HandleMultipleOutputs()  local

while_loop_invariant_code_motion.cc
     231  [&output_size, this](const Shape& subshape,  in TryHoistingInvariantInstructionsFromWhileBody()
     232  const ShapeIndex& /*index*/) {  in TryHoistingInvariantInstructionsFromWhileBody()

layout_assignment.cc
     315  const ShapeIndex& index) -> Status {  in SetInstructionLayout()
     769  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in CheckParameterLayout()
    1410  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
    1451  user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateOperandConstraint()
    1632  const Shape& subshape = ShapeUtil::GetSubshape(instruction->shape(), index);  in InferArrayLayout()  local
    1763  [instruction, &constraints](Shape* subshape, const ShapeIndex& index) {  in AssignLayouts()
    1954  const xla::ShapeIndex& index) {  in RunOnComputation()
    2056  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in PropagateComputationLayouts()
    2327  return absl::c_all_of(shape.tuple_shapes(), [](const Shape& subshape) {  in IsAtMostRank1()

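The SetInstructionLayout hit at 315 uses the Status-returning flavor of the same walk: ShapeUtil::ForEachSubshapeWithStatus stops at the first non-OK Status and propagates it. A hedged sketch; the dynamic-shape check is invented for illustration and is not what layout assignment actually validates:

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/util.h"

    // Fails if any subshape carries a dynamic dimension.
    xla::Status RejectDynamicSubshapes(const xla::Shape& shape) {
      return xla::ShapeUtil::ForEachSubshapeWithStatus(
          shape, [](const xla::Shape& subshape,
                    const xla::ShapeIndex& index) -> xla::Status {
            if (subshape.is_dynamic()) {
              return xla::InvalidArgument("dynamic subshape at index %s",
                                          index.ToString());
            }
            return xla::Status::OK();
          });
    }
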
hlo_element_type_converter.cc
      70  Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  in GetConvertedTupleShape()  local

instruction_fusion.cc
     205  [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  in EffectivelyAtMostUnary()
     299  shape, [&size](const Shape& subshape, const ShapeIndex& shape_index) {  in ComputeGloballyUnfusible()

generic_transfer_manager.cc
      74  [&](const Shape& subshape, const ShapeIndex& index) -> Status {  in TransferLiteralFromDevice()

shaped_buffer.cc
      96  [this, &s](const Shape& subshape, const ShapeIndex& index) {  in ToString()

while_loop_expensive_invariant_code_motion.cc
     282  const ShapeIndex& /*index*/) {  in TryHoistingInvariantInstructionsFromWhileBody()

/external/tensorflow/tensorflow/compiler/xla/
literal.cc
     147  const Shape& subshape = shape.tuple_shapes(i);  in SetPiece()  local
     712  Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);  in Relayout()  local
     728  [this, &result](const Shape& subshape, const ShapeIndex& index) {  in Relayout()
     742  shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in ToBoundedDynamic()
     759  &new_shape, [this](Shape* subshape, const ShapeIndex& index) {  in ToStatic()
     974  const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);  in GetAsString()  local
    1091  shape(), [&](const Shape& subshape, const ShapeIndex& index) {  in Hash()
    1171  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in TupleToStringHelper()  local
    1189  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in DenseArrayToStringHelper()  local
    1276  const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  in ToStringHelper()  local
    [all …]

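The string helpers in literal.cc (GetAsString, TupleToStringHelper, ToStringHelper) all resolve a ShapeIndex against the literal's own shape through ShapeUtil::GetSubshape. A small sketch of that lookup; the two-element tuple literal below is hypothetical:

    #include "tensorflow/compiler/xla/literal_util.h"
    #include "tensorflow/compiler/xla/shape_util.h"

    void InspectTupleElement() {
      xla::Literal vec = xla::LiteralUtil::CreateR1<float>({1.0f, 2.0f});
      xla::Literal scalar = xla::LiteralUtil::CreateR0<int32_t>(7);
      xla::Literal tuple = xla::LiteralUtil::MakeTuple({&vec, &scalar});
      // The index {1} addresses the second tuple element; GetSubshape
      // follows the same path through the shape tree that the Piece
      // hierarchy in literal.h mirrors.
      const xla::Shape& subshape =
          xla::ShapeUtil::GetSubshape(tuple.shape(), {1});
      // subshape is now s32[], the scalar element's shape.
    }
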
literal_util.cc
      51  &result_shape, [](Shape* subshape, const ShapeIndex&) {  in ConvertType()
      64  [&](const Shape& subshape, const ShapeIndex& shape_index) {  in ConvertType()

shape.cc
      88  for (const Shape& subshape : tuple_shapes_) {  in is_static()  local

shape_util.cc
     898  const Shape* subshape = &shape;  in IndexIsValid()  local
     954  for (const Shape& subshape : shape.tuple_shapes()) {  in GetLeafCount()  local
    1024  [&func](const Shape& subshape, const ShapeIndex& index) {  in ForEachSubshape()
    1037  [&func](Shape* subshape, const ShapeIndex& index) {  in ForEachMutableSubshape()
    1650  for (const Shape& subshape : shape.tuple_shapes()) {  in Hash()  local
    1726  ForEachMutableSubshape(&s, [](Shape* subshape, const ShapeIndex& index) {  in DeviceShapeToHostShape()

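Lines 1024 and 1037 are the definitions most of this listing leans on, and 1726 shows the mutable form rewriting a shape in place. A sketch of ForEachMutableSubshape; clearing every layout is a simplified stand-in for, not a copy of, what DeviceShapeToHostShape does:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Drops layout information from every subshape, in place.
    void ClearAllLayouts(xla::Shape* shape) {
      // The callback receives a mutable Shape*, so edits stick.
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex& index) {
            subshape->clear_layout();
          });
    }
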
literal.h
     399  const Shape& subshape() const { return *subshape_; }  in subshape()  function
     400  void set_subshape(const Shape* subshape) { subshape_ = subshape; }  in set_subshape()

shape_util_test.cc
     503  shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  in TEST()
     518  shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  in TEST()
     540  &shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {  in TEST()

/external/tensorflow/tensorflow/core/ops/
math_ops.cc
    1128  ShapeHandle subshape;  in SegmentReductionShapeFn()  local
    1152  ShapeHandle subshape;  in SparseSegmentReductionShapeFn()  local
    1176  ShapeHandle subshape;  in SparseSegmentReductionGradShapeFn()  local
    1217  ShapeHandle subshape;  in SparseSegmentReductionWithNumSegmentsShapeFn()  local

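Unlike the XLA entries, these four hits are TensorFlow shape-inference functions: each carves the non-segment tail off the data shape with InferenceContext::Subshape, then glues it behind an output dimension for the segments. A sketch of that pattern; the function name and wiring are hypothetical, not the actual SegmentReductionShapeFn:

    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/platform/errors.h"

    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    tensorflow::Status SegmentLikeShapeFn(InferenceContext* c) {
      ShapeHandle data_shape;
      TF_RETURN_IF_ERROR(c->WithRankAtLeast(c->input(0), 1, &data_shape));
      // Everything after the leading (segmented) dimension survives.
      ShapeHandle subshape;
      TF_RETURN_IF_ERROR(c->Subshape(data_shape, 1, &subshape));
      // The number of segments is unknown at graph-construction time.
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return tensorflow::Status::OK();
    }

In math_ops.cc, functions like these are installed via SetShapeFn on the corresponding REGISTER_OP calls.
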
/external/tensorflow/tensorflow/compiler/xla/pjrt/
tracked_device_buffer_test.cc
      35  [&](const Shape& subshape, const ShapeIndex&) -> Status {  in MakeArray()

utils.cc
     173  dst_shape, [&](Shape* subshape, const ShapeIndex& idx) {  in DetermineArgumentLayoutsFromCompileOptions()

/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
      54  xla::Shape subshape =  in AllocateShapedBuffer()  local

/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
      98  const xla::Shape& subshape =  in AllocateScopedShapedBuffer()  local
     363  XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  in MakeSubBuffer()
     485  const xla::Shape& subshape =  in MakeTuple()  local

/external/tensorflow/tensorflow/compiler/xla/tests/
client_library_test_base.cc
     332  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
     389  &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  in ComputeAndCompareLiteralWithStatus()
     624  [](Shape* subshape, const ShapeIndex&) {  in MaybeConvertShapeToBfloat16()

/external/tensorflow/tensorflow/compiler/tf2xla/
shape_util.cc
      33  const xla::Shape& subshape =  in PopulateInfeedLayoutVector()  local

/external/tensorflow/tensorflow/compiler/xla/python/
xla.cc
     143  &shape_with_layout, [](Shape* subshape, const ShapeIndex&) {  in PYBIND11_MODULE()