/external/tensorflow/tensorflow/compiler/xla/service/
optimize_input_output_buffer_alias.cc
    61: *input_shapes[i], [&](const Shape& subshape, const ShapeIndex& index) {  // in Build()
    72: output_shape, [&](const Shape& subshape, const ShapeIndex& index) {  // in Build()
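Both Build() hits above pass a shape plus a two-argument lambda; that is the ShapeUtil::ForEachSubshape visitor that dominates this listing: it walks a (possibly nested tuple) shape and invokes the callback once per subshape, interior tuples and leaves alike, along with each subshape's ShapeIndex. A minimal sketch of the same pattern, assuming the XLA headers from this tree are available; CountArrayLeaves is a hypothetical helper, not code from any file above:

    #include <cstdint>

    #include "tensorflow/compiler/xla/shape_util.h"

    // Hypothetical helper: count the array (leaf) subshapes of a shape.
    // ForEachSubshape also visits interior tuple nodes, so filter with
    // ShapeUtil::IsLeafIndex before counting.
    int64_t CountArrayLeaves(const xla::Shape& shape) {
      int64_t leaves = 0;
      xla::ShapeUtil::ForEachSubshape(
          shape, [&](const xla::Shape& subshape, const xla::ShapeIndex& index) {
            if (xla::ShapeUtil::IsLeafIndex(shape, index)) {
              ++leaves;
            }
          });
      return leaves;
    }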
hlo_replication_analysis.cc
    264: inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in ComputeHloReplicationOnComputation()
    289: param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in ComputeHloReplication()
    301: param->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in ComputeHloReplication()
bfloat16_propagation.cc
    53: root->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in DetermineFusionComputationPrecision()
    101: inst->shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in RevertIfFusionInternalBF16Changes()
    178: const Shape& subshape, const ShapeIndex& index) {  // in DetermineWhileComputationsPrecision()
    209: const Shape& subshape = ShapeUtil::GetSubshape(hlo.shape(), index);  // in AllUsersConsumeBF16() [local]
    848: auto subshape = entry.first;  // in Run() [local]
    882: Shape* subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), index);  // in OutputTypeAfterChange() [local]
bfloat16_normalization.cc
    98: shape, [&](const Shape& subshape, const ShapeIndex& index) {  // in CountSubshapesWithMatchingType()
    109: shape, [&](const Shape& subshape, const ShapeIndex& index) {  // in ShapeLeafCount()
    178: hlo->mutable_shape(), [&](Shape* subshape, const xla::ShapeIndex& index) {  // in ChangeOutputTypeThenInsertConvertBack()
    351: auto subshape = ShapeUtil::GetMutableSubshape(hlo->mutable_shape(), {i});  // in HandleMultipleOutputs() [local]
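The bfloat16 passes above use both visitor flavors: lines 98 and 109 read through `const Shape&`, while line 178 mutates through `Shape*` via ShapeUtil::ForEachMutableSubshape. A sketch of the mutable side under the same header assumption; ConvertF32LeavesToBF16 is an illustrative name, not the pass's actual helper:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Illustrative only: retype every F32 array leaf of a shape to BF16 in
    // place, the kind of rewrite these bfloat16 passes perform on HLO shapes.
    void ConvertF32LeavesToBF16(xla::Shape* shape) {
      xla::ShapeUtil::ForEachMutableSubshape(
          shape, [](xla::Shape* subshape, const xla::ShapeIndex& /*index*/) {
            if (subshape->IsArray() && subshape->element_type() == xla::F32) {
              subshape->set_element_type(xla::BF16);
            }
          });
    }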
while_loop_invariant_code_motion.cc
    231: [&output_size, this](const Shape& subshape,  // in TryHoistingInvariantInstructionsFromWhileBody()
    232: const ShapeIndex& /*index*/) {  // in TryHoistingInvariantInstructionsFromWhileBody()
layout_assignment.cc
    314: const ShapeIndex& index) -> Status {  // in SetInstructionLayout()
    768: [&](const Shape& subshape, const ShapeIndex& shape_index) {  // in CheckParameterLayout()
    1395: [&](const Shape& subshape, const ShapeIndex& shape_index) {  // in PropagateOperandConstraint()
    1432: user->shape(), [&](const Shape& subshape, const ShapeIndex& shape_index) {  // in PropagateOperandConstraint()
    1609: const Shape& subshape = ShapeUtil::GetSubshape(instruction->shape(), index);  // in InferArrayLayout() [local]
    1740: [instruction, &constraints](Shape* subshape, const ShapeIndex& index) {  // in AssignLayouts()
    2016: [&](const Shape& subshape, const ShapeIndex& shape_index) {  // in PropagateComputationLayouts()
    2280: return absl::c_all_of(shape.tuple_shapes(), [](const Shape& subshape) {  // in IsAtMostRank1()
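When the ShapeIndex is already in hand, as at layout_assignment.cc line 1609 above, the code skips the walk and indexes directly with ShapeUtil::GetSubshape, or GetMutableSubshape for writes (line 1740-style call sites). A small sketch; the shape layout in the comment is hypothetical:

    #include "tensorflow/compiler/xla/shape_util.h"

    // Illustrative: for a tuple shape ((f32[2], s32[]), pred[]), the
    // ShapeIndex {0, 1} names the s32[] leaf inside the nested tuple.
    // The mutable variant is ShapeUtil::GetMutableSubshape(&shape, index).
    const xla::Shape& LeafAt01(const xla::Shape& tuple_shape) {
      return xla::ShapeUtil::GetSubshape(tuple_shape, {0, 1});
    }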
dynamic_dimension_inference.h
    178: Shape subshape = ShapeUtil::GetSubshape(inst->shape(), index);  // in SetDynamicSize() [local]
instruction_fusion.cc
    197: [&output_rank](const Shape& subshape, const ShapeIndex& shape_index) {  // in EffectivelyAtMostUnary()
    291: shape, [&size](const Shape& subshape, const ShapeIndex& shape_index) {  // in ComputeGloballyUnfusible()
hlo_element_type_converter.cc
    70: Shape subshape = ShapeUtil::GetTupleElementShape(shape, i);  // in GetConvertedTupleShape() [local]
generic_transfer_manager.cc
    79: [&](const Shape& subshape, const ShapeIndex& index) -> Status {  // in TransferLiteralFromDevice()
shaped_buffer.cc
    99: [this, &s](const Shape& subshape, const ShapeIndex& index) {  // in ToString()
/external/tensorflow/tensorflow/compiler/xla/
literal.cc
    124: const Shape& subshape = shape.tuple_shapes(i);  // in SetPiece() [local]
    576: Shape* subshape = ShapeUtil::GetMutableSubshape(&new_shape, shape_index);  // in Relayout() [local]
    592: [this, &result](const Shape& subshape, const ShapeIndex& index) {  // in Relayout()
    784: const Shape& subshape = ShapeUtil::GetSubshape(shape(), shape_index);  // in GetAsString() [local]
    895: shape(), [&](const Shape& subshape, const ShapeIndex& index) {  // in Hash()
    975: const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  // in TupleToStringHelper() [local]
    993: const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  // in DenseArrayToStringHelper() [local]
    1066: const Shape& subshape = ShapeUtil::GetSubshape(literal.shape(), shape_index);  // in ToStringHelper() [local]
    1997: const Shape& subshape = shape.tuple_shapes(i);  // in CopyPieceSubtree() [local]
    2093: const Shape& subshape = shape.tuple_shapes(i);  // in BuildPieceSubtree() [local]
literal_util.cc
    51: &result_shape, [](Shape* subshape, const ShapeIndex&) {  // in ConvertType()
    64: [&](const Shape& subshape, const ShapeIndex& shape_index) {  // in ConvertType()
shape.cc
    88: for (const Shape& subshape : tuple_shapes_) {  // in is_static() [local]
shape_util.cc
    771: const Shape* subshape = &shape;  // in IndexIsValid() [local]
    827: for (const Shape& subshape : shape.tuple_shapes()) {  // in GetLeafCount() [local]
    897: [&func](const Shape& subshape, const ShapeIndex& index) {  // in ForEachSubshape()
    910: [&func](Shape* subshape, const ShapeIndex& index) {  // in ForEachMutableSubshape()
    1476: for (const Shape& subshape : shape.tuple_shapes()) {  // in Hash() [local]
literal.h
    359: const Shape& subshape() const { return *subshape_; }  // in subshape() [function]
    360: void set_subshape(const Shape* subshape) { subshape_ = subshape; }  // in set_subshape()
shape_util_test.cc
    501: shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  // in TEST()
    516: shape, [&calls, &shape](const Shape& subshape, const ShapeIndex& index) {  // in TEST()
    538: &shape, [&calls, &shape](const Shape* subshape, const ShapeIndex& index) {  // in TEST()
/external/tensorflow/tensorflow/core/ops/
math_ops.cc
    1111: ShapeHandle subshape;  // in SegmentReductionShapeFn() [local]
    1135: ShapeHandle subshape;  // in SparseSegmentReductionShapeFn() [local]
    1159: ShapeHandle subshape;  // in SparseSegmentReductionGradShapeFn() [local]
    1200: ShapeHandle subshape;  // in SparseSegmentReductionWithNumSegmentsShapeFn() [local]
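The four math_ops.cc locals above are a different `subshape` entirely: TensorFlow's shape-inference ShapeHandle, filled by InferenceContext::Subshape, which slices a dimension range out of a handle while an op's output shape is being computed. A sketch of that idiom; ExampleSegmentShapeFn is illustrative, not one of the registered shape functions:

    #include "tensorflow/core/framework/shape_inference.h"
    #include "tensorflow/core/lib/core/errors.h"

    using tensorflow::Status;
    using tensorflow::shape_inference::InferenceContext;
    using tensorflow::shape_inference::ShapeHandle;

    // Illustrative: take data shaped [N, d1, ...], drop the leading segment
    // dimension, and emit [?, d1, ...] as the output shape.
    Status ExampleSegmentShapeFn(InferenceContext* c) {
      ShapeHandle subshape;
      TF_RETURN_IF_ERROR(c->Subshape(c->input(0), 1, &subshape));  // dims [1, rank)
      ShapeHandle out;
      TF_RETURN_IF_ERROR(c->Concatenate(
          c->Vector(InferenceContext::kUnknownDim), subshape, &out));
      c->set_output(0, out);
      return Status::OK();
    }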
/external/tensorflow/tensorflow/compiler/jit/
xla_tensor.cc
    58: xla::Shape subshape =  // in AllocateShapedBuffer() [local]
/external/tensorflow/tensorflow/compiler/xla/service/gpu/
custom_call_thunk.cc
    147: const Shape& subshape =  // in ExecuteOnStream() [local]
/external/tensorflow/tensorflow/compiler/xrt/
xrt_state.cc
    98: const xla::Shape& subshape =  // in AllocateScopedShapedBuffer() [local]
    363: XRTTupleAllocation* parent, const xla::ShapeIndex& subshape,  // in MakeSubBuffer()
    485: const xla::Shape& subshape =  // in MakeTuple() [local]
/external/tensorflow/tensorflow/compiler/tf2xla/
shape_util.cc
    33: const xla::Shape& subshape =  // in PopulateInfeedLayoutVector() [local]
/external/tensorflow/tensorflow/compiler/xla/tests/
client_library_test_base.cc
    332: &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  // in ComputeAndCompareLiteralWithStatus()
    389: &layout_shape, [&](Shape* subshape, const ShapeIndex& /*index*/) {  // in ComputeAndCompareLiteralWithStatus()
    624: [](Shape* subshape, const ShapeIndex&) {  // in MaybeConvertShapeToBfloat16()
/external/tensorflow/tensorflow/compiler/xla/python/
shared_device_buffer.cc
    136: on_device_shape, [&](const Shape& subshape, const ShapeIndex&) -> Status {  // in MakeArray()
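MakeArray above is the Status-returning flavor: a `-> Status` lambda passed to ShapeUtil::ForEachSubshapeWithStatus, which aborts the walk at the first non-OK result. A minimal sketch, again with a hypothetical helper name and assuming xla::InvalidArgument from util.h:

    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/util.h"

    // Illustrative: fail fast if any array subshape lacks a layout.
    xla::Status CheckLayoutsPresent(const xla::Shape& shape) {
      return xla::ShapeUtil::ForEachSubshapeWithStatus(
          shape, [](const xla::Shape& subshape,
                    const xla::ShapeIndex& index) -> xla::Status {
            if (subshape.IsArray() && !subshape.has_layout()) {
              return xla::InvalidArgument("no layout at shape index %s",
                                          index.ToString().c_str());
            }
            return xla::Status::OK();
          });
    }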
/external/tensorflow/tensorflow/compiler/xla/python/tpu_driver/client/
tpu_client.h
    94: StatusOr<Shape> ChooseCompactLayoutForShape(Shape subshape) {  // in ChooseCompactLayoutForShape()