/external/tensorflow/tensorflow/compiler/xla/service/ |
D | logistic_expander.cc |
    43   const Shape operand_shape = operand->shape();   in ExpandLogisticWithTanh()   local
    59   const Shape operand_shape = operand->shape();   in ExpandLogisticWithExp()   local
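The two expansion routines listed here rewrite the logistic function using standard identities. A scalar sketch of the math involved (illustrative only; the pass itself builds an equivalent HLO expression over operand_shape rather than evaluating doubles):

```cpp
#include <cmath>

// Scalar versions of the two expansions named above (illustration only; the
// pass emits HLO instructions for the same arithmetic).
double LogisticViaTanh(double x) {
  // sigmoid(x) == 0.5 + 0.5 * tanh(x / 2)
  return 0.5 + 0.5 * std::tanh(0.5 * x);
}

double LogisticViaExp(double x) {
  // sigmoid(x) == 1 / (1 + exp(-x))
  return 1.0 / (1.0 + std::exp(-x));
}
```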
|
D | shape_inference.cc |
    433   const Shape& operand_shape, PrimitiveType new_element_type) {   in InferConvertShape()
    449   const Shape& operand_shape, PrimitiveType new_element_type) {   in InferBitcastConvertShape()
    503   const Shape& operand_shape, const int exponent_bits,   in InferReducePrecisionShape()
    527   const Shape& operand_shape, const Shape& padding_value_shape,   in InferPadShape()
   1210   const Shape& operand_shape, const Shape& scale_shape,   in InferBatchNormTrainingShape()
   1315   const Shape& operand_shape, const Shape& scale_shape,   in InferBatchNormInferenceShape()
   1459   const Shape& operand_shape, const Shape& scale_shape,   in InferBatchNormGradShape()
   2043   for (const Shape* operand_shape : operand_shapes) {   in InferAllGatherShape()   local
   2081   for (const Shape* operand_shape : operand_shapes) {   in InferAllReduceShape()   local
   2099   for (const Shape* operand_shape : operand_shapes) {   in InferReduceScatterShape()   local
   [all …]
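These are the operand-shape parameters of XLA's static shape-inference entry points. As a hedged illustration of what a routine like InferConvertShape must produce, a sketch using ShapeUtil (the real function also validates the operand and returns StatusOr<Shape> so it can report errors; SketchInferConvertShape is a made-up name):

```cpp
#include "tensorflow/compiler/xla/shape.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {

// Sketch only: a convert keeps the operand's dimensions and swaps the element
// type. The real InferConvertShape additionally validates the operand and
// wraps the result in StatusOr<Shape>.
Shape SketchInferConvertShape(const Shape& operand_shape,
                              PrimitiveType new_element_type) {
  return ShapeUtil::ChangeElementType(operand_shape, new_element_type);
}

}  // namespace xla
```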
|
D | hlo_verifier.cc |
   1010   const Shape& operand_shape = instruction.operands()[i]->shape();   in SameElementTypesForOperandsAndToApplyParameters()   local
   1046   const Shape& operand_shape = bitcast->operand(0)->shape();   in HandleBitcast()   local
   1069   const Shape& operand_shape = broadcast->operand(0)->shape();   in HandleBroadcast()   local
   1087   const Shape& operand_shape = dynamic_reshape->operand(0)->shape();   in HandleDynamicReshape()   local
   1101   const Shape& operand_shape = reshape->operand(0)->shape();   in HandleReshape()   local
   1510   const Shape& operand_shape = copy_done->operand(0)->shape();   in HandleCopyDone()   local
   2319   const Shape& operand_shape = operand->shape();   in CheckElementwiseInstruction()   local
   2331   const Shape& operand_shape = comparison->operand(1)->shape();   in CheckElementwiseInstruction()   local
   2517   const Shape& operand_shape = operand->shape();   in Postprocess()   local
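The verifier matches share one pattern: fetch the operand's shape and check it against the instruction's own shape. A minimal sketch of that pattern for the reshape case, assuming the XLA headers from this tree (ReshapePreservesElementCount is a hypothetical helper, not the actual HandleReshape):

```cpp
#include "tensorflow/compiler/xla/service/hlo_instruction.h"
#include "tensorflow/compiler/xla/shape_util.h"

namespace xla {

// Hypothetical helper mirroring the verifier pattern above: a reshape must
// preserve the total element count of its operand. The actual HandleReshape
// also checks element types and dynamic dimensions.
bool ReshapePreservesElementCount(const HloInstruction* reshape) {
  const Shape& operand_shape = reshape->operand(0)->shape();
  return ShapeUtil::ElementsIn(reshape->shape()) ==
         ShapeUtil::ElementsIn(operand_shape);
}

}  // namespace xla
```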
|
D | reduce_decomposer.cc | 141 auto operand_shape = operand->shape(); in ExpectedOutputShape() local
|
D | select_and_scatter_expander.cc | 35 auto operand_shape = operand->shape(); in ExpandInstruction() local
|
D | hlo_creation_utils.cc |
    560   const Shape& operand_shape = operand->shape();   in CollapseFirstNDims()   local
    585   const Shape& operand_shape = operand->shape();   in PrependDegenerateDims()   local
    623   const Shape& operand_shape = operand->shape();   in InsertDegenerateDims()   local
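The three helpers listed here each rewrite the dimension list of operand_shape before emitting a reshape. A hypothetical, self-contained illustration of the dimension arithmetic behind a CollapseFirstNDims-style rewrite (plain C++, not the XLA helper itself):

```cpp
#include <cstdint>
#include <functional>
#include <numeric>
#include <vector>

// Hypothetical illustration: fold the first n entries of a dimension list
// into one dimension, e.g. {2, 3, 4, 5} with n = 2 becomes {6, 4, 5}.
// Assumes 1 <= n <= dims.size().
std::vector<int64_t> CollapseFirstNDimensions(const std::vector<int64_t>& dims,
                                              int64_t n) {
  const int64_t collapsed = std::accumulate(
      dims.begin(), dims.begin() + n, int64_t{1}, std::multiplies<int64_t>());
  std::vector<int64_t> result{collapsed};
  result.insert(result.end(), dims.begin() + n, dims.end());
  return result;
}
```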
|
D | batchnorm_expander.cc |
    172   const Shape operand_shape = operand->shape();   in HandleBatchNormTraining()   local
    301   const Shape operand_shape = operand->shape();   in HandleBatchNormInference()   local
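The expander lowers batch-norm instructions into elementwise arithmetic over operand_shape. The scalar form of the inference rewrite, as a sketch (the real pass broadcasts mean, variance, scale, and offset over the feature dimension and emits HLO instead):

```cpp
#include <cmath>

// Scalar form of the batch-norm-inference rewrite (sketch only).
float BatchNormInferenceScalar(float x, float mean, float variance,
                               float scale, float offset, float epsilon) {
  return (x - mean) / std::sqrt(variance + epsilon) * scale + offset;
}
```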
|
D | hlo_sharding_util.cc |
    909   const Shape& operand_shape, const HloSharding& operand_sharding,   in PassthroughOperandToGatherOutputOrScatterUpdate()
    956   const Shape& operand_shape, const HloSharding& update_or_gather_sharding,   in PassthroughGatherOutputOrScatterUpdateToOperand()
   1064   const Shape& operand_shape) {   in GatherOutputShardingFromDataOperand()
   1118   std::vector<int64_t> GetScatterSliceSize(const Shape& operand_shape,   in GetScatterSliceSize()
|
D | indexed_array_analysis.cc |
    351   absl::Span<const int64_t> operand_shape,   in ComputeReshapePassthroughDimPairs()
    450   absl::Span<const int64_t> operand_shape,   in FindSourcePositionForPassthroughResultDim()
|
D | dynamic_padder.cc |
    475   const Shape operand_shape = reshape->operand(0)->shape();   in RewriteDynamicReshapeSplitInput()   local
    779   const Shape operand_shape = reshape->operand(0)->shape();   in RewriteDynamicReshapeSingleGroup()   local
   1361   Shape operand_shape =   in RewriteDynamicSort()   local
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | dynamic_ops_test.cc |
    511   void RunR3Contiguous(std::vector<int32_t> operand_shape, int32_t index,   in RunR3Contiguous()
    671   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    677   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    683   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    689   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    695   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    701   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    707   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    713   std::vector<int32_t> operand_shape({4, 5, 2});   in XLA_TEST_F()   local
    718   std::vector<int32_t> operand_shape({3, 123, 247});   in XLA_TEST_F()   local
    [all …]
|
D | select_and_scatter_test.cc |
     44   std::vector<int64_t> operand_shape;   member
     75   auto operand_shape = GetParam().operand_shape;   in XLA_TEST_P()   local
|
/external/tensorflow/tensorflow/core/transforms/shape_inference/ |
D | pass.cc |
    109   ShapedType operand_shape = op->getOperand(0).getType().cast<ShapedType>();   in TryToCacheResultsTensorValue()   local
    122   ShapedType operand_shape = op->getOperand(0).getType().cast<ShapedType>();   in TryToCacheResultsTensorValue()   local
    141   auto operand_shape = operand_type.cast<ShapedType>();   in TryToCacheResultsTensorValue()   local
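These matches read the static shape of an operand inside the MLIR shape-inference pass by casting its type to ShapedType. A minimal sketch of that access pattern, assuming the MLIR headers from the same tree (FirstOperandHasStaticShape is a hypothetical helper name):

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Operation.h"

// Hypothetical helper showing the access pattern from the matches above:
// cast the first operand's type to ShapedType and ask whether every
// dimension is statically known.
bool FirstOperandHasStaticShape(mlir::Operation *op) {
  auto operand_shape = op->getOperand(0).getType().dyn_cast<mlir::ShapedType>();
  return operand_shape && operand_shape.hasStaticShape();
}
```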
|
/external/tensorflow/tensorflow/compiler/xla/service/llvm_ir/ |
D | ir_array.cc |
    297   const Shape& operand_shape, absl::Span<const int64_t> starts,   in SourceIndexOfSlice()
    315   const Shape& shape, const Shape& operand_shape,   in SourceIndexOfTranspose()
    330   const Shape& shape, const Shape& operand_shape,   in SourceIndexOfBitcast()
    367   const Shape& shape, const Shape& operand_shape,   in SourceIndexOfBroadcast()
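The SourceIndexOf* helpers map an index into a result array back to an index into operand_shape. For a transpose the mapping is a permutation lookup; a hypothetical standalone illustration, assuming the convention that result dimension i reads operand dimension permutation[i]:

```cpp
#include <cstdint>
#include <vector>

// Hypothetical illustration: if result dimension i reads operand dimension
// permutation[i], the operand (source) index satisfies
// source[permutation[i]] = target[i].
std::vector<int64_t> TransposeSourceIndex(
    const std::vector<int64_t>& target_index,
    const std::vector<int64_t>& permutation) {
  std::vector<int64_t> source_index(target_index.size());
  for (size_t i = 0; i < target_index.size(); ++i) {
    source_index[permutation[i]] = target_index[i];
  }
  return source_index;
}
```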
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | aggregate_ops.cc | 64 xla::Shape operand_shape; in Compile() local
|
/external/tensorflow/tensorflow/lite/toco/graph_transformations/ |
D | fuse_binary_into_preceding_affine.cc |
     58   const Shape& operand_shape = operand.shape();   in FuseAddOrSubParamsIntoPrecedingAffine()   local
    128   const Shape& operand_shape = operand.shape();   in FuseMulOrDivParamsIntoPrecedingAffine()   local
|
D | fuse_binary_into_following_affine.cc | 202 const auto& operand_shape = in Run() local
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | dynamic_shaped_ops.cc |
    140   const XlaComputation& computation) -> StatusOr<XlaComputation> {   in DynamicConditional()
    200   const XlaComputation& computation) -> StatusOr<XlaComputation> {   in DynamicConditional()
|
D | pooling_test.cc | 39 Shape operand_shape = b->GetShape(input).ValueOrDie(); in MakeGeneralPadding() local
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | ir_emission_utils.cc |
    223   const Shape& operand_shape, absl::Span<int64_t const> dims_to_reduce) {   in IsReductionFromOrToContiguousDimensionsImpl()
    257   Shape operand_shape = GetShape(first_operand);   in IsReductionFromOrToContiguousDimensions()   local
    302   Shape operand_shape = GetShape(input);   in GetReductionKindAndContiguousComponents()   local
    878   const Shape& operand_shape =   in Match021Transpose()   local
|
D | reduction_layout_normalizer.cc | 69 const Shape &operand_shape = operand->shape(); in HandleReduce() local
|
D | ir_emitter_unnested.h | 482 Shape operand_shape; member
|
D | elemental_ir_emitter.cc | 164 Shape operand_shape = hlo->operand(0)->shape(); in GetSourceIndexOfBitcast() local
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | cpu_layout_assignment.cc | 144 Shape operand_shape( in AddBackendConstraints() local
|
/external/tensorflow/tensorflow/compiler/mlir/tools/kernel_gen/transforms/ |
D | shape_simplification.cc | 120 operand_extents, [&](ArrayRef<int64_t> operand_shape) { in matchAndRewrite()
|