/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | xla_broadcast_helper_op.cc |
      38  const TensorShape lhs_shape = context->InputShape(0); in Compile() local
      41  const bool broadcast_lhs = lhs_shape.dims() < rhs_shape.dims(); in Compile()
      42  const TensorShape* min_rank_shape = broadcast_lhs ? &lhs_shape : &rhs_shape; in Compile()
      43  const TensorShape* max_rank_shape = broadcast_lhs ? &rhs_shape : &lhs_shape; in Compile()
      51  lhs_shape.dims() == rhs_shape.dims() || lhs_shape.dims() == 0 || in Compile()
      57  lhs_shape.DebugString(), " and ", rhs_shape.DebugString())); in Compile()
      69  lhs_shape.DebugString(), " and ", rhs_shape.DebugString())); in Compile()
      89  lhs_shape.DebugString(), " and ", rhs_shape.DebugString())); in Compile()
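
The snippets at lines 41-43 show how this op picks which side to broadcast: whichever operand has the smaller rank. A minimal standalone sketch of that selection, with std::vector<int64_t> standing in for TensorShape (the names here are illustrative, not from the file):

    #include <cstdint>
    #include <vector>

    // Sketch: pick which operand of a binary op gets broadcast, as in
    // xla_broadcast_helper_op.cc lines 41-43. The operand with the smaller
    // rank is the one broadcast up to the larger rank.
    using Shape = std::vector<int64_t>;

    struct BroadcastChoice {
      bool broadcast_lhs;     // true if lhs has the smaller rank
      const Shape* min_rank;  // operand that will be broadcast
      const Shape* max_rank;  // operand that keeps its shape
    };

    BroadcastChoice ChooseBroadcastSide(const Shape& lhs, const Shape& rhs) {
      const bool broadcast_lhs = lhs.size() < rhs.size();
      return {broadcast_lhs,
              broadcast_lhs ? &lhs : &rhs,
              broadcast_lhs ? &rhs : &lhs};
    }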
|
D | cwise_ops.cc |
      34  const TensorShape lhs_shape = ctx->InputShape(0); in Compile() local
      43  BCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape), in Compile()
      47  lhs_shape.DebugString(), " vs. ", in Compile()
      69  int max_rank = std::max(lhs_shape.dims(), rhs_shape.dims()); in Compile()
      70  int min_rank = std::min(lhs_shape.dims(), rhs_shape.dims()); in Compile()
      81  Computation(ctx, lhs_handle, lhs_shape.dim_sizes(), rhs_handle, in Compile()
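
Line 43 uses tensorflow::BCast to validate that the two input shapes are broadcast-compatible before lowering the elementwise op. A small sketch of that pattern, assuming it is built inside the TensorFlow tree (bcast.h is an internal header); the helper name is hypothetical:

    #include "tensorflow/core/framework/tensor_shape.h"
    #include "tensorflow/core/util/bcast.h"

    namespace tensorflow {

    // Returns true and fills *out with the broadcast result shape if
    // lhs/rhs are compatible, mirroring the check at cwise_ops.cc line 43.
    // fewer_dims_optimization=false keeps the full output rank.
    bool BroadcastShapes(const TensorShape& lhs_shape,
                         const TensorShape& rhs_shape, TensorShape* out) {
      BCast bcast(BCast::FromShape(lhs_shape), BCast::FromShape(rhs_shape),
                  /*fewer_dims_optimization=*/false);
      if (!bcast.IsValid()) return false;  // e.g. {2,3} vs {4}: incompatible
      *out = BCast::ToShape(bcast.output_shape());
      return true;
    }

    }  // namespace tensorflow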
|
D | extract_image_patches_op.cc |
      102  std::vector<int64> lhs_shape(num_dims, 1); in Compile() local
      105  lhs_shape[i] = ksizes_[input_dim]; in Compile()
      108  lhs_shape[num_spatial_dims] = depth; in Compile()
      109  lhs_shape[num_spatial_dims + 1] = 1; in Compile()
      116  auto lhs = xla::Reshape(iota, lhs_shape); in Compile()
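
Lines 102-109 assemble the shape of a synthetic filter used to gather image patches: ones everywhere except the patch window sizes in the spatial dims and the depth in the input-feature dim. A simplified standalone sketch (it omits the data-format remapping of ksizes_ that the real code performs via input_dim):

    #include <cstdint>
    #include <vector>

    // Sketch of the kernel-shape construction in extract_image_patches_op.cc
    // lines 102-109: spatial window sizes first, then the input feature
    // (depth) dim, then a trailing output-feature dim of 1.
    std::vector<int64_t> MakePatchKernelShape(
        const std::vector<int64_t>& ksizes,  // window size per spatial dim
        int num_spatial_dims, int64_t depth) {
      const int num_dims = num_spatial_dims + 2;
      std::vector<int64_t> lhs_shape(num_dims, 1);
      for (int i = 0; i < num_spatial_dims; ++i) {
        lhs_shape[i] = ksizes[i];            // spatial extent of the window
      }
      lhs_shape[num_spatial_dims] = depth;     // input feature dimension
      lhs_shape[num_spatial_dims + 1] = 1;     // output feature dimension
      return lhs_shape;
    }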
|
D | strided_slice_op.cc |
      258  TensorShape lhs_shape; in Compile() local
      260  OP_REQUIRES_OK(ctx, ctx->ReadVariableInput(0, dtype_, &lhs_shape, &lhs)); in Compile()
      268  &begin_tensor, &end_tensor, strides_tensor, lhs_shape, in Compile()
|
D | xla_dot_op.cc |
      45  const TensorShape lhs_shape = context->InputShape(0); in Compile() local
|
D | cwise_ops.h |
      60  const absl::Span<const int64>& lhs_shape, const xla::XlaOp& rhs,
|
D | xla_conv_op.cc |
      44  const TensorShape lhs_shape = context->InputShape(0); in Compile() local
|
D | binary_ops.cc |
      42  const absl::Span<const int64>& lhs_shape, const xla::XlaOp& rhs, \
      48  (void)lhs_shape; \
|
/external/tensorflow/tensorflow/compiler/xla/service/ |
D | dot_decomposer.cc |
      39  const Shape& lhs_shape = lhs->shape(); in DecomposeBatchDot() local
      51  CHECK_EQ(lhs_shape.dimensions(dnums.lhs_batch_dimensions(i)), in DecomposeBatchDot()
      53  batch_size *= lhs_shape.dimensions(dnums.lhs_batch_dimensions(i)); in DecomposeBatchDot()
      66  PrimitiveType lhs_type = lhs_shape.element_type(); in DecomposeBatchDot()
      67  const int64 lhs_rows = lhs_shape.dimensions(num_batch_dims + 0); in DecomposeBatchDot()
      68  const int64 lhs_cols = lhs_shape.dimensions(num_batch_dims + 1); in DecomposeBatchDot()
      172  const auto& lhs_shape = original_dot->operand(0)->shape(); in CanonicalizeDot() local
      173  const int64 lhs_rank = lhs_shape.rank(); in CanonicalizeDot()
      185  lhs_contracting_size *= lhs_shape.dimensions(i); in CanonicalizeDot()
      188  batch_dim_sizes.push_back(lhs_shape.dimensions(i)); in CanonicalizeDot()
      [all …]
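
Lines 51-68 flatten a batched dot into one GEMM-sized problem: all batch dimensions are folded into a single batch count, and the matrix dims are read off from the positions that follow them. A standalone sketch of that bookkeeping (names illustrative; a plain vector stands in for xla::Shape):

    #include <cstdint>
    #include <vector>

    // Sketch of DecomposeBatchDot's dimension bookkeeping
    // (dot_decomposer.cc lines 51-68): multiply the leading batch dims
    // together and take the two dims after them as the matrix extent.
    struct GemmDims { int64_t batch, rows, cols; };

    GemmDims FlattenBatchDot(const std::vector<int64_t>& lhs_dims,
                             int num_batch_dims) {
      int64_t batch_size = 1;
      for (int i = 0; i < num_batch_dims; ++i) batch_size *= lhs_dims[i];
      const int64_t lhs_rows = lhs_dims[num_batch_dims + 0];
      const int64_t lhs_cols = lhs_dims[num_batch_dims + 1];
      return {batch_size, lhs_rows, lhs_cols};
    }
    // Example: lhs [4, 8, 16, 32] with 2 batch dims -> batch 32, 16 x 32.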
|
D | batch_dot_simplification.cc |
      29  const Shape& lhs_shape = lhs->shape(); in ElideDegenerateBatchDimensionFromBatchDot() local
      40  if (lhs_shape.dimensions(batch_dim) == 1) { in ElideDegenerateBatchDimensionFromBatchDot()
|
D | hlo_cost_analysis.cc |
      240  const Shape& lhs_shape = dot->operand(0)->shape(); in HandleDot() local
      247  reduction_width *= lhs_shape.dimensions(dim); in HandleDot()
      439  const Shape& lhs_shape = lhs->shape(); in HandleConvolution() local
      448  ShapeUtil::GetDimension(lhs_shape, input_feature_dim); in HandleConvolution()
      451  const int64 batch = ShapeUtil::GetDimension(lhs_shape, input_batch_dim); in HandleConvolution()
      479  const int64 input_limit = lhs_shape.dimensions(input_spatial_dim); in HandleConvolution()
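
Line 247 accumulates the contracted dimension sizes into a reduction width; the dot's FLOP estimate is then the number of output elements times that width, with a fused multiply-add conventionally counted as two flops. A standalone sketch of the arithmetic (the helper name is hypothetical):

    #include <cstdint>
    #include <vector>

    // Sketch of the dot FLOP estimate in HloCostAnalysis::HandleDot:
    // every output element costs one multiply-add per step of the
    // contracted (reduction) dimension.
    int64_t DotFlops(const std::vector<int64_t>& output_dims,
                     int64_t reduction_width) {
      int64_t output_elements = 1;
      for (int64_t d : output_dims) output_elements *= d;
      constexpr int64_t kFmaFlops = 2;  // one multiply + one add
      return kFmaFlops * output_elements * reduction_width;
    }
    // Example: [100,100] dot [100,100] -> 2 * 10'000 * 100 = 2'000'000 flops.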
|
D | shape_inference_test.cc |
      394  Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); in TEST_F() local
      427  lhs_shape, rhs_shape, /*feature_group_count=*/1, /*batch_group_count=*/1, in TEST_F()
      439  Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 103, 4}); in TEST_F() local
      473  lhs_shape, rhs_shape, /*feature_group_count=*/1, /*batch_group_count=*/1, in TEST_F()
      485  Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); in TEST_F() local
      519  lhs_shape, rhs_shape, /*feature_group_count=*/1, /*batch_group_count=*/1, in TEST_F()
      529  Shape lhs_shape = ShapeUtil::MakeShape(F32, {10, 11, 3, 4}); in TEST_F() local
      558  lhs_shape, rhs_shape, /*feature_group_count=*/1, /*batch_group_count=*/1, in TEST_F()
      1049  Shape lhs_shape = ShapeUtil::MakeShape(F32, {5, 2, 11, 3}); in TEST_F() local
      1063  ShapeInference::InferDotOpShape(lhs_shape, rhs_shape, dot_dnums); in TEST_F()
      [all …]
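
The test around lines 1049-1063 builds two shapes and asks ShapeInference::InferDotOpShape for the result shape. A condensed sketch of that pattern, assuming the three-argument signature used in this tree; the rhs shape and expected result chosen here are illustrative, not copied from the test:

    #include "tensorflow/compiler/xla/service/shape_inference.h"
    #include "tensorflow/compiler/xla/shape_util.h"
    #include "tensorflow/compiler/xla/statusor.h"

    namespace xla {

    // Infer the result shape of a dot contracting lhs dim 3 with rhs dim 0.
    StatusOr<Shape> InferExampleDot() {
      Shape lhs_shape = ShapeUtil::MakeShape(F32, {5, 2, 11, 3});
      Shape rhs_shape = ShapeUtil::MakeShape(F32, {3, 4});  // illustrative
      DotDimensionNumbers dot_dnums;
      dot_dnums.add_lhs_contracting_dimensions(3);
      dot_dnums.add_rhs_contracting_dimensions(0);
      // Non-contracting lhs dims {5, 2, 11} followed by rhs dims {4}:
      // the inferred shape is F32[5, 2, 11, 4].
      return ShapeInference::InferDotOpShape(lhs_shape, rhs_shape, dot_dnums);
    }

    }  // namespace xla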
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | dot_op_emitter.cc |
      57  Shape lhs_shape; member
      66  lhs_shape = instr.operand(0)->shape(); in DotInfo()
      377  const Shape& lhs_shape = lhs_array_.GetShape(); in Emit() local
      380  if (ShapeUtil::IsScalar(lhs_shape) || ShapeUtil::IsScalar(rhs_shape)) { in Emit()
      382  TF_RET_CHECK(ShapeUtil::IsScalar(lhs_shape) && in Emit()
      409  const Shape& lhs_shape = lhs_array_.GetShape(); in EmitNaiveLlvmIrGemm() local
      420  CHECK_EQ(lhs_shape.dimensions(lhs_reduction_dimension), in EmitNaiveLlvmIrGemm()
      424  lhs_reduction_dimension == LayoutUtil::Minor(lhs_shape.layout(), 0); in EmitNaiveLlvmIrGemm()
      448  0, lhs_shape.dimensions(lhs_reduction_dimension), "reduction", in EmitNaiveLlvmIrGemm()
      458  llvm_ir::IrArray::Index lhs_index(lhs_multi_index, lhs_shape, in EmitNaiveLlvmIrGemm()
      [all …]
|
D | cpu_layout_assignment_test.cc |
      66  Shape lhs_shape = ShapeUtil::MakeShapeWithLayout(F32, {1, 12}, {0, 1}); in TEST_F() local
      70  HloInstruction::CreateParameter(0, lhs_shape, "param0")); in TEST_F()
      81  ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape)); in TEST_F()
      101  Shape lhs_shape = ShapeUtil::MakeShapeWithLayout(F32, {1, 12}, {0, 1}); in TEST_F() local
      105  HloInstruction::CreateParameter(0, lhs_shape, "param0")); in TEST_F()
      107  HloInstruction::CreateParameter(1, lhs_shape, "param1")); in TEST_F()
      122  ShapeLayout(LayoutUtil::GetWithDefaultLayout(lhs_shape)); in TEST_F()
      185  Shape lhs_shape = ShapeUtil::MakeShapeWithLayout(F32, {1, 12}, {0, 1}); in TEST_F() local
      189  HloInstruction::CreateConstant(Literal::CreateFromShape(lhs_shape))); in TEST_F()
      218  Shape lhs_shape = ShapeUtil::MakeShapeWithLayout(F32, {1, 12}, {0, 1}); in TEST_F() local
      [all …]
|
D | dot_op_emitter_internal.h |
      38  Shape lhs_shape; member
      45  lhs_shape = instr.operand(0)->shape(); in DotInfo()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | ir_emission_utils.cc |
      47  bool AreValidGemmShapes(const Shape& lhs_shape, const Shape& rhs_shape, in AreValidGemmShapes() argument
      58  return type_is_allowed && IsRank2(lhs_shape, batch_dimensions_size) && in AreValidGemmShapes()
      61  !ShapeUtil::IsZeroElementArray(lhs_shape) && in AreValidGemmShapes()
      67  const Shape& lhs_shape = dot.operand(0)->shape(); in DotImplementedAsGemm() local
      73  if (AreValidGemmShapes(lhs_shape, rhs_shape, dot.shape(), in DotImplementedAsGemm()
      78  CHECK_EQ(lhs_shape.dimensions(dim_numbers.lhs_contracting_dimensions(0)), in DotImplementedAsGemm()
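
AreValidGemmShapes gates the cuBLAS GEMM path: both operands must be rank-2 matrices once any leading batch dimensions are peeled off, and neither may be a zero-element array (the real check at line 58 also constrains the element type). A standalone sketch of the shape part (names illustrative):

    #include <cstdint>
    #include <vector>

    // Sketch of the shape test in AreValidGemmShapes
    // (ir_emission_utils.cc lines 47-61).
    bool IsRank2(const std::vector<int64_t>& dims, int64_t batch_dims) {
      return static_cast<int64_t>(dims.size()) == batch_dims + 2;
    }

    bool HasZeroElements(const std::vector<int64_t>& dims) {
      for (int64_t d : dims)
        if (d == 0) return true;
      return false;
    }

    bool LooksLikeGemm(const std::vector<int64_t>& lhs,
                       const std::vector<int64_t>& rhs, int64_t batch_dims) {
      return IsRank2(lhs, batch_dims) && IsRank2(rhs, batch_dims) &&
             !HasZeroElements(lhs) && !HasZeroElements(rhs);
    }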
|
D | ir_emitter.cc |
      474  const Shape& lhs_shape = lhs_instruction->shape(); in HandleDot() local
      483  if (ShapeUtil::IsScalar(lhs_shape) && ShapeUtil::IsScalar(rhs_shape)) { in HandleDot()
      490  if (ShapeUtil::ElementIsComplex(lhs_shape)) { in HandleDot()
      495  } else if (ShapeUtil::ElementIsFloating(lhs_shape)) { in HandleDot()
      498  TF_RET_CHECK(ShapeUtil::ElementIsIntegral(lhs_shape)); in HandleDot()
      507  TF_RET_CHECK(!ShapeUtil::IsScalar(lhs_shape) && in HandleDot()
      520  TF_RET_CHECK(lhs_shape.dimensions(lhs_reduction_dimension) == in HandleDot()
      523  << ") = " << lhs_shape.dimensions(lhs_reduction_dimension) in HandleDot()
      548  /*end_index=*/lhs_shape.dimensions(lhs_reduction_dimension), in HandleDot()
      587  if (ShapeUtil::ElementIsComplex(lhs_shape)) { in HandleDot()
      [all …]
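
For dots that do not qualify as a GEMM library call, HandleDot emits a scalar loop over the shared reduction dimension (lines 520-548), accumulating products into each output element. A rough model in plain C++, standing in for the LLVM IR the emitter actually builds:

    #include <cstdint>
    #include <vector>

    // Naive dot, modeling the reduction loop in ir_emitter.cc HandleDot.
    void NaiveDot(const std::vector<float>& lhs,  // [m, k], row-major
                  const std::vector<float>& rhs,  // [k, n], row-major
                  std::vector<float>& out,        // [m, n]
                  int64_t m, int64_t k, int64_t n) {
      for (int64_t i = 0; i < m; ++i) {
        for (int64_t j = 0; j < n; ++j) {
          float acc = 0.0f;  // accumulator, as in the emitted loop body
          for (int64_t r = 0; r < k; ++r) {  // r: lhs_reduction_dimension
            acc += lhs[i * k + r] * rhs[r * n + j];
          }
          out[i * n + j] = acc;
        }
      }
    }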
|
D | gpu_layout_assignment.cc |
      101  Shape lhs_shape = instr->operand(0)->shape(); in AddBackendConstraintsToDnnConvCustomCall() local
      113  input_shape = &lhs_shape; in AddBackendConstraintsToDnnConvCustomCall()
      120  output_shape = &lhs_shape; in AddBackendConstraintsToDnnConvCustomCall()
      123  input_shape = &lhs_shape; in AddBackendConstraintsToDnnConvCustomCall()
      152  TF_RETURN_IF_ERROR(constraints->SetOperandLayout(lhs_shape, instr, 0)); in AddBackendConstraintsToDnnConvCustomCall()
|
D | cudnn_conv_runner.cc |
      329  const auto& lhs_shape = conv->operand(0)->shape(); in GetCudnnConvParams() local
      343  params.input_shape = &lhs_shape; in GetCudnnConvParams()
      353  params.output_shape = &lhs_shape; in GetCudnnConvParams()
      359  params.input_shape = &lhs_shape; in GetCudnnConvParams()
      368  params.input_shape = &lhs_shape; in GetCudnnConvParams()
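
The branches at lines 343-368 show that operand(0) of the conv custom call plays a different role per convolution kind: it is the input for forward and filter-gradient convs, but the output gradient for the backward-input conv (hence output_shape at line 353). A sketch of that mapping; the enum and function are illustrative, not the real CudnnConvKind API:

    // Model of the operand-0 role mapping in GetCudnnConvParams.
    enum class ConvKind { kForward, kBackwardInput, kBackwardFilter };

    const char* RoleOfOperand0(ConvKind kind) {
      switch (kind) {
        case ConvKind::kForward:        return "input";   // x in y = conv(x, w)
        case ConvKind::kBackwardInput:  return "output";  // dy, solving for dx
        case ConvKind::kBackwardFilter: return "input";   // x, solving for dw
      }
      return "";  // unreachable
    }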
|
D | gemm_thunk.h |
      43  const Shape& lhs_shape, const Shape& rhs_shape,
|
D | gemm_thunk.cc |
      296  const Shape& lhs_shape, const Shape& rhs_shape, in GemmThunk() argument
      304  lhs_shape_(lhs_shape), in GemmThunk()
|
/external/tensorflow/tensorflow/core/kernels/ |
D | cwise_ops_test.cc |
      190  TensorShape lhs_shape; in BiasAddGrad() local
      192  lhs_shape = TensorShape({channels, rows, cols}); in BiasAddGrad()
      194  lhs_shape = TensorShape({rows, cols, channels}); in BiasAddGrad()
      196  Tensor lhs(type, lhs_shape); in BiasAddGrad()
      247  TensorShape lhs_shape, rhs_shape; in BcastAdd() local
      249  lhs_shape = TensorShape({rows, cols}); in BcastAdd()
      252  lhs_shape = TensorShape({rows, cols}); in BcastAdd()
      255  lhs_shape = TensorShape({rows, 1}); in BcastAdd()
      258  lhs_shape = TensorShape({1, cols}); in BcastAdd()
      261  Tensor lhs(DT_FLOAT, lhs_shape); in BcastAdd()
|
/external/tensorflow/tensorflow/compiler/xla/tests/ |
D | matrix_ops_simple_test.cc |
      196  Shape lhs_shape = in TestImpl() local
      211  auto lhs_arg = Parameter(&builder, 0, lhs_shape, "lhs"); in TestImpl()
|
D | xla_hlo_profile_test.cc |
      192  Shape lhs_shape = ShapeUtil::MakeShape(F32, {m, k}); in XLA_TEST_F() local
      208  ExecuteAndFetchProfile(&profile_output, client, computation, lhs_shape, in XLA_TEST_F()
|
/external/tensorflow/tensorflow/compiler/xla/client/ |
D | xla_builder.cc |
      487  TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs)); in BinaryOp()
      491  binop, lhs_shape, rhs_shape, broadcast_dimensions)); in BinaryOp()
      505  const int64 lhs_rank = lhs_shape.rank(); in BinaryOp()
      514  const Shape& from_shape = should_broadcast_lhs ? lhs_shape : rhs_shape; in BinaryOp()
      557  TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs)); in TernaryOp()
      561  Shape shape, ShapeInference::InferTernaryOpShape(triop, lhs_shape, in TernaryOp()
      568  if (!lhs_shape.IsTuple() && in TernaryOp()
      569  !ShapeUtil::SameDimensions(shape, lhs_shape)) { in TernaryOp()
      1013  TF_ASSIGN_OR_RETURN(const Shape& lhs_shape, GetShape(lhs)); in Dot()
      1017  lhs_shape.dimensions_size() == 1 ? 0 : 1); in Dot()
      [all …]
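
In BinaryOp (lines 487-514), when the operand ranks differ the lower-rank shape is lined up against the higher-rank one along broadcast_dimensions, and size-1 dims are then broadcast. A rough standalone model of the shape-side computation only, with validation omitted and names illustrative:

    #include <cstdint>
    #include <vector>

    // Model of the implicit-broadcast shape arithmetic in
    // XlaBuilder::BinaryOp. broadcast_dims[i] says which dimension of the
    // higher-rank shape the i-th dimension of the lower-rank shape maps to;
    // a degenerate (size-1) target dimension is stretched to match.
    std::vector<int64_t> ImplicitBroadcastShape(
        const std::vector<int64_t>& from_shape,        // lower-rank operand
        const std::vector<int64_t>& to_shape,          // higher-rank operand
        const std::vector<int64_t>& broadcast_dims) {  // one per from-dim
      std::vector<int64_t> out = to_shape;
      for (size_t i = 0; i < broadcast_dims.size(); ++i) {
        const int64_t d = broadcast_dims[i];
        if (out[d] == 1) out[d] = from_shape[i];  // stretch degenerate dim
      }
      return out;
    }
    // Example: from {3}, to {2, 1, 4}, broadcast_dims {1} -> {2, 3, 4}.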
|