/external/tensorflow/tensorflow/compiler/xla/service/ |
D | gather_expander.cc |
     33   if (start_indices_shape.dimensions_size() == index_vector_dim) {   in TransposeIndexVectorDimToLast()
     37   if (index_vector_dim == (start_indices_shape.dimensions_size() - 1)) {   in TransposeIndexVectorDimToLast()
     42   permutation.reserve(start_indices_shape.dimensions_size());   in TransposeIndexVectorDimToLast()
     43   for (int64 i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {   in TransposeIndexVectorDimToLast()
     63   index_vector_dim == start_indices->shape().dimensions_size();   in CanonicalizeGatherIndices()
     73   if (shape.dimensions_size() == index_dims_in_start_indices) {   in CanonicalizeGatherIndices()
     80   shape.dimensions_size() - index_dims_in_start_indices);   in CanonicalizeGatherIndices()
     90   batch_dim_bounds.reserve(start_indices_shape.dimensions_size());   in AdjustBatchDimsInAccumulator()
     91   for (int64 i = 0, e = start_indices_shape.dimensions_size(); i < e; i++) {   in AdjustBatchDimsInAccumulator()
    159   bool has_scalar_indices = start_indices->shape().dimensions_size() == 1;   in GatherLoopBody()
    [all …]
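Read in order, the TransposeIndexVectorDimToLast() hits spell out the whole transformation: return the indices unchanged when index_vector_dim equals the rank (an implicit trailing index vector) or is already the last dimension, otherwise build a permutation that moves it to the back. A minimal, self-contained sketch of that permutation step, using a bare rank and std::vector<int64_t> in place of xla::Shape and HLO instructions (the helper name is illustrative, not the real XLA entry point):

```cpp
#include <cstdint>
#include <vector>

// Permutation that moves `index_vector_dim` to the last position while
// keeping every other dimension in its original order, mirroring the loop
// visible at gather_expander.cc:42-43. Illustrative sketch only.
std::vector<int64_t> IndexVectorDimToLastPermutation(int64_t rank,
                                                     int64_t index_vector_dim) {
  std::vector<int64_t> permutation;
  permutation.reserve(rank);
  for (int64_t i = 0; i < rank; i++) {
    if (i != index_vector_dim) permutation.push_back(i);
  }
  permutation.push_back(index_vector_dim);
  return permutation;
}
```

For rank 4 and index_vector_dim 1 this yields {0, 2, 3, 1}; the early returns at lines 33 and 37 skip the transpose entirely when the permutation would be the identity.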
|
D | convolution_4d_expander_test.cc |
     47   EXPECT_EQ(root->window().dimensions_size(), 4);   in TEST_F()
     55   EXPECT_EQ(new_convolution->window().dimensions_size(), 2);   in TEST_F()
     72   EXPECT_EQ(root->window().dimensions_size(), 4);   in TEST_F()
     82   EXPECT_EQ(new_convolution->window().dimensions_size(), 3);   in TEST_F()
     99   EXPECT_EQ(root->window().dimensions_size(), 4);   in TEST_F()
    107   EXPECT_EQ(new_convolution->window().dimensions_size(), 0);   in TEST_F()
    124   EXPECT_EQ(root->window().dimensions_size(), 3);   in TEST_F()
    143   EXPECT_EQ(root->window().dimensions_size(), 4);   in TEST_F()
    162   EXPECT_EQ(root->window().dimensions_size(), 4);   in TEST_F()
|
D | scatter_expander.cc |
     35   if (scatter_indices_shape.dimensions_size() == index_vector_dim) {   in TransposeIndexVectorDimToLast()
     39   if (index_vector_dim == (scatter_indices_shape.dimensions_size() - 1)) {   in TransposeIndexVectorDimToLast()
     44   permutation.reserve(scatter_indices_shape.dimensions_size());   in TransposeIndexVectorDimToLast()
     45   for (int64 i = 0, e = scatter_indices_shape.dimensions_size(); i < e; i++) {   in TransposeIndexVectorDimToLast()
     70   index_vector_dim == scatter_indices->shape().dimensions_size();   in CanonicalizeScatterIndices()
     80   if (shape.dimensions_size() == index_dims_in_scatter_indices) {   in CanonicalizeScatterIndices()
     87   shape.dimensions_size() - index_dims_in_scatter_indices);   in CanonicalizeScatterIndices()
    117   int64 num_scatter_dims = scatter_indices_shape.dimensions_size();   in AdjustScatterDims()
    118   if (index_vector_dim < scatter_indices_shape.dimensions_size()) {   in AdjustScatterDims()
    229   bool has_scalar_indices = scatter_indices->shape().dimensions_size() == 1;   in ScatterLoopBody()
    [all …]
|
D | shape_inference.cc |
    163   if (window.dimensions_size() != base_shape.rank()) {   in InferWindowOutputShape()
    166   window.dimensions_size(), base_shape.rank());   in InferWindowOutputShape()
    169   std::vector<int64> output_dimensions(window.dimensions_size());   in InferWindowOutputShape()
    170   std::vector<bool> output_is_dynamic(window.dimensions_size());   in InferWindowOutputShape()
    171   for (int64 i = 0; i < window.dimensions_size(); ++i) {   in InferWindowOutputShape()
    416   for (int64 i = 0; i < shape->dimensions_size(); ++i) {   in InferConcatOpShape()
    514   if (operand_shape.rank() != padding_config.dimensions_size()) {   in InferPadShape()
    540   for (int64 i = 0; i < operand_shape.dimensions_size(); ++i) {   in InferPadShape()
    819   for (int i = 0; i < smaller_shape.dimensions_size(); ++i) {   in InferInDimBroadcastShape()
    826   if (dimension_to_match >= larger_shape.dimensions_size()) {   in InferInDimBroadcastShape()
    [all …]
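The InferWindowOutputShape() hits show a rank check (window.dimensions_size() must match base_shape.rank()) followed by a per-dimension loop filling output_dimensions. A hedged sketch of the standard window-output arithmetic that loop performs, written over plain integers rather than xla::WindowDimension fields (the free-function form and name are illustrative):

```cpp
#include <cstdint>

// One output extent per window dimension: dilate the base, apply padding,
// dilate the window, then count how many window placements fit when
// stepping by `stride` (stride >= 1 assumed). Sketch of the standard XLA
// window arithmetic, not the real InferWindowOutputShape().
int64_t WindowOutputSize(int64_t base, int64_t window, int64_t stride,
                         int64_t pad_low, int64_t pad_high,
                         int64_t base_dilation, int64_t window_dilation) {
  const int64_t dilated_base = (base - 1) * base_dilation + 1;
  const int64_t padded_base = dilated_base + pad_low + pad_high;
  const int64_t dilated_window = (window - 1) * window_dilation + 1;
  if (dilated_window > padded_base) return 0;  // window does not fit
  return (padded_base - dilated_window) / stride + 1;
}
// e.g. base=5, window=3, stride=1, no padding or dilation -> 3.
```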
|
D | hlo_creation_utils.cc |
    384   CHECK_GE(operand_shape.dimensions_size(), n);   in CollapseFirstNDims()
    391   new_shape_dims.reserve(operand_shape.dimensions_size() - n + 1);   in CollapseFirstNDims()
    409   new_shape_dims.reserve(n + operand_shape.dimensions_size());   in PrependDegenerateDims()
    417   CHECK_GT(operand->shape().dimensions_size(), 0);   in ExpandFirstDimIntoNDims()
    422   operand->shape().dimensions_size() - 1);   in ExpandFirstDimIntoNDims()
    447   operand_shape.dimensions_size() + dims_to_insert.size();   in InsertDegenerateDims()
    477   CHECK_EQ(operand->shape().dimensions_size(), 1);   in PadVectorWithZeros()
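The reserve() calls document the shape algebra directly: CollapseFirstNDims() produces rank - n + 1 dimensions and PrependDegenerateDims() produces n + rank. A self-contained sketch of the collapse over a bare dims vector (assumed semantics; the real helper builds a reshape HLO instruction):

```cpp
#include <cstdint>
#include <vector>

// Fold the leading n dimensions into one dimension holding their product,
// so the result has dims.size() - n + 1 entries, matching the reserve()
// at hlo_creation_utils.cc:391. Illustrative sketch, requires n <= rank.
std::vector<int64_t> CollapseFirstNDims(const std::vector<int64_t>& dims,
                                        int64_t n) {
  int64_t collapsed = 1;
  for (int64_t i = 0; i < n; ++i) collapsed *= dims[i];
  std::vector<int64_t> new_dims;
  new_dims.reserve(dims.size() - n + 1);
  new_dims.push_back(collapsed);
  new_dims.insert(new_dims.end(), dims.begin() + n, dims.end());
  return new_dims;
}
// {2, 3, 4, 5} with n = 2 collapses to {6, 4, 5}; PrependDegenerateDims
// goes the other way, inserting n leading size-1 dimensions.
```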
|
D | indexed_array_analysis.cc |
    203   std::vector<IndexComponent> simulated_index(a->shape().dimensions_size(),   in FoldGatherOfGather()
    256   if (dim_numbers.index_vector_dim() != indices->shape().dimensions_size()) {   in ComputeArrayForGather()
    278   for (int64 i = 0, e = source->shape().dimensions_size(); i < e; i++) {   in ComputeArrayForGather()
    292   for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) {   in ComputeArrayForGather()
    478   for (int64 i = 0, e = source_shape.dimensions_size(); i < e; i++) {   in ReshapeToRemoveDegenerateDims()
    500   for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) {   in ReshapeToRemoveDegenerateDims()
    536   operand->shape().dimensions_size());   in ReshapeToAddDegenerateDims()
    623   for (int64 i = 0, e = shape.dimensions_size(); i < e; i++) {   in FoldReshapeOfGather()
    892   broadcast_instr->shape().dimensions_size(), IndexComponent::Broadcasted);   in ComputeArrayForElementwiseBinaryOp()
|
D | depthwise_convolution_converter.cc |
     87   int64 num_dims = shape.dimensions_size();   in SwapInputOutputFeatureDims()
    150   std::vector<int64> transpose_dims(lhs->shape().dimensions_size());   in HandleBackwardFilterBatchGroupConvolution()
|
D | hlo_sharding.cc |
    168   return std::vector<int64>(shape.dimensions_size(), 0);   in TileOffsetForDevice()
    171   CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions());   in TileOffsetForDevice()
    190   CHECK_EQ(shape.dimensions_size(), tile_assignment_.num_dimensions());   in TileLimitForDevice()
    439   for (int64 i = 0; i < shape.dimensions_size(); ++i) {   in TileShape()
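These hits suggest the tiling arithmetic: a replicated sharding reports an all-zero offset (line 168), while a tiled sharding requires the shape's rank to match the tile assignment's rank before computing per-dimension offsets. A hedged sketch of that computation over plain vectors; the real HloSharding derives tile_index from the device's position in tile_assignment_, and the rounded-up per-tile extent matches what TileShape() computes:

```cpp
#include <cstdint>
#include <vector>

// Hedged sketch: a device's tile offset is its per-dimension tile index
// times the rounded-up per-tile extent; a replicated sharding owns the
// whole buffer. Illustrative free function, not the HloSharding method.
std::vector<int64_t> TileOffset(const std::vector<int64_t>& shape_dims,
                                const std::vector<int64_t>& tile_counts,
                                const std::vector<int64_t>& tile_index,
                                bool replicated) {
  if (replicated) return std::vector<int64_t>(shape_dims.size(), 0);
  std::vector<int64_t> offset(shape_dims.size());
  for (size_t i = 0; i < shape_dims.size(); ++i) {
    // Per-tile extent rounds up (ceil division), as in TileShape().
    const int64_t tile_dim =
        (shape_dims[i] + tile_counts[i] - 1) / tile_counts[i];
    offset[i] = tile_index[i] * tile_dim;
  }
  return offset;
}
```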
|
D | hlo_evaluator_typed_visitor.h |
    1057   CHECK_EQ(window.dimensions_size(), num_spatial_dims);
    1423   pad->padding_config().dimensions_size());
    1473   std::vector<int64> zero_base(evaluated_operand.shape().dimensions_size(),
    1475   std::vector<int64> step(evaluated_operand.shape().dimensions_size(), 1);
    1852   DimensionVector window_index(window.dimensions_size());
    1901   if (indices.shape().dimensions_size() != index_vector_dim) {
    1917   int64 updates_rank = updates_shape.dimensions_size();
    1936   int64 updates_rank = updates_shape.dimensions_size();
    1966   for (int64 i = 0; i < updates_shape.dimensions_size(); i++) {
    1971   for (int64 i = 0; i < input_shape.dimensions_size(); i++) {
    [all …]
|
/external/tensorflow/tensorflow/compiler/xla/service/cpu/ |
D | ir_emission_utils.cc |
    105   dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 &&   in PotentiallyImplementedAsEigenConvolution()
    108   output_shape.dimensions_size() - 1 &&   in PotentiallyImplementedAsEigenConvolution()
    110   kernel_shape.dimensions_size() - 2 &&   in PotentiallyImplementedAsEigenConvolution()
    112   kernel_shape.dimensions_size() - 1;   in PotentiallyImplementedAsEigenConvolution()
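The four hits form a single conjunction: the Eigen convolution path is only considered when the operands are channels-last, i.e. the input and output feature dimensions are the last logical dimensions and the kernel keeps input/output features as its two trailing dimensions. The same predicate restated as a standalone function, with the dimension numbers passed in explicitly (illustrative form of the check, not the real signature):

```cpp
#include <cstdint>

// Channels-last layout test behind PotentiallyImplementedAsEigenConvolution():
// feature dims must be the trailing dims of input/output, and the kernel
// must store {..., input_feature, output_feature} as its last two dims.
bool HasEigenFriendlyFeatureDims(int64_t input_rank, int64_t input_feature_dim,
                                 int64_t output_rank, int64_t output_feature_dim,
                                 int64_t kernel_rank,
                                 int64_t kernel_input_feature_dim,
                                 int64_t kernel_output_feature_dim) {
  return input_feature_dim == input_rank - 1 &&
         output_feature_dim == output_rank - 1 &&
         kernel_input_feature_dim == kernel_rank - 2 &&
         kernel_output_feature_dim == kernel_rank - 1;
}
```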
|
D | cpu_layout_assignment.cc |
     82   std::vector<int64> dimension_order(new_shape.dimensions_size());   in RowMajorShape()
     90   std::vector<int64> dimension_order(new_shape.dimensions_size());   in ColMajorShape()
|
D | cpu_instruction_fusion.cc |
     48   hlo->opcode() == HloOpcode::kDot && hlo_shape.dimensions_size() <= 1 &&   in IsNonComplexNonBatchedMatrixVectorDot()
    151   if (output_shape.dimensions_size() <= 1) {   in ShouldFuse()
|
/external/tensorflow/tensorflow/compiler/xla/service/gpu/ |
D | gpu_conv_padding_legalization.cc |
     63   MakeNoPaddingConfig(input->shape().dimensions_size());   in MaybePaddedAndSlicedInput()
     94   std::vector<int64> start_indices(input->shape().dimensions_size(), 0);   in MaybePaddedAndSlicedInput()
     97   std::vector<int64> strides(input->shape().dimensions_size(), 1);   in MaybePaddedAndSlicedInput()
    132   for (size_t i = 0; i < kernel->shape().dimensions_size(); ++i) {   in MaybePaddedKernel()
    167   for (size_t i = 0; i < new_conv_window.dimensions_size(); ++i) {   in CanonicalizeForwardConvolution()
    225   for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {   in CanonicalizeBackwardFilterConvolution()
    290   for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {   in CanonicalizeBackwardInputConvolution()
    347   std::vector<int64> start_indices(new_backward_conv->shape().dimensions_size(),   in CanonicalizeBackwardInputConvolution()
    352   std::vector<int64> strides(new_backward_conv->shape().dimensions_size(), 1LL);   in CanonicalizeBackwardInputConvolution()
    353   for (size_t i = 0; i < backward_conv->window().dimensions_size(); ++i) {   in CanonicalizeBackwardInputConvolution()
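The MaybePaddedAndSlicedInput() hits (a no-padding config plus all-zero start_indices and all-one strides) point at the pass's strategy: padding the cuDNN convolution cannot express itself is materialized as an explicit pad for positive amounts and a slice for negative amounts, leaving the conv window with zero padding. A hedged sketch of that per-edge decomposition (struct and helper names are illustrative; the real pass emits HLO kPad and kSlice instructions):

```cpp
#include <algorithm>
#include <cstdint>

// Per-edge decomposition sketch: a positive conv padding amount becomes
// explicit kPad padding; a negative amount becomes a slice that trims
// |amount| elements from that edge. Illustrative names, assumed semantics.
struct EdgePadding {
  int64_t pad_amount;   // goes into the explicit kPad
  int64_t slice_start;  // goes into the follow-up slice
};

EdgePadding DecomposePadding(int64_t conv_padding) {
  return {std::max<int64_t>(conv_padding, 0),
          std::max<int64_t>(-conv_padding, 0)};
}
// e.g. low-edge padding -2 -> pad 0, slice start 2; padding 3 -> pad 3,
// slice start 0. The default start_indices/strides are the 0s and 1s
// visible in the hits above.
```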
|
D | fft_thunk.cc |
    127   for (int i = 0; i < input_shape_.dimensions_size() - fft_rank; ++i) {   in ExecuteOnStream()
    139   auto dim_offset = input_shape_.dimensions_size() - fft_rank + i;   in ExecuteOnStream()
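The two hits split the input rank: the trailing fft_rank dimensions are the transform axes, and everything before them (dimensions_size() - fft_rank of them) is batch. A small sketch of that split (illustrative helper, not the real thunk):

```cpp
#include <cstdint>
#include <vector>

// Split a shape into (batch size, FFT lengths): the last `fft_rank`
// dimensions are transformed, the leading dimensions multiply out into
// one batch count. Mirrors the index arithmetic at fft_thunk.cc:127/139.
void SplitFftDims(const std::vector<int64_t>& dims, int64_t fft_rank,
                  int64_t* batch_size, std::vector<int64_t>* fft_lengths) {
  const int64_t batch_rank = static_cast<int64_t>(dims.size()) - fft_rank;
  *batch_size = 1;
  for (int64_t i = 0; i < batch_rank; ++i) *batch_size *= dims[i];
  fft_lengths->assign(dims.begin() + batch_rank, dims.end());
}
// An [8, 4, 256] input with fft_rank 1 runs 32 batched length-256 FFTs.
```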
|
/external/tensorflow/tensorflow/compiler/xla/client/lib/ |
D | sorting.cc |
     29   int last_dim = input_shape.dimensions_size() - 1;   in TopK()
     46   std::vector<int64> start_indices(input_shape.dimensions_size(), 0);   in TopK()
     49   std::vector<int64> strides(input_shape.dimensions_size(), 1);   in TopK()
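The TopK() hits give away the recipe: operate on the last dimension, then slice with all-zero starts and unit strides, keeping k elements along that axis of the sorted result. A sketch of the same recipe on a single row of plain floats, standing in for the Sort-plus-Slice XLA graph the client library builds:

```cpp
#include <algorithm>
#include <cstdint>
#include <numeric>
#include <vector>

// Top-k via sort-then-slice, on one row (assumes k <= row.size()). The
// XLA version sorts values with an iota carried along for the indices,
// then slices the first k entries of the last dimension.
std::vector<int64_t> TopKIndices(const std::vector<float>& row, int64_t k) {
  std::vector<int64_t> idx(row.size());
  std::iota(idx.begin(), idx.end(), 0);
  // Descending sort by value, keeping original positions as the payload.
  std::stable_sort(idx.begin(), idx.end(),
                   [&](int64_t a, int64_t b) { return row[a] > row[b]; });
  idx.resize(k);  // the "slice": first k along the sorted axis
  return idx;
}
```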
|
D | quantize.h |
    124   std::vector<int64> shift_transpose_dimensions(shape.dimensions_size());
    128   shape.dimensions_size());
    159   std::vector<int64> transpose_dimensions(shape.dimensions_size());
    176   std::vector<int64> result_dimensions(shape.dimensions_size());
|
/external/tensorflow/tensorflow/compiler/xla/ |
D | shape.cc |
     26   dimensions_.reserve(shape_proto.dimensions_size());   in Shape()
     35   if (shape_proto.dimensions_size() !=   in Shape()
     45   shape_proto.dimensions_size(), shape_proto.is_dynamic_dimension_size());   in Shape()
     61   proto.mutable_dimensions()->Reserve(dimensions_size());   in ToProto()
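Lines 35 and 45 are a consistency check in the proto constructor: the is_dynamic_dimension mask must line up one-to-one with dimensions (an empty mask meaning an all-static shape). A sketch of that validation with plain structs standing in for the protobuf types (field and function names are illustrative):

```cpp
#include <cstdint>
#include <cstdio>
#include <vector>

// Stand-ins for the repeated fields of xla::ShapeProto.
struct ShapeProtoLite {
  std::vector<int64_t> dimensions;
  std::vector<bool> is_dynamic_dimension;
};

// Sketch of the check at shape.cc:35-45: the dynamic-dimension mask is
// either absent or exactly as long as the dimensions list.
bool ValidDynamicDimensions(const ShapeProtoLite& proto) {
  if (proto.is_dynamic_dimension.empty()) return true;  // all static
  if (proto.is_dynamic_dimension.size() != proto.dimensions.size()) {
    std::fprintf(stderr, "malformed proto: %zu dims vs %zu dynamic flags\n",
                 proto.dimensions.size(), proto.is_dynamic_dimension.size());
    return false;
  }
  return true;
}
```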
|
D | text_literal_reader.cc |
    113   if (coordinate_values.size() != shape.dimensions_size()) {   in ReadAllLines()
    117   shape.dimensions_size(), coordinate_values.size(), line);   in ReadAllLines()
|
D | index_util.cc |
     30   DCHECK_EQ(shape.dimensions_size(), multi_index.size());   in MultidimensionalIndexToLinearIndex()
    104   std::vector<int64> multi_index(shape.dimensions_size());   in LinearIndexToMultidimensionalIndex()
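These two functions are inverses, and the DCHECK at line 30 pins the contract: the multi-index must have exactly dimensions_size() entries. A self-contained sketch of the row-major form of the conversion; the real IndexUtil honors the shape's minor_to_major layout rather than assuming row-major order:

```cpp
#include <cstdint>
#include <vector>

// Row-major linearization: the last dimension is minor-most.
int64_t LinearIndex(const std::vector<int64_t>& dims,
                    const std::vector<int64_t>& multi_index) {
  int64_t linear = 0;
  for (size_t i = 0; i < dims.size(); ++i)
    linear = linear * dims[i] + multi_index[i];
  return linear;
}

// The inverse: peel off one coordinate per dimension, minor-most first.
std::vector<int64_t> MultiIndex(const std::vector<int64_t>& dims,
                                int64_t linear) {
  std::vector<int64_t> multi(dims.size());
  for (int64_t i = static_cast<int64_t>(dims.size()) - 1; i >= 0; --i) {
    multi[i] = linear % dims[i];
    linear /= dims[i];
  }
  return multi;
}
// For dims {3, 4}, index {2, 1} maps to 9 and back again.
```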
|
D | literal_comparison.cc |
    174   if (dimension == expected.shape().dimensions_size()) {   in Equal()
    709   std::vector<int64> multi_index(expected.shape().dimensions_size(), 0);   in EqualHelper()
    904   if (expected.dimensions_size() != actual.dimensions_size()) {   in EqualShapes()
    906   expected.dimensions_size(),   in EqualShapes()
    907   actual.dimensions_size());   in EqualShapes()
    909   for (int i = 0; i < expected.dimensions_size(); ++i) {   in EqualShapes()
|
D | shape_util.cc |
     235   std::vector<int64> dims(shape.dimensions_size());   in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
     236   for (int i = 0; i < shape.dimensions_size(); ++i) {   in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
     246   for (int i = 0; i < shape.dimensions_size(); ++i) {   in MakeShapeWithDescendingLayoutAndSamePhysicalLayout()
     427   DCHECK_EQ(shape.dimensions_size(), shape.rank());   in ElementsIn()
     482   for (int i = 0; i < shape.dimensions_size(); ++i) {   in HumanString()
     668   if (shape.dimensions_size() != 0) {   in ValidateShapeWithOptionalLayoutInternal()
     685   if (shape.dimensions_size() != 0) {   in ValidateShapeWithOptionalLayoutInternal()
    1466   for (int i = 0; i < shape.dimensions_size(); ++i) {   in Hash()
|
/external/tensorflow/tensorflow/compiler/tf2xla/kernels/ |
D | conv_op_helpers.cc |
     50   int64 input_feature_dim = filter_shape.dimensions_size() - 2;   in GroupedFilterShapeForDepthwiseConvolution()
     51   int64 output_feature_dim = filter_shape.dimensions_size() - 1;   in GroupedFilterShapeForDepthwiseConvolution()
     69   int num_dims = filter_shape.dimensions_size();   in TransposeFilterForGroupConvolutionBackpropInput()
     93   int num_dims = input_shape.dimensions_size();   in TransposeInputForGroupConvolutionBackpropFilter()
    219   if (input_shape.dimensions_size() != num_dims) {   in MakeXlaForwardConvOp()
    223   if (filter_shape.dimensions_size() != num_dims) {   in MakeXlaForwardConvOp()
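Lines 50-51 name the two trailing filter dimensions: (input_feature, depthwise_multiplier). A hedged sketch of how GroupedFilterShapeForDepthwiseConvolution() plausibly rewrites them so a depthwise conv can run as a grouped conv with group_count == input_feature: each group sees one input feature, and the output-feature dimension absorbs the product (bare dims vector, assumed semantics):

```cpp
#include <cstdint>
#include <vector>

// Depthwise filter {spatial..., in_features, multiplier} rewritten for a
// grouped convolution: per-group input features become 1 and the output
// features become in_features * multiplier. Hedged sketch only.
std::vector<int64_t> GroupedFilterDims(std::vector<int64_t> filter_dims) {
  const int64_t input_feature_dim = filter_dims.size() - 2;
  const int64_t output_feature_dim = filter_dims.size() - 1;
  const int64_t merged =
      filter_dims[input_feature_dim] * filter_dims[output_feature_dim];
  filter_dims[input_feature_dim] = 1;
  filter_dims[output_feature_dim] = merged;
  return filter_dims;
}
// A depthwise filter {3, 3, 8, 2} becomes {3, 3, 1, 16}.
```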
|
D | cast_op.cc |
    165   xla_input_shape.dimensions_size() - 1);   in Compile()
    191   input_xla_shape.dimensions_size() - 1));   in Compile()
    195   {input_xla_shape.dimensions_size() - 1});   in Compile()
|
D | tensor_list_utils.cc |
    331   element_part_shape.dimensions_size() + 1,   in ExecuteTensorListPushBack()
    347   std::vector<xla::XlaOp> start_indices(element_shape.dimensions_size() + 1,   in ExecuteTensorListPushBack()
    386   std::vector<xla::XlaOp> start_indices(list_part_shape.dimensions_size(),   in ExecuteTensorListPopBack()
    434   std::vector<xla::XlaOp> start_indices(element_shape.dimensions_size() + 1,   in ExecuteTensorListSetItem()
    467   std::vector<xla::XlaOp> start_indices(buffer_shape.dimensions_size(),   in ExecuteTensorListGetItem()
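Every hit here builds a start-index vector whose length is the element rank plus one, reflecting the list buffer's extra leading dimension (the list axis). A sketch of the shape of that index vector; in the real code the entries are xla::XlaOp scalars, so the write position can be a runtime value (helper name is illustrative):

```cpp
#include <cstdint>
#include <vector>

// Start indices for a dynamic-update-slice that writes one element into
// a list buffer of rank element_rank + 1: the push position selects the
// slot on the leading list axis, all element dims start at 0.
std::vector<int64_t> PushBackStartIndices(int64_t element_rank,
                                          int64_t push_index) {
  std::vector<int64_t> start_indices(element_rank + 1, 0);
  start_indices[0] = push_index;
  return start_indices;
}
// Pop/get use the same layout with the slot index read back out, which
// is why PopBack/GetItem size their vectors from the buffer's full rank.
```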
|
/external/tensorflow/tensorflow/compiler/tf2xla/lib/ |
D | scatter.cc |
    140   ? indices_shape.dimensions_size() - 1   in XlaScatter()
    141   : indices_shape.dimensions_size());   in XlaScatter()
|