/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/

#include "tensorflow/compiler/xla/service/cpu/ir_emission_utils.h"

#include "tensorflow/compiler/xla/layout_util.h"
#include "tensorflow/compiler/xla/service/cpu/cpu_runtime.h"
#include "tensorflow/compiler/xla/service/hlo_module.h"
#include "tensorflow/compiler/xla/shape_util.h"
#include "tensorflow/compiler/xla/window_util.h"

namespace xla {
namespace cpu {

int64 GetMinimumAlignmentForArray(
    const Shape& shape, const TargetMachineFeatures& target_machine_features) {
  CHECK(shape.IsArray());
  CHECK(!LayoutUtil::HasLayout(shape) || LayoutUtil::IsDense(shape.layout()));

  // We don't require a layout to be set on `shape`.  This only works on CPU
  // because we don't pad our tensors or otherwise have complicated data tiling
  // schemes.

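  // With a dense layout the allocation size is just the element count times
  // the byte size of the element type; the alignment the runtime can
  // guarantee is a function of that size.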
  int64 allocation_size_bytes =
      ShapeUtil::ElementsIn(shape) *
      ShapeUtil::ByteSizeOfPrimitiveType(shape.element_type());
  return target_machine_features.minimum_alignment_for_allocation(
      allocation_size_bytes);
}

bool PotentiallyImplementedAsEigenConvolution(
    const HloInstruction& convolution,
    const TargetMachineFeatures& target_machine_features) {
  // The following conditions are necessary (but not sufficient) for
  // implementing `convolution` with Eigen convolution:
  // - the input and kernel have a non-zero number of elements.
  // - the input is in NHWC order.
  // - the kernel is in HWIO order.
  //
  // To be sufficient, certain layout constraints need to be satisfied as well.
  const Shape& input_shape = convolution.operand(0)->shape();
  const Shape& kernel_shape = convolution.operand(1)->shape();
  const Shape& output_shape = convolution.shape();

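  // Eigen expects tensor buffers to be aligned to at least
  // TargetMachineFeatures::kEigenExpectedTensorAlignment bytes; reject shapes
  // whose allocations cannot guarantee that alignment.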
  auto is_aligned = [&](const Shape& shape) {
    return GetMinimumAlignmentForArray(shape, target_machine_features) >=
           TargetMachineFeatures::kEigenExpectedTensorAlignment;
  };

  if (!is_aligned(input_shape) || !is_aligned(kernel_shape) ||
      !is_aligned(output_shape)) {
    return false;
  }

  if (ShapeUtil::IsZeroElementArray(input_shape) ||
      ShapeUtil::IsZeroElementArray(kernel_shape)) {
    return false;
  }
  // Make sure the input and kernel have the same data type.
  CHECK(
      ShapeUtil::SameElementTypeIgnoringFpPrecision(input_shape, kernel_shape));
  // TODO(b/65408531): Explore using Eigen dot for complex64 type.
  PrimitiveType primitive_type = input_shape.element_type();
  if (primitive_type != F16 && primitive_type != F32) {
    return false;
  }
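  // Reversed convolution windows are not supported by the Eigen path.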
  if (window_util::HasWindowReversal(convolution.window())) {
    return false;
  }

  const ConvolutionDimensionNumbers& dnums =
      convolution.convolution_dimension_numbers();
  // Only 1D and 2D convolutions are supported at the moment.
  // TODO(b/32897908): add an optimized implementation for 3D convolution.
  const int64 num_spatial_dims = dnums.output_spatial_dimensions_size();
  if (num_spatial_dims > 2) {
    return false;
  }

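  // In NHWC order the batch dimension comes first and the feature dimension
  // comes last, so spatial dimension i of the input and output must map to
  // logical dimension i + 1.  In HWIO order the spatial dimensions come
  // first, so kernel spatial dimension i must map to logical dimension i.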
  for (int64 i = 0; i < num_spatial_dims; ++i) {
    if (dnums.input_spatial_dimensions(i) != i + 1) {
      return false;
    }
    if (dnums.kernel_spatial_dimensions(i) != i) {
      return false;
    }
    if (dnums.output_spatial_dimensions(i) != i + 1) {
      return false;
    }
  }

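  // Finally, check the non-spatial dimensions: batch first and features last
  // for both the input and the output, with the kernel's input and output
  // feature dimensions in the last two positions (the I and O of HWIO).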
  return dnums.input_batch_dimension() == 0 &&
         dnums.input_feature_dimension() == input_shape.dimensions_size() - 1 &&
         dnums.output_batch_dimension() == 0 &&
         dnums.output_feature_dimension() ==
             output_shape.dimensions_size() - 1 &&
         dnums.kernel_input_feature_dimension() ==
             kernel_shape.dimensions_size() - 2 &&
         dnums.kernel_output_feature_dimension() ==
             kernel_shape.dimensions_size() - 1;
}

}  // namespace cpu
}  // namespace xla