• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/* Copyright 2017 The TensorFlow Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_
#define TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_

#define EIGEN_USE_CUSTOM_THREAD_POOL
#define EIGEN_USE_THREADS

// NOTE: Eigen is slightly different internally and externally. We need to
// hack the unsupported/Eigen/CXX11/Tensor header instantiation macros at
// specific places, so we need two copies of the hacked file, one for
// internal and one for external.
// If you have trouble simply undef out the reducer macro e.g.
// TFLITE_REDUCE_INSTANTIATIONS_GOOGLE, but be aware this will make
// the binary much bigger!
// #define TFLITE_REDUCE_INSTANTIATIONS_OPEN_SOURCE
#define Eigen EigenForTFLite
#if defined(TFLITE_REDUCE_INSTANTIATIONS_GOOGLE)
#include "tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_google.h"
#elif defined(TFLITE_REDUCE_INSTANTIATIONS_OPEN_SOURCE)
#include "tensorflow/lite/kernels/internal/optimized/eigen_tensor_reduced_instantiations_oss.h"
#else
#include "unsupported/Eigen/CXX11/Tensor"
#endif
39 namespace Eigen {
40 
41 /** SpatialConvolution
42  * \ingroup CXX11_NeuralNetworks_Module
43  *
44  * \brief Applies a 2D convolution over a multichannel input image.
45  *
46  * The input parameter is expected to be a tensor with a rank of 3 or more
47  * (channels, height, width, and optionally others)
48  * The kernel parameter is expected to be a 4D tensor (filters, channels,
49  * kernel_height, kernel_width)
50  * The input and the kernel must both be in col-major layout. The result will
51  * also be in col-major layout.
52  *
53  * If col_in_stride, row_in_stride > 1, then applies convolution with holes
54  * (aka atrous convolution), sampling every col_in_stride, row_in_stride input
55  * pixels.
56  *
57  * The result can be assigned to a tensor of rank equal to the rank of the
58  * input. The dimensions of the result will be filters, height, width (and
59  * others if applicable).
60  *
61  * It is possible to swap the order of the width and height dimensions provided
62  * that the same order is used in the input, the kernel, and the output.
63  *
64  */
65 template <typename Input, typename Kernel>
66 EIGEN_DEVICE_FUNC
67     EIGEN_ALWAYS_INLINE static const typename internal::conditional<
68         internal::traits<Input>::Layout == ColMajor,
69         TensorReshapingOp<
70             const DSizes<typename internal::traits<Input>::Index,
71                          internal::traits<Input>::NumDimensions>,
72             const TensorContractionOp<
73                 const array<IndexPair<typename internal::traits<Input>::Index>,
74                             1>,
75                 const TensorReshapingOp<
76                     const DSizes<typename internal::traits<Input>::Index, 2>,
77                     const Kernel>,
78                 const TensorReshapingOp<
79                     const DSizes<typename internal::traits<Input>::Index, 2>,
80                     const TensorImagePatchOp<Dynamic, Dynamic,
81                                              const Input> > > >,
82         TensorReshapingOp<
83             const DSizes<typename internal::traits<Input>::Index,
84                          internal::traits<Input>::NumDimensions>,
85             const TensorContractionOp<
86                 const array<IndexPair<typename internal::traits<Input>::Index>,
87                             1>,
88                 const TensorReshapingOp<
89                     const DSizes<typename internal::traits<Input>::Index, 2>,
90                     const TensorImagePatchOp<Dynamic, Dynamic, const Input> >,
91                 const TensorReshapingOp<
92                     const DSizes<typename internal::traits<Input>::Index, 2>,
93                     const Kernel> > > >::type
94     SpatialConvolution(const Input& input, const Kernel& kernel,
95                        const DenseIndex row_stride = 1,
96                        const DenseIndex col_stride = 1,
97                        const PaddingType padding_type = PADDING_SAME,
98                        const DenseIndex row_in_stride = 1,
99                        const DenseIndex col_in_stride = 1) {
100   typedef typename internal::traits<Input>::Index TensorIndex;
101   TensorRef<Tensor<typename internal::traits<Input>::Scalar,
102                    internal::traits<Input>::NumDimensions,
103                    internal::traits<Input>::Layout, TensorIndex> >
104       in(input);
105   TensorRef<Tensor<typename internal::traits<Kernel>::Scalar,
106                    internal::traits<Kernel>::NumDimensions,
107                    internal::traits<Kernel>::Layout, TensorIndex> >
108       kern(kernel);
109 
110   EIGEN_STATIC_ASSERT(
111       internal::traits<Input>::Layout == internal::traits<Kernel>::Layout,
112       YOU_MADE_A_PROGRAMMING_MISTAKE);
113   const bool isColMajor = (internal::traits<Input>::Layout == ColMajor);
114 
115   const int NumDims = internal::traits<Input>::NumDimensions;
116 
117   // Number of filters to apply. This is the same as the output depth of the
118   // result
119   const TensorIndex kernelFilters =
120       isColMajor ? kern.dimensions()[0] : kern.dimensions()[3];
121   // Number of channels. This is the same as the input depth.
122   const TensorIndex kernelChannels =
123       isColMajor ? kern.dimensions()[1] : kern.dimensions()[2];
124   const TensorIndex kernelRows =
125       isColMajor ? kern.dimensions()[2] : kern.dimensions()[1];
126   const TensorIndex kernelCols =
127       isColMajor ? kern.dimensions()[3] : kern.dimensions()[0];
128 
129   const DenseIndex kernelRowsEff =
130       kernelRows + (kernelRows - 1) * (row_in_stride - 1);
131   const DenseIndex kernelColsEff =
132       kernelCols + (kernelCols - 1) * (col_in_stride - 1);
133 
134   array<IndexPair<TensorIndex>, 1> contract_dims;
135   contract_dims[0] = IndexPair<TensorIndex>(1, 0);
136 
137   const TensorIndex InputRows =
138       isColMajor ? in.dimension(1) : in.dimension(NumDims - 2);
139   const TensorIndex InputCols =
140       isColMajor ? in.dimension(2) : in.dimension(NumDims - 3);
141 
142   TensorIndex out_height;
143   TensorIndex out_width;
144   switch (padding_type) {
145     case PADDING_VALID:
146       out_height = numext::ceil((InputRows - kernelRowsEff + 1.f) /
147                                 static_cast<float>(row_stride));
148       out_width = numext::ceil((InputCols - kernelColsEff + 1.f) /
149                                static_cast<float>(col_stride));
150       break;
151     case PADDING_SAME:
152       out_height = numext::ceil(InputRows / static_cast<float>(row_stride));
153       out_width = numext::ceil(InputCols / static_cast<float>(col_stride));
154       break;
155     default:
156       // Initialize unused variables to avoid a compiler warning
157       out_height = 0;
158       out_width = 0;
159       eigen_assert(false && "unexpected padding");
160   }
161 
162   // Molds the output of the patch extraction code into a 2d tensor:
163   // - the first dimension (dims[0]): the patch values to be multiplied with the
164   // kernels
165   // - the second dimension (dims[1]): everything else
166   DSizes<TensorIndex, 2> pre_contract_dims;
167   if (isColMajor) {
168     pre_contract_dims[0] = kernelChannels * kernelRows * kernelCols;
169     pre_contract_dims[1] = out_height * out_width;
170     for (int i = 3; i < NumDims; ++i) {
171       pre_contract_dims[1] *= in.dimension(i);
172     }
173   } else {
174     pre_contract_dims[1] = kernelChannels * kernelRows * kernelCols;
175     pre_contract_dims[0] = out_height * out_width;
176     for (int i = 0; i < NumDims - 3; ++i) {
177       pre_contract_dims[0] *= in.dimension(i);
178     }
179   }
180 
181   // Molds the output of the contraction into the shape expected by the used
182   // (assuming this is ColMajor):
183   // - 1st dim: kernel filters
184   // - 2nd dim: output height
185   // - 3rd dim: output width
186   // - 4th dim and beyond: everything else including batch size
187   DSizes<TensorIndex, NumDims> post_contract_dims;
188   if (isColMajor) {
189     post_contract_dims[0] = kernelFilters;
190     post_contract_dims[1] = out_height;
191     post_contract_dims[2] = out_width;
192     for (int i = 3; i < NumDims; ++i) {
193       post_contract_dims[i] = in.dimension(i);
194     }
195   } else {
196     post_contract_dims[NumDims - 1] = kernelFilters;
197     post_contract_dims[NumDims - 2] = out_height;
198     post_contract_dims[NumDims - 3] = out_width;
199     for (int i = 0; i < NumDims - 3; ++i) {
200       post_contract_dims[i] = in.dimension(i);
201     }
202   }
203 
204   DSizes<TensorIndex, 2> kernel_dims;
205   if (isColMajor) {
206     kernel_dims[0] = kernelFilters;
207     kernel_dims[1] = kernelChannels * kernelRows * kernelCols;
208   } else {
209     kernel_dims[0] = kernelChannels * kernelRows * kernelCols;
210     kernel_dims[1] = kernelFilters;
211   }
212   // TODO(yangke): choose() is defined in TensorContraction.h -- consider
213   // moving it to somewhere more "common".
214   return input
215       .extract_image_patches(kernelRows, kernelCols, row_stride, col_stride,
216                              row_in_stride, col_in_stride, padding_type)
217       .reshape(pre_contract_dims)
218       .contract(kernel.reshape(kernel_dims), contract_dims)
219       .reshape(post_contract_dims);
220 }
221 
222 }  // end namespace Eigen

// clang-format on

#endif  // TENSORFLOW_LITE_KERNELS_INTERNAL_OPTIMIZED_EIGEN_SPATIAL_CONVOLUTIONS_H_