• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 
4 #ifndef EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
5 #define EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
6 
7 namespace Eigen {
8 
/** \class TensorVolumePatch
  * \ingroup CXX11_Tensor_Module
  *
  * \brief Patch extraction specialized for processing of volumetric data.
  * This assumes that the input has at least 4 dimensions ordered as follows:
  *  - channels
  *  - planes
  *  - rows
  *  - columns
  *  - (optional) additional dimensions such as time or batch size.
  * Calling the volume patch code with patch_planes, patch_rows, and patch_cols
  * is equivalent to calling the regular patch extraction code with parameters
  * d, patch_planes, patch_rows, patch_cols, and 1 for all the additional
  * dimensions.
  */
24 namespace internal {
25 
// Traits for TensorVolumePatchOp: inherit everything from the argument
// expression's traits and bump the rank by one.
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct traits<TensorVolumePatchOp<Planes, Rows, Cols, XprType> > : public traits<XprType>
{
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef traits<XprType> XprTraits;
  typedef typename XprTraits::StorageKind StorageKind;
  typedef typename XprTraits::Index Index;
  typedef typename XprType::Nested Nested;
  typedef typename remove_reference<Nested>::type _Nested;
  // The result carries one extra dimension: the patch index.
  static const int NumDimensions = XprTraits::NumDimensions + 1;
  static const int Layout = XprTraits::Layout;
  typedef typename XprTraits::PointerType PointerType;

};
40 
// Evaluate TensorVolumePatchOp lazily, by const reference (no temporary).
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, Eigen::Dense>
{
  typedef const TensorVolumePatchOp<Planes, Rows, Cols, XprType>& type;
};
46 
// Nesting strategy: store the op by value when nested inside a larger
// expression tree.
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
struct nested<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, 1, typename eval<TensorVolumePatchOp<Planes, Rows, Cols, XprType> >::type>
{
  typedef TensorVolumePatchOp<Planes, Rows, Cols, XprType> type;
};
52 
53 }  // end namespace internal
54 
55 template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename XprType>
56 class TensorVolumePatchOp : public TensorBase<TensorVolumePatchOp<Planes, Rows, Cols, XprType>, ReadOnlyAccessors>
57 {
58   public:
59   typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Scalar Scalar;
60   typedef typename Eigen::NumTraits<Scalar>::Real RealScalar;
61   typedef typename XprType::CoeffReturnType CoeffReturnType;
62   typedef typename Eigen::internal::nested<TensorVolumePatchOp>::type Nested;
63   typedef typename Eigen::internal::traits<TensorVolumePatchOp>::StorageKind StorageKind;
64   typedef typename Eigen::internal::traits<TensorVolumePatchOp>::Index Index;
65 
66   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
67                                                             DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
68                                                             DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
69                                                             DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
70                                                             PaddingType padding_type, Scalar padding_value)
71                                                             : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
72                                                             m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
73                                                             m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
74                                                             m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
75                                                             m_padding_explicit(false), m_padding_top_z(0), m_padding_bottom_z(0), m_padding_top(0), m_padding_bottom(0), m_padding_left(0), m_padding_right(0),
76                                                             m_padding_type(padding_type), m_padding_value(padding_value) {}
77 
78   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorVolumePatchOp(const XprType& expr, DenseIndex patch_planes, DenseIndex patch_rows, DenseIndex patch_cols,
79                                                            DenseIndex plane_strides, DenseIndex row_strides, DenseIndex col_strides,
80                                                            DenseIndex in_plane_strides, DenseIndex in_row_strides, DenseIndex in_col_strides,
81                                                            DenseIndex plane_inflate_strides, DenseIndex row_inflate_strides, DenseIndex col_inflate_strides,
82                                                            DenseIndex padding_top_z, DenseIndex padding_bottom_z,
83                                                            DenseIndex padding_top, DenseIndex padding_bottom,
84                                                            DenseIndex padding_left, DenseIndex padding_right,
85                                                            Scalar padding_value)
86                                                            : m_xpr(expr), m_patch_planes(patch_planes), m_patch_rows(patch_rows), m_patch_cols(patch_cols),
87                                                            m_plane_strides(plane_strides), m_row_strides(row_strides), m_col_strides(col_strides),
88                                                            m_in_plane_strides(in_plane_strides), m_in_row_strides(in_row_strides), m_in_col_strides(in_col_strides),
89                                                            m_plane_inflate_strides(plane_inflate_strides), m_row_inflate_strides(row_inflate_strides), m_col_inflate_strides(col_inflate_strides),
90                                                            m_padding_explicit(true), m_padding_top_z(padding_top_z), m_padding_bottom_z(padding_bottom_z), m_padding_top(padding_top), m_padding_bottom(padding_bottom),
91                                                            m_padding_left(padding_left), m_padding_right(padding_right),
92                                                            m_padding_type(PADDING_VALID), m_padding_value(padding_value) {}
93 
94     EIGEN_DEVICE_FUNC
95     DenseIndex patch_planes() const { return m_patch_planes; }
96     EIGEN_DEVICE_FUNC
97     DenseIndex patch_rows() const { return m_patch_rows; }
98     EIGEN_DEVICE_FUNC
99     DenseIndex patch_cols() const { return m_patch_cols; }
100     EIGEN_DEVICE_FUNC
101     DenseIndex plane_strides() const { return m_plane_strides; }
102     EIGEN_DEVICE_FUNC
103     DenseIndex row_strides() const { return m_row_strides; }
104     EIGEN_DEVICE_FUNC
105     DenseIndex col_strides() const { return m_col_strides; }
106     EIGEN_DEVICE_FUNC
107     DenseIndex in_plane_strides() const { return m_in_plane_strides; }
108     EIGEN_DEVICE_FUNC
109     DenseIndex in_row_strides() const { return m_in_row_strides; }
110     EIGEN_DEVICE_FUNC
111     DenseIndex in_col_strides() const { return m_in_col_strides; }
112     EIGEN_DEVICE_FUNC
113     DenseIndex plane_inflate_strides() const { return m_plane_inflate_strides; }
114     EIGEN_DEVICE_FUNC
115     DenseIndex row_inflate_strides() const { return m_row_inflate_strides; }
116     EIGEN_DEVICE_FUNC
117     DenseIndex col_inflate_strides() const { return m_col_inflate_strides; }
118     EIGEN_DEVICE_FUNC
119     bool padding_explicit() const { return m_padding_explicit; }
120     EIGEN_DEVICE_FUNC
121     DenseIndex padding_top_z() const { return m_padding_top_z; }
122     EIGEN_DEVICE_FUNC
123     DenseIndex padding_bottom_z() const { return m_padding_bottom_z; }
124     EIGEN_DEVICE_FUNC
125     DenseIndex padding_top() const { return m_padding_top; }
126     EIGEN_DEVICE_FUNC
127     DenseIndex padding_bottom() const { return m_padding_bottom; }
128     EIGEN_DEVICE_FUNC
129     DenseIndex padding_left() const { return m_padding_left; }
130     EIGEN_DEVICE_FUNC
131     DenseIndex padding_right() const { return m_padding_right; }
132     EIGEN_DEVICE_FUNC
133     PaddingType padding_type() const { return m_padding_type; }
134     EIGEN_DEVICE_FUNC
135     Scalar padding_value() const { return m_padding_value; }
136 
137     EIGEN_DEVICE_FUNC
138     const typename internal::remove_all<typename XprType::Nested>::type&
139     expression() const { return m_xpr; }
140 
141   protected:
142     typename XprType::Nested m_xpr;
143     const DenseIndex m_patch_planes;
144     const DenseIndex m_patch_rows;
145     const DenseIndex m_patch_cols;
146     const DenseIndex m_plane_strides;
147     const DenseIndex m_row_strides;
148     const DenseIndex m_col_strides;
149     const DenseIndex m_in_plane_strides;
150     const DenseIndex m_in_row_strides;
151     const DenseIndex m_in_col_strides;
152     const DenseIndex m_plane_inflate_strides;
153     const DenseIndex m_row_inflate_strides;
154     const DenseIndex m_col_inflate_strides;
155     const bool m_padding_explicit;
156     const DenseIndex m_padding_top_z;
157     const DenseIndex m_padding_bottom_z;
158     const DenseIndex m_padding_top;
159     const DenseIndex m_padding_bottom;
160     const DenseIndex m_padding_left;
161     const DenseIndex m_padding_right;
162     const PaddingType m_padding_type;
163     const Scalar m_padding_value;
164 };
165 
166 
167 // Eval as rvalue
// Evaluator for TensorVolumePatchOp (rvalue only).  Every output coefficient
// index is decomposed into (depth, plane, row, col, patch, other) coordinates
// and mapped back to a coefficient of the input tensor; indices that land in
// padding (or on a zero inserted by inflation) yield m_paddingValue instead.
template<DenseIndex Planes, DenseIndex Rows, DenseIndex Cols, typename ArgType, typename Device>
struct TensorEvaluator<const TensorVolumePatchOp<Planes, Rows, Cols, ArgType>, Device>
{
  typedef TensorVolumePatchOp<Planes, Rows, Cols, ArgType> XprType;
  typedef typename XprType::Index Index;
  static const int NumInputDims = internal::array_size<typename TensorEvaluator<ArgType, Device>::Dimensions>::value;
  // One extra output dimension: the patch index.
  static const int NumDims = NumInputDims + 1;
  typedef DSizes<Index, NumDims> Dimensions;
  typedef typename internal::remove_const<typename XprType::Scalar>::type Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;
  typedef typename PacketType<CoeffReturnType, Device>::type PacketReturnType;
  static const int PacketSize = PacketType<CoeffReturnType, Device>::size;
  typedef StorageMemory<CoeffReturnType, Device> Storage;
  typedef typename Storage::Type EvaluatorPointerType;

  enum {
    IsAligned = false,
    PacketAccess = TensorEvaluator<ArgType, Device>::PacketAccess,
    BlockAccess = false,
    PreferBlockAccess = TensorEvaluator<ArgType, Device>::PreferBlockAccess,
    Layout = TensorEvaluator<ArgType, Device>::Layout,
    CoordAccess = false,
    RawAccess = false
  };

  //===- Tensor block evaluation strategy (see TensorBlock.h) -------------===//
  typedef internal::TensorBlockNotImplemented TensorBlock;
  //===--------------------------------------------------------------------===//

  // Precomputes all output dimensions, strides, and fast integer divisors so
  // that coeff()/packet() only perform index arithmetic per coefficient.
  EIGEN_STRONG_INLINE TensorEvaluator(const XprType& op, const Device& device) :
 m_impl(op.expression(), device)
  {
    // Input needs at least 4 dims (depth, planes, rows, cols); output >= 5.
    EIGEN_STATIC_ASSERT((NumDims >= 5), YOU_MADE_A_PROGRAMMING_MISTAKE);

    m_paddingValue = op.padding_value();

    const typename TensorEvaluator<ArgType, Device>::Dimensions& input_dims = m_impl.dimensions();

    // Cache a few variables.
    // Depth/planes/rows/cols sit at opposite ends of the dimension list
    // depending on the storage order.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_inputDepth = input_dims[0];
      m_inputPlanes = input_dims[1];
      m_inputRows = input_dims[2];
      m_inputCols = input_dims[3];
    } else {
      m_inputDepth = input_dims[NumInputDims-1];
      m_inputPlanes = input_dims[NumInputDims-2];
      m_inputRows = input_dims[NumInputDims-3];
      m_inputCols = input_dims[NumInputDims-4];
    }

    m_plane_strides = op.plane_strides();
    m_row_strides = op.row_strides();
    m_col_strides = op.col_strides();

    // Input strides and effective input/patch size
    m_in_plane_strides = op.in_plane_strides();
    m_in_row_strides = op.in_row_strides();
    m_in_col_strides = op.in_col_strides();
    m_plane_inflate_strides = op.plane_inflate_strides();
    m_row_inflate_strides = op.row_inflate_strides();
    m_col_inflate_strides = op.col_inflate_strides();

    // The "effective" spatial size after inflating data with zeros.
    m_input_planes_eff = (m_inputPlanes - 1) * m_plane_inflate_strides + 1;
    m_input_rows_eff = (m_inputRows - 1) * m_row_inflate_strides + 1;
    m_input_cols_eff = (m_inputCols - 1) * m_col_inflate_strides + 1;
    // Effective patch extent once the in-patch (dilation) strides are applied.
    m_patch_planes_eff = op.patch_planes() + (op.patch_planes() - 1) * (m_in_plane_strides - 1);
    m_patch_rows_eff = op.patch_rows() + (op.patch_rows() - 1) * (m_in_row_strides - 1);
    m_patch_cols_eff = op.patch_cols() + (op.patch_cols() - 1) * (m_in_col_strides - 1);

    if (op.padding_explicit()) {
      m_outputPlanes = numext::ceil((m_input_planes_eff + op.padding_top_z() + op.padding_bottom_z() - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
      m_outputRows = numext::ceil((m_input_rows_eff + op.padding_top() + op.padding_bottom() - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
      m_outputCols = numext::ceil((m_input_cols_eff + op.padding_left() + op.padding_right() - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
      m_planePaddingTop = op.padding_top_z();
      m_rowPaddingTop = op.padding_top();
      m_colPaddingLeft = op.padding_left();
    } else {
      // Computing padding from the type
      switch (op.padding_type()) {
        case PADDING_VALID:
          // No padding: only patches fully inside the input are produced.
          m_outputPlanes = numext::ceil((m_input_planes_eff - m_patch_planes_eff + 1.f) / static_cast<float>(m_plane_strides));
          m_outputRows = numext::ceil((m_input_rows_eff - m_patch_rows_eff + 1.f) / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil((m_input_cols_eff - m_patch_cols_eff + 1.f) / static_cast<float>(m_col_strides));
          m_planePaddingTop = 0;
          m_rowPaddingTop = 0;
          m_colPaddingLeft = 0;
          break;
        case PADDING_SAME: {
          // Output covers the whole input; the total padding dz/dy/dx is
          // split so the top/left side gets the smaller half.
          m_outputPlanes = numext::ceil(m_input_planes_eff / static_cast<float>(m_plane_strides));
          m_outputRows = numext::ceil(m_input_rows_eff / static_cast<float>(m_row_strides));
          m_outputCols = numext::ceil(m_input_cols_eff / static_cast<float>(m_col_strides));
          const Index dz = (m_outputPlanes - 1) * m_plane_strides + m_patch_planes_eff - m_input_planes_eff;
          const Index dy = (m_outputRows - 1) * m_row_strides + m_patch_rows_eff - m_input_rows_eff;
          const Index dx = (m_outputCols - 1) * m_col_strides + m_patch_cols_eff - m_input_cols_eff;
          m_planePaddingTop = dz / 2;
          m_rowPaddingTop = dy / 2;
          m_colPaddingLeft = dx / 2;
          break;
        }
        default:
          // NOTE(review): when eigen_assert compiles to a no-op (NDEBUG),
          // this path leaves m_outputPlanes/Rows/Cols and the padding
          // members uninitialized — presumably unreachable; confirm.
          eigen_assert(false && "unexpected padding");
      }
    }
    eigen_assert(m_outputRows > 0);
    eigen_assert(m_outputCols > 0);
    eigen_assert(m_outputPlanes > 0);

    // Dimensions for result of extraction.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      // ColMajor
      // 0: depth
      // 1: patch_planes
      // 2: patch_rows
      // 3: patch_cols
      // 4: number of patches
      // 5 and beyond: anything else (such as batch).
      m_dimensions[0] = input_dims[0];
      m_dimensions[1] = op.patch_planes();
      m_dimensions[2] = op.patch_rows();
      m_dimensions[3] = op.patch_cols();
      m_dimensions[4] = m_outputPlanes * m_outputRows * m_outputCols;
      for (int i = 5; i < NumDims; ++i) {
        m_dimensions[i] = input_dims[i-1];
      }
    } else {
      // RowMajor
      // NumDims-1: depth
      // NumDims-2: patch_planes
      // NumDims-3: patch_rows
      // NumDims-4: patch_cols
      // NumDims-5: number of patches
      // NumDims-6 and beyond: anything else (such as batch).
      m_dimensions[NumDims-1] = input_dims[NumInputDims-1];
      m_dimensions[NumDims-2] = op.patch_planes();
      m_dimensions[NumDims-3] = op.patch_rows();
      m_dimensions[NumDims-4] = op.patch_cols();
      m_dimensions[NumDims-5] = m_outputPlanes * m_outputRows * m_outputCols;
      for (int i = NumDims-6; i >= 0; --i) {
        m_dimensions[i] = input_dims[i];
      }
    }

    // Strides for the output tensor.
    // Note: rowStride/colStride are expressed in "patch offset" units (i.e.
    // they do not include the depth dimension), whereas patchStride and
    // otherStride are in absolute coefficient units.
    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_rowStride = m_dimensions[1];
      m_colStride = m_dimensions[2] * m_rowStride;
      m_patchStride = m_colStride * m_dimensions[3] * m_dimensions[0];
      m_otherStride = m_patchStride * m_dimensions[4];
    } else {
      m_rowStride = m_dimensions[NumDims-2];
      m_colStride = m_dimensions[NumDims-3] * m_rowStride;
      m_patchStride = m_colStride * m_dimensions[NumDims-4] * m_dimensions[NumDims-1];
      m_otherStride = m_patchStride * m_dimensions[NumDims-5];
    }

    // Strides for navigating through the input tensor.
    m_planeInputStride = m_inputDepth;
    m_rowInputStride = m_inputDepth * m_inputPlanes;
    m_colInputStride = m_inputDepth * m_inputRows * m_inputPlanes;
    m_otherInputStride = m_inputDepth * m_inputRows * m_inputCols * m_inputPlanes;

    m_outputPlanesRows = m_outputPlanes * m_outputRows;

    // Fast representations of different variables.
    // TensorIntDivisor replaces runtime integer division by a cheaper
    // multiply/shift sequence for repeated divisions by the same value.
    m_fastOtherStride = internal::TensorIntDivisor<Index>(m_otherStride);

    m_fastPatchStride = internal::TensorIntDivisor<Index>(m_patchStride);
    m_fastColStride = internal::TensorIntDivisor<Index>(m_colStride);
    m_fastRowStride = internal::TensorIntDivisor<Index>(m_rowStride);
    m_fastInputRowStride = internal::TensorIntDivisor<Index>(m_row_inflate_strides);
    m_fastInputColStride = internal::TensorIntDivisor<Index>(m_col_inflate_strides);
    m_fastInputPlaneStride = internal::TensorIntDivisor<Index>(m_plane_inflate_strides);
    m_fastInputColsEff = internal::TensorIntDivisor<Index>(m_input_cols_eff);
    m_fastOutputPlanes = internal::TensorIntDivisor<Index>(m_outputPlanes);
    m_fastOutputPlanesRows = internal::TensorIntDivisor<Index>(m_outputPlanesRows);

    if (static_cast<int>(Layout) == static_cast<int>(ColMajor)) {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[0]);
    } else {
      m_fastOutputDepth = internal::TensorIntDivisor<Index>(m_dimensions[NumDims-1]);
    }
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE const Dimensions& dimensions() const { return m_dimensions; }

  // No own buffer to fill: just make sure the wrapped expression is ready.
  EIGEN_STRONG_INLINE bool evalSubExprsIfNeeded(EvaluatorPointerType /*data*/) {
    m_impl.evalSubExprsIfNeeded(NULL);
    return true;
  }

  EIGEN_STRONG_INLINE void cleanup() {
    m_impl.cleanup();
  }

  // Maps a single output coefficient back to the input, returning the padding
  // value when the coordinate falls outside the (inflated) input volume.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE CoeffReturnType coeff(Index index) const
  {
    // Patch index corresponding to the passed in index.
    const Index patchIndex = index / m_fastPatchStride;

    // Spatial offset within the patch. This has to be translated into 3D
    // coordinates within the patch.
    const Index patchOffset = (index - patchIndex * m_patchStride) / m_fastOutputDepth;

    // Batch, etc.
    const Index otherIndex = (NumDims == 5) ? 0 : index / m_fastOtherStride;
    const Index patch3DIndex = (NumDims == 5) ? patchIndex : (index - otherIndex * m_otherStride) / m_fastPatchStride;

    // Calculate column index in the input original tensor.
    const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
    const Index colOffset = patchOffset / m_fastColStride;
    const Index inputCol = colIndex * m_col_strides + colOffset * m_in_col_strides - m_colPaddingLeft;
    // origInputCol recovers the pre-inflation coordinate; if inputCol does not
    // sit exactly on an inflation stride it lands on an inserted zero.
    const Index origInputCol = (m_col_inflate_strides == 1) ? inputCol : ((inputCol >= 0) ? (inputCol / m_fastInputColStride) : 0);
    if (inputCol < 0 || inputCol >= m_input_cols_eff ||
        ((m_col_inflate_strides != 1) && (inputCol != origInputCol * m_col_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate row index in the original input tensor.
    const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
    const Index rowOffset = (patchOffset - colOffset * m_colStride) / m_fastRowStride;
    const Index inputRow = rowIndex * m_row_strides + rowOffset * m_in_row_strides - m_rowPaddingTop;
    const Index origInputRow = (m_row_inflate_strides == 1) ? inputRow : ((inputRow >= 0) ? (inputRow / m_fastInputRowStride) : 0);
    if (inputRow < 0 || inputRow >= m_input_rows_eff ||
        ((m_row_inflate_strides != 1) && (inputRow != origInputRow * m_row_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Calculate plane index in the original input tensor.
    const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
    const Index planeOffset = patchOffset - colOffset * m_colStride - rowOffset * m_rowStride;
    const Index inputPlane = planeIndex * m_plane_strides + planeOffset * m_in_plane_strides - m_planePaddingTop;
    const Index origInputPlane = (m_plane_inflate_strides == 1) ? inputPlane : ((inputPlane >= 0) ? (inputPlane / m_fastInputPlaneStride) : 0);
    if (inputPlane < 0 || inputPlane >= m_input_planes_eff ||
        ((m_plane_inflate_strides != 1) && (inputPlane != origInputPlane * m_plane_inflate_strides))) {
      return Scalar(m_paddingValue);
    }

    // Channel (depth) coordinate, then linearize back into the input.
    const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
    const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];

    const Index inputIndex = depth +
        origInputRow * m_rowInputStride +
        origInputCol * m_colInputStride +
        origInputPlane * m_planeInputStride +
        otherIndex * m_otherInputStride;

    return m_impl.coeff(inputIndex);
  }

  // Vectorized load: only possible when all in-patch and inflation strides
  // are 1 and the whole packet stays within one patch / one input column,
  // row and plane.  Any other case falls back to a scalar gather.
  template<int LoadMode>
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packet(Index index) const
  {
    EIGEN_STATIC_ASSERT((PacketSize > 1), YOU_MADE_A_PROGRAMMING_MISTAKE)
    eigen_assert(index+PacketSize-1 < dimensions().TotalSize());

    if (m_in_row_strides != 1 || m_in_col_strides != 1 || m_row_inflate_strides != 1 || m_col_inflate_strides != 1 ||
        m_in_plane_strides != 1 || m_plane_inflate_strides != 1) {
      return packetWithPossibleZero(index);
    }

    // First and last coefficient covered by the packet.
    const Index indices[2] = {index, index + PacketSize - 1};
    const Index patchIndex = indices[0] / m_fastPatchStride;
    if (patchIndex != indices[1] / m_fastPatchStride) {
      return packetWithPossibleZero(index);
    }
    const Index otherIndex = (NumDims == 5) ? 0 : indices[0] / m_fastOtherStride;
    eigen_assert(otherIndex == indices[1] / m_fastOtherStride);

    // Find the offset of the element wrt the location of the first element.
    const Index patchOffsets[2] = {(indices[0] - patchIndex * m_patchStride) / m_fastOutputDepth,
                                   (indices[1] - patchIndex * m_patchStride) / m_fastOutputDepth};

    const Index patch3DIndex = (NumDims == 5) ? patchIndex : (indices[0] - otherIndex * m_otherStride) / m_fastPatchStride;
    eigen_assert(patch3DIndex == (indices[1] - otherIndex * m_otherStride) / m_fastPatchStride);

    const Index colIndex = patch3DIndex / m_fastOutputPlanesRows;
    const Index colOffsets[2] = {
      patchOffsets[0] / m_fastColStride,
      patchOffsets[1] / m_fastColStride};

    // Calculate col indices in the original input tensor.
    // (Comparing against m_inputCols is valid here: inflate strides are 1 on
    // this path, so m_inputCols == m_input_cols_eff.)
    const Index inputCols[2] = {
      colIndex * m_col_strides + colOffsets[0] - m_colPaddingLeft,
      colIndex * m_col_strides + colOffsets[1] - m_colPaddingLeft};
    if (inputCols[1] < 0 || inputCols[0] >= m_inputCols) {
      // Entire packet lies in the padding region.
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputCols[0] != inputCols[1]) {
      // Packet straddles an input-column boundary.
      return packetWithPossibleZero(index);
    }

    const Index rowIndex = (patch3DIndex - colIndex * m_outputPlanesRows) / m_fastOutputPlanes;
    const Index rowOffsets[2] = {
      (patchOffsets[0] - colOffsets[0] * m_colStride) / m_fastRowStride,
      (patchOffsets[1] - colOffsets[1] * m_colStride) / m_fastRowStride};
    eigen_assert(rowOffsets[0] <= rowOffsets[1]);
    // Calculate col indices in the original input tensor.
    const Index inputRows[2] = {
      rowIndex * m_row_strides + rowOffsets[0] - m_rowPaddingTop,
      rowIndex * m_row_strides + rowOffsets[1] - m_rowPaddingTop};

    if (inputRows[1] < 0 || inputRows[0] >= m_inputRows) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputRows[0] != inputRows[1]) {
      return packetWithPossibleZero(index);
    }

    const Index planeIndex = (patch3DIndex - m_outputPlanes * (colIndex * m_outputRows + rowIndex));
    const Index planeOffsets[2] = {
      patchOffsets[0] - colOffsets[0] * m_colStride - rowOffsets[0] * m_rowStride,
      patchOffsets[1] - colOffsets[1] * m_colStride - rowOffsets[1] * m_rowStride};
    eigen_assert(planeOffsets[0] <= planeOffsets[1]);
    const Index inputPlanes[2] = {
      planeIndex * m_plane_strides + planeOffsets[0] - m_planePaddingTop,
      planeIndex * m_plane_strides + planeOffsets[1] - m_planePaddingTop};

    if (inputPlanes[1] < 0 || inputPlanes[0] >= m_inputPlanes) {
      return internal::pset1<PacketReturnType>(Scalar(m_paddingValue));
    }

    if (inputPlanes[0] >= 0 && inputPlanes[1] < m_inputPlanes) {
      // no padding
      const int depth_index = static_cast<int>(Layout) == static_cast<int>(ColMajor) ? 0 : NumDims - 1;
      const Index depth = index - (index / m_fastOutputDepth) * m_dimensions[depth_index];
      const Index inputIndex = depth +
          inputRows[0] * m_rowInputStride +
          inputCols[0] * m_colInputStride +
          m_planeInputStride * inputPlanes[0] +
          otherIndex * m_otherInputStride;
      return m_impl.template packet<Unaligned>(inputIndex);
    }

    // Packet partially overlaps padding: gather scalar-by-scalar.
    return packetWithPossibleZero(index);
  }

  // Hard-coded operation counts for the index arithmetic in coeff().
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorOpCost
  costPerCoeff(bool vectorized) const {
    const double compute_cost =
        10 * TensorOpCost::DivCost<Index>() + 21 * TensorOpCost::MulCost<Index>() +
        8 * TensorOpCost::AddCost<Index>();
    return TensorOpCost(0, 0, compute_cost, vectorized, PacketSize);
  }

  // No materialized buffer: data is always computed on the fly.
  EIGEN_DEVICE_FUNC EvaluatorPointerType data() const { return NULL; }

  const TensorEvaluator<ArgType, Device>& impl() const { return m_impl; }

  // Accessors for the derived geometry (used e.g. by downstream kernels).
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planePaddingTop() const { return m_planePaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowPaddingTop() const { return m_rowPaddingTop; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colPaddingLeft() const { return m_colPaddingLeft; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputPlanes() const { return m_outputPlanes; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputRows() const { return m_outputRows; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index outputCols() const { return m_outputCols; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userPlaneStride() const { return m_plane_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userRowStride() const { return m_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userColStride() const { return m_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInPlaneStride() const { return m_in_plane_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInRowStride() const { return m_in_row_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index userInColStride() const { return m_in_col_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index planeInflateStride() const { return m_plane_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index rowInflateStride() const { return m_row_inflate_strides; }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Index colInflateStride() const { return m_col_inflate_strides; }

#ifdef EIGEN_USE_SYCL
  // binding placeholder accessors to a command group handler for SYCL
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE void bind(cl::sycl::handler &cgh) const {
    m_impl.bind(cgh);
  }
#endif
 protected:
  // Scalar fallback for packet(): evaluates each lane through coeff(), which
  // handles padding/inflation per coefficient, then loads the packet.
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE PacketReturnType packetWithPossibleZero(Index index) const
  {
    EIGEN_ALIGN_MAX typename internal::remove_const<CoeffReturnType>::type values[PacketSize];
    EIGEN_UNROLL_LOOP
    for (int i = 0; i < PacketSize; ++i) {
      values[i] = coeff(index+i);
    }
    PacketReturnType rslt = internal::pload<PacketReturnType>(values);
    return rslt;
  }

  Dimensions m_dimensions;

  // Parameters passed to the constructor.
  Index m_plane_strides;
  Index m_row_strides;
  Index m_col_strides;

  // Derived output extents.
  Index m_outputPlanes;
  Index m_outputRows;
  Index m_outputCols;

  // Padding applied before the first plane/row/column.
  Index m_planePaddingTop;
  Index m_rowPaddingTop;
  Index m_colPaddingLeft;

  // In-patch (dilation) strides.
  Index m_in_plane_strides;
  Index m_in_row_strides;
  Index m_in_col_strides;

  // Zero-inflation strides.
  Index m_plane_inflate_strides;
  Index m_row_inflate_strides;
  Index m_col_inflate_strides;

  // Cached input size.
  Index m_inputDepth;
  Index m_inputPlanes;
  Index m_inputRows;
  Index m_inputCols;

  // Other cached variables.
  Index m_outputPlanesRows;

  // Effective input/patch post-inflation size.
  Index m_input_planes_eff;
  Index m_input_rows_eff;
  Index m_input_cols_eff;
  Index m_patch_planes_eff;
  Index m_patch_rows_eff;
  Index m_patch_cols_eff;

  // Strides for the output tensor.
  Index m_otherStride;
  Index m_patchStride;
  Index m_rowStride;
  Index m_colStride;

  // Strides for the input tensor.
  Index m_planeInputStride;
  Index m_rowInputStride;
  Index m_colInputStride;
  Index m_otherInputStride;

  // Fast integer divisors (multiply/shift) for the strides above.
  internal::TensorIntDivisor<Index> m_fastOtherStride;
  internal::TensorIntDivisor<Index> m_fastPatchStride;
  internal::TensorIntDivisor<Index> m_fastColStride;
  internal::TensorIntDivisor<Index> m_fastRowStride;
  internal::TensorIntDivisor<Index> m_fastInputPlaneStride;
  internal::TensorIntDivisor<Index> m_fastInputRowStride;
  internal::TensorIntDivisor<Index> m_fastInputColStride;
  internal::TensorIntDivisor<Index> m_fastInputColsEff;
  internal::TensorIntDivisor<Index> m_fastOutputPlanesRows;
  internal::TensorIntDivisor<Index> m_fastOutputPlanes;
  internal::TensorIntDivisor<Index> m_fastOutputDepth;

  // Value returned for coordinates that fall into the padding.
  Scalar m_paddingValue;

  TensorEvaluator<ArgType, Device> m_impl;


};
625 
626 
627 } // end namespace Eigen
628 
629 #endif // EIGEN_CXX11_TENSOR_TENSOR_VOLUME_PATCH_H
630