1 // This file is part of Eigen, a lightweight C++ template library
2 // for linear algebra.
3 //
4 // Copyright (C) 2011 Benoit Jacob <jacob.benoit.1@gmail.com>
5 // Copyright (C) 2011-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
6 // Copyright (C) 2011-2012 Jitse Niesen <jitse@maths.leeds.ac.uk>
7 //
8 // This Source Code Form is subject to the terms of the Mozilla
9 // Public License v. 2.0. If a copy of the MPL was not distributed
10 // with this file, You can obtain one at http://mozilla.org/MPL/2.0/.
11 
12 
13 #ifndef EIGEN_COREEVALUATORS_H
14 #define EIGEN_COREEVALUATORS_H
15 
16 namespace Eigen {
17 
18 namespace internal {
19 
20 // This class returns the evaluator kind from the expression storage kind.
21 // The default assumes index-based accessors.
22 template<typename StorageKind>
23 struct storage_kind_to_evaluator_kind {
24   typedef IndexBased Kind;
25 };
26 
27 // This class returns the evaluator shape from the expression storage kind.
28 // It can be Dense, Sparse, Triangular, Diagonal, SelfAdjoint, Band, etc.
29 template<typename StorageKind> struct storage_kind_to_shape;
30 
31 template<> struct storage_kind_to_shape<Dense>                  { typedef DenseShape Shape;           };
32 template<> struct storage_kind_to_shape<SolverStorage>          { typedef SolverShape Shape;           };
33 template<> struct storage_kind_to_shape<PermutationStorage>     { typedef PermutationShape Shape;     };
34 template<> struct storage_kind_to_shape<TranspositionsStorage>  { typedef TranspositionsShape Shape;  };
35 
36 // Evaluators have to be specialized with respect to various criteria such as:
37 //  - storage/structure/shape
38 //  - scalar type
39 //  - etc.
40 // Therefore, we need specializations of evaluator providing additional template arguments for each kind of evaluator.
41 // We currently distinguish the following kinds of evaluators (an illustrative usage sketch follows this list):
42 // - unary_evaluator    for expressions taking only one argument (CwiseUnaryOp, CwiseUnaryView, Transpose, MatrixWrapper, ArrayWrapper, Reverse, Replicate)
43 // - binary_evaluator   for expressions taking two arguments (CwiseBinaryOp)
44 // - ternary_evaluator  for expressions taking three arguments (CwiseTernaryOp)
45 // - product_evaluator  for linear algebra products (Product); special case of binary_evaluator because it requires additional tags for dispatching.
46 // - mapbase_evaluator  for Map, Block, Ref
47 // - block_evaluator    for Block (special dispatching to a mapbase_evaluator or unary_evaluator)
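//
// Illustrative usage sketch (hypothetical client code, not referenced by this file): assignment
// kernels typically construct an evaluator over an expression and then read coefficients or
// packets from it.
//
//   #include <Eigen/Core>
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3);
//   auto expr = A + A.transpose();                        // lazy CwiseBinaryOp expression
//   Eigen::internal::evaluator<decltype(expr)> ev(expr);  // dispatches to binary_evaluator
//   double x = ev.coeff(1,2);                             // == A(1,2) + A(2,1)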
48 
49 template< typename T,
50           typename Arg1Kind   = typename evaluator_traits<typename T::Arg1>::Kind,
51           typename Arg2Kind   = typename evaluator_traits<typename T::Arg2>::Kind,
52           typename Arg3Kind   = typename evaluator_traits<typename T::Arg3>::Kind,
53           typename Arg1Scalar = typename traits<typename T::Arg1>::Scalar,
54           typename Arg2Scalar = typename traits<typename T::Arg2>::Scalar,
55           typename Arg3Scalar = typename traits<typename T::Arg3>::Scalar> struct ternary_evaluator;
56 
57 template< typename T,
58           typename LhsKind   = typename evaluator_traits<typename T::Lhs>::Kind,
59           typename RhsKind   = typename evaluator_traits<typename T::Rhs>::Kind,
60           typename LhsScalar = typename traits<typename T::Lhs>::Scalar,
61           typename RhsScalar = typename traits<typename T::Rhs>::Scalar> struct binary_evaluator;
62 
63 template< typename T,
64           typename Kind   = typename evaluator_traits<typename T::NestedExpression>::Kind,
65           typename Scalar = typename T::Scalar> struct unary_evaluator;
66 
67 // evaluator_traits<T> contains traits for evaluator<T>
68 
69 template<typename T>
70 struct evaluator_traits_base
71 {
72   // by default, get evaluator kind and shape from storage
73   typedef typename storage_kind_to_evaluator_kind<typename traits<T>::StorageKind>::Kind Kind;
74   typedef typename storage_kind_to_shape<typename traits<T>::StorageKind>::Shape Shape;
75 };
76 
77 // Default evaluator traits
78 template<typename T>
79 struct evaluator_traits : public evaluator_traits_base<T>
80 {
81 };
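//
// For illustration (hypothetical checks, not part of this file): a plain dense matrix resolves to
// an index-based kind and a dense shape through the default traits above.
//
//   static_assert(Eigen::internal::is_same<Eigen::internal::evaluator_traits<Eigen::MatrixXd>::Kind,
//                                          Eigen::internal::IndexBased>::value, "dense objects are index based");
//   static_assert(Eigen::internal::is_same<Eigen::internal::evaluator_traits<Eigen::MatrixXd>::Shape,
//                                          Eigen::internal::DenseShape>::value, "dense objects have a dense shape");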
82 
83 template<typename T, typename Shape = typename evaluator_traits<T>::Shape >
84 struct evaluator_assume_aliasing {
85   static const bool value = false;
86 };
87 
88 // By default, we assume a unary expression:
89 template<typename T>
90 struct evaluator : public unary_evaluator<T>
91 {
92   typedef unary_evaluator<T> Base;
93   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
94   explicit evaluator(const T& xpr) : Base(xpr) {}
95 };
96 
97 
98 // TODO: Think about const-correctness
99 template<typename T>
100 struct evaluator<const T>
101   : evaluator<T>
102 {
103   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
104   explicit evaluator(const T& xpr) : evaluator<T>(xpr) {}
105 };
106 
107 // ---------- base class for all evaluators ----------
108 
109 template<typename ExpressionType>
110 struct evaluator_base
111 {
112   // TODO it is not very nice to have to propagate all these traits; they are currently only needed to handle outer/inner indices.
113   typedef traits<ExpressionType> ExpressionTraits;
114 
115   enum {
116     Alignment = 0
117   };
118   // noncopyable:
119   // Don't make this class inherit noncopyable as this kills EBO (Empty Base Optimization)
120   // and makes complex evaluators much larger than they should be.
121   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE evaluator_base() {}
122   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE ~evaluator_base() {}
123 private:
124   EIGEN_DEVICE_FUNC evaluator_base(const evaluator_base&);
125   EIGEN_DEVICE_FUNC const evaluator_base& operator=(const evaluator_base&);
126 };
127 
128 // -------------------- Matrix and Array --------------------
129 //
130 // evaluator<PlainObjectBase> is a common base class for the
131 // Matrix and Array evaluators.
132 // Here we directly specialize evaluator. This is not really a unary expression, and it is, by definition, dense,
133 // so no need for more sophisticated dispatching.
134 
135 // this helper permits completely eliminating m_outerStride if it is known at compile time.
136 template<typename Scalar,int OuterStride> class plainobjectbase_evaluator_data {
137 public:
138   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
139   plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr)
140   {
141 #ifndef EIGEN_INTERNAL_DEBUGGING
142     EIGEN_UNUSED_VARIABLE(outerStride);
143 #endif
144     eigen_internal_assert(outerStride==OuterStride);
145   }
146   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
147   Index outerStride() const EIGEN_NOEXCEPT { return OuterStride; }
148   const Scalar *data;
149 };
150 
151 template<typename Scalar> class plainobjectbase_evaluator_data<Scalar,Dynamic> {
152 public:
153   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
154   plainobjectbase_evaluator_data(const Scalar* ptr, Index outerStride) : data(ptr), m_outerStride(outerStride) {}
155   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
156   Index outerStride() const { return m_outerStride; }
157   const Scalar *data;
158 protected:
159   Index m_outerStride;
160 };
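//
// For illustration (hypothetical usage): with a fixed-size matrix the outer stride is a
// compile-time constant, so the evaluator only needs to carry the data pointer; with a
// dynamic-size matrix the outer stride is stored as a run-time member as well.
//
//   Eigen::Matrix4f F;                                    // OuterStrideAtCompileTime == 4
//   Eigen::MatrixXf D(10,10);                             // OuterStrideAtCompileTime == Dynamic
//   Eigen::internal::evaluator<Eigen::Matrix4f> ef(F);    // m_d holds only the pointer
//   Eigen::internal::evaluator<Eigen::MatrixXf> ed(D);    // m_d holds the pointer and the outer stride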
161 
162 template<typename Derived>
163 struct evaluator<PlainObjectBase<Derived> >
164   : evaluator_base<Derived>
165 {
166   typedef PlainObjectBase<Derived> PlainObjectType;
167   typedef typename PlainObjectType::Scalar Scalar;
168   typedef typename PlainObjectType::CoeffReturnType CoeffReturnType;
169 
170   enum {
171     IsRowMajor = PlainObjectType::IsRowMajor,
172     IsVectorAtCompileTime = PlainObjectType::IsVectorAtCompileTime,
173     RowsAtCompileTime = PlainObjectType::RowsAtCompileTime,
174     ColsAtCompileTime = PlainObjectType::ColsAtCompileTime,
175 
176     CoeffReadCost = NumTraits<Scalar>::ReadCost,
177     Flags = traits<Derived>::EvaluatorFlags,
178     Alignment = traits<Derived>::Alignment
179   };
180   enum {
181     // We do not need to know the outer stride for vectors
182     OuterStrideAtCompileTime = IsVectorAtCompileTime  ? 0
183                                                       : int(IsRowMajor) ? ColsAtCompileTime
184                                                                         : RowsAtCompileTime
185   };
186 
187   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
188   evaluator()
189     : m_d(0,OuterStrideAtCompileTime)
190   {
191     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
192   }
193 
194   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
195   explicit evaluator(const PlainObjectType& m)
196     : m_d(m.data(),IsVectorAtCompileTime ? 0 : m.outerStride())
197   {
198     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
199   }
200 
201   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
202   CoeffReturnType coeff(Index row, Index col) const
203   {
204     if (IsRowMajor)
205       return m_d.data[row * m_d.outerStride() + col];
206     else
207       return m_d.data[row + col * m_d.outerStride()];
208   }
209 
210   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
211   CoeffReturnType coeff(Index index) const
212   {
213     return m_d.data[index];
214   }
215 
216   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
217   Scalar& coeffRef(Index row, Index col)
218   {
219     if (IsRowMajor)
220       return const_cast<Scalar*>(m_d.data)[row * m_d.outerStride() + col];
221     else
222       return const_cast<Scalar*>(m_d.data)[row + col * m_d.outerStride()];
223   }
224 
225   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
226   Scalar& coeffRef(Index index)
227   {
228     return const_cast<Scalar*>(m_d.data)[index];
229   }
230 
231   template<int LoadMode, typename PacketType>
232   EIGEN_STRONG_INLINE
233   PacketType packet(Index row, Index col) const
234   {
235     if (IsRowMajor)
236       return ploadt<PacketType, LoadMode>(m_d.data + row * m_d.outerStride() + col);
237     else
238       return ploadt<PacketType, LoadMode>(m_d.data + row + col * m_d.outerStride());
239   }
240 
241   template<int LoadMode, typename PacketType>
242   EIGEN_STRONG_INLINE
243   PacketType packet(Index index) const
244   {
245     return ploadt<PacketType, LoadMode>(m_d.data + index);
246   }
247 
248   template<int StoreMode,typename PacketType>
249   EIGEN_STRONG_INLINE
250   void writePacket(Index row, Index col, const PacketType& x)
251   {
252     if (IsRowMajor)
253       return pstoret<Scalar, PacketType, StoreMode>
254                     (const_cast<Scalar*>(m_d.data) + row * m_d.outerStride() + col, x);
255     else
256       return pstoret<Scalar, PacketType, StoreMode>
257                     (const_cast<Scalar*>(m_d.data) + row + col * m_d.outerStride(), x);
258   }
259 
260   template<int StoreMode, typename PacketType>
261   EIGEN_STRONG_INLINE
262   void writePacket(Index index, const PacketType& x)
263   {
264     return pstoret<Scalar, PacketType, StoreMode>(const_cast<Scalar*>(m_d.data) + index, x);
265   }
266 
267 protected:
268 
269   plainobjectbase_evaluator_data<Scalar,OuterStrideAtCompileTime> m_d;
270 };
271 
272 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
273 struct evaluator<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
274   : evaluator<PlainObjectBase<Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
275 {
276   typedef Matrix<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
277 
278   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
279   evaluator() {}
280 
281   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
282   explicit evaluator(const XprType& m)
283     : evaluator<PlainObjectBase<XprType> >(m)
284   { }
285 };
286 
287 template<typename Scalar, int Rows, int Cols, int Options, int MaxRows, int MaxCols>
288 struct evaluator<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> >
289   : evaluator<PlainObjectBase<Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> > >
290 {
291   typedef Array<Scalar, Rows, Cols, Options, MaxRows, MaxCols> XprType;
292 
293   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
294   evaluator() {}
295 
296   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
297   explicit evaluator(const XprType& m)
298     : evaluator<PlainObjectBase<XprType> >(m)
299   { }
300 };
301 
302 // -------------------- Transpose --------------------
303 
304 template<typename ArgType>
305 struct unary_evaluator<Transpose<ArgType>, IndexBased>
306   : evaluator_base<Transpose<ArgType> >
307 {
308   typedef Transpose<ArgType> XprType;
309 
310   enum {
311     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
312     Flags = evaluator<ArgType>::Flags ^ RowMajorBit,
313     Alignment = evaluator<ArgType>::Alignment
314   };
315 
316   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
317   explicit unary_evaluator(const XprType& t) : m_argImpl(t.nestedExpression()) {}
318 
319   typedef typename XprType::Scalar Scalar;
320   typedef typename XprType::CoeffReturnType CoeffReturnType;
321 
322   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
323   CoeffReturnType coeff(Index row, Index col) const
324   {
325     return m_argImpl.coeff(col, row);
326   }
327 
328   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
329   CoeffReturnType coeff(Index index) const
330   {
331     return m_argImpl.coeff(index);
332   }
333 
334   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
335   Scalar& coeffRef(Index row, Index col)
336   {
337     return m_argImpl.coeffRef(col, row);
338   }
339 
340   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
341   typename XprType::Scalar& coeffRef(Index index)
342   {
343     return m_argImpl.coeffRef(index);
344   }
345 
346   template<int LoadMode, typename PacketType>
347   EIGEN_STRONG_INLINE
348   PacketType packet(Index row, Index col) const
349   {
350     return m_argImpl.template packet<LoadMode,PacketType>(col, row);
351   }
352 
353   template<int LoadMode, typename PacketType>
354   EIGEN_STRONG_INLINE
355   PacketType packet(Index index) const
356   {
357     return m_argImpl.template packet<LoadMode,PacketType>(index);
358   }
359 
360   template<int StoreMode, typename PacketType>
361   EIGEN_STRONG_INLINE
362   void writePacket(Index row, Index col, const PacketType& x)
363   {
364     m_argImpl.template writePacket<StoreMode,PacketType>(col, row, x);
365   }
366 
367   template<int StoreMode, typename PacketType>
368   EIGEN_STRONG_INLINE
369   void writePacket(Index index, const PacketType& x)
370   {
371     m_argImpl.template writePacket<StoreMode,PacketType>(index, x);
372   }
373 
374 protected:
375   evaluator<ArgType> m_argImpl;
376 };
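//
// For illustration (hypothetical usage): the transpose evaluator merely swaps the row/column
// arguments before forwarding to the nested evaluator; no data is copied.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(2,3);
//   auto t = A.transpose();
//   Eigen::internal::evaluator<decltype(t)> ev(t);
//   // ev.coeff(i,j) returns A(j,i) by delegating to evaluator<MatrixXd>::coeff(j,i)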
377 
378 // -------------------- CwiseNullaryOp --------------------
379 // Like Matrix and Array, this is not really a unary expression, so we directly specialize evaluator.
380 // Likewise, there is no need for more sophisticated dispatching here.
381 
382 template<typename Scalar,typename NullaryOp,
383          bool has_nullary = has_nullary_operator<NullaryOp>::value,
384          bool has_unary   = has_unary_operator<NullaryOp>::value,
385          bool has_binary  = has_binary_operator<NullaryOp>::value>
386 struct nullary_wrapper
387 {
388   template <typename IndexType>
389   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const { return op(i,j); }
390   template <typename IndexType>
391   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
392 
393   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const { return op.template packetOp<T>(i,j); }
394   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
395 };
396 
397 template<typename Scalar,typename NullaryOp>
398 struct nullary_wrapper<Scalar,NullaryOp,true,false,false>
399 {
400   template <typename IndexType>
401   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType=0, IndexType=0) const { return op(); }
402   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType=0, IndexType=0) const { return op.template packetOp<T>(); }
403 };
404 
405 template<typename Scalar,typename NullaryOp>
406 struct nullary_wrapper<Scalar,NullaryOp,false,false,true>
407 {
408   template <typename IndexType>
409   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j=0) const { return op(i,j); }
410   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j=0) const { return op.template packetOp<T>(i,j); }
411 };
412 
413 // We need the following specialization for vector-only functors assigned to a runtime vector,
414 // for instance, using LinSpaced and assigning a RowVectorXd to a MatrixXd or even a row of a MatrixXd.
415 // In this case, i==0 and j is used for the actual iteration.
416 template<typename Scalar,typename NullaryOp>
417 struct nullary_wrapper<Scalar,NullaryOp,false,true,false>
418 {
419   template <typename IndexType>
420   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
421     eigen_assert(i==0 || j==0);
422     return op(i+j);
423   }
424   template <typename T, typename IndexType> EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
425     eigen_assert(i==0 || j==0);
426     return op.template packetOp<T>(i+j);
427   }
428 
429   template <typename IndexType>
430   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const { return op(i); }
431   template <typename T, typename IndexType>
432   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const { return op.template packetOp<T>(i); }
433 };
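//
// Illustration of the case described above (hypothetical usage): LinSpaced yields a functor with a
// unary operator()(i) only. When its 1D expression is assigned to a row of a 2D object, the
// evaluator is queried with (i,j) coordinates where i==0, and this wrapper forwards op(i+j) == op(j).
//
//   Eigen::MatrixXd M(2,4);
//   M.row(1) = Eigen::RowVectorXd::LinSpaced(4, 0.0, 1.0);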
434 
435 template<typename Scalar,typename NullaryOp>
436 struct nullary_wrapper<Scalar,NullaryOp,false,false,false> {};
437 
438 #if 0 && EIGEN_COMP_MSVC>0
439 // This ugly workaround is disabled; the issue is now handled in traits<Ref>::match,
440 // but this piece of code might still come in handy if some other weird compilation
441 // errors pop up again.
442 
443 // MSVC exhibits a weird compilation error when
444 // compiling:
445 //    Eigen::MatrixXf A = MatrixXf::Random(3,3);
446 //    Ref<const MatrixXf> R = 2.f*A;
447 // while has_*ary_operator<scalar_constant_op<float>> has not been instantiated yet.
448 // The "problem" is that evaluator<2.f*A> is instantiated by traits<Ref>::match<2.f*A>
449 // and at that time has_*ary_operator<T> returns true regardless of T.
450 // Then nullary_wrapper is badly instantiated as nullary_wrapper<.,.,true,true,true>.
451 // The trick is thus to defer the proper instantiation of nullary_wrapper when coeff(),
452 // and packet() are really instantiated as implemented below:
453 
454 // This is a simple wrapper around Index to enforce the re-instantiation of
455 // has_*ary_operator when needed.
456 template<typename T> struct nullary_wrapper_workaround_msvc {
457   nullary_wrapper_workaround_msvc(const T&);
458   operator T()const;
459 };
460 
461 template<typename Scalar,typename NullaryOp>
462 struct nullary_wrapper<Scalar,NullaryOp,true,true,true>
463 {
464   template <typename IndexType>
465   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i, IndexType j) const {
466     return nullary_wrapper<Scalar,NullaryOp,
467     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
468     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
469     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i,j);
470   }
471   template <typename IndexType>
472   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE Scalar operator()(const NullaryOp& op, IndexType i) const {
473     return nullary_wrapper<Scalar,NullaryOp,
474     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
475     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
476     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().operator()(op,i);
477   }
478 
479   template <typename T, typename IndexType>
480   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i, IndexType j) const {
481     return nullary_wrapper<Scalar,NullaryOp,
482     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
483     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
484     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i,j);
485   }
486   template <typename T, typename IndexType>
487   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T packetOp(const NullaryOp& op, IndexType i) const {
488     return nullary_wrapper<Scalar,NullaryOp,
489     has_nullary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
490     has_unary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value,
491     has_binary_operator<NullaryOp,nullary_wrapper_workaround_msvc<IndexType> >::value>().template packetOp<T>(op,i);
492   }
493 };
494 #endif // MSVC workaround
495 
496 template<typename NullaryOp, typename PlainObjectType>
497 struct evaluator<CwiseNullaryOp<NullaryOp,PlainObjectType> >
498   : evaluator_base<CwiseNullaryOp<NullaryOp,PlainObjectType> >
499 {
500   typedef CwiseNullaryOp<NullaryOp,PlainObjectType> XprType;
501   typedef typename internal::remove_all<PlainObjectType>::type PlainObjectTypeCleaned;
502 
503   enum {
504     CoeffReadCost = internal::functor_traits<NullaryOp>::Cost,
505 
506     Flags = (evaluator<PlainObjectTypeCleaned>::Flags
507           &  (  HereditaryBits
508               | (functor_has_linear_access<NullaryOp>::ret  ? LinearAccessBit : 0)
509               | (functor_traits<NullaryOp>::PacketAccess    ? PacketAccessBit : 0)))
510           | (functor_traits<NullaryOp>::IsRepeatable ? 0 : EvalBeforeNestingBit),
511     Alignment = AlignedMax
512   };
513 
514   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& n)
515     : m_functor(n.functor()), m_wrapper()
516   {
517     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
518   }
519 
520   typedef typename XprType::CoeffReturnType CoeffReturnType;
521 
522   template <typename IndexType>
523   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
524   CoeffReturnType coeff(IndexType row, IndexType col) const
525   {
526     return m_wrapper(m_functor, row, col);
527   }
528 
529   template <typename IndexType>
530   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
531   CoeffReturnType coeff(IndexType index) const
532   {
533     return m_wrapper(m_functor,index);
534   }
535 
536   template<int LoadMode, typename PacketType, typename IndexType>
537   EIGEN_STRONG_INLINE
538   PacketType packet(IndexType row, IndexType col) const
539   {
540     return m_wrapper.template packetOp<PacketType>(m_functor, row, col);
541   }
542 
543   template<int LoadMode, typename PacketType, typename IndexType>
544   EIGEN_STRONG_INLINE
545   PacketType packet(IndexType index) const
546   {
547     return m_wrapper.template packetOp<PacketType>(m_functor, index);
548   }
549 
550 protected:
551   const NullaryOp m_functor;
552   const internal::nullary_wrapper<CoeffReturnType,NullaryOp> m_wrapper;
553 };
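//
// For illustration (hypothetical usage): a constant expression is a CwiseNullaryOp whose functor
// exposes a nullary operator(), so the wrapper ignores the coordinates and forwards op() for every
// coefficient.
//
//   auto c = Eigen::MatrixXd::Constant(3, 3, 1.5);
//   Eigen::internal::evaluator<decltype(c)> ev(c);
//   // ev.coeff(i,j) == 1.5 for all i,j, through the has_nullary branch of nullary_wrapper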
554 
555 // -------------------- CwiseUnaryOp --------------------
556 
557 template<typename UnaryOp, typename ArgType>
558 struct unary_evaluator<CwiseUnaryOp<UnaryOp, ArgType>, IndexBased >
559   : evaluator_base<CwiseUnaryOp<UnaryOp, ArgType> >
560 {
561   typedef CwiseUnaryOp<UnaryOp, ArgType> XprType;
562 
563   enum {
564     CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
565 
566     Flags = evaluator<ArgType>::Flags
567           & (HereditaryBits | LinearAccessBit | (functor_traits<UnaryOp>::PacketAccess ? PacketAccessBit : 0)),
568     Alignment = evaluator<ArgType>::Alignment
569   };
570 
571   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
572   explicit unary_evaluator(const XprType& op) : m_d(op)
573   {
574     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
575     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
576   }
577 
578   typedef typename XprType::CoeffReturnType CoeffReturnType;
579 
580   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
581   CoeffReturnType coeff(Index row, Index col) const
582   {
583     return m_d.func()(m_d.argImpl.coeff(row, col));
584   }
585 
586   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
587   CoeffReturnType coeff(Index index) const
588   {
589     return m_d.func()(m_d.argImpl.coeff(index));
590   }
591 
592   template<int LoadMode, typename PacketType>
593   EIGEN_STRONG_INLINE
594   PacketType packet(Index row, Index col) const
595   {
596     return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(row, col));
597   }
598 
599   template<int LoadMode, typename PacketType>
600   EIGEN_STRONG_INLINE
601   PacketType packet(Index index) const
602   {
603     return m_d.func().packetOp(m_d.argImpl.template packet<LoadMode, PacketType>(index));
604   }
605 
606 protected:
607 
608   // this helper permits completely eliminating the functor if it is empty
609   struct Data
610   {
611     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
612     Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
613     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
614     const UnaryOp& func() const { return op; }
615     UnaryOp op;
616     evaluator<ArgType> argImpl;
617   };
618 
619   Data m_d;
620 };
621 
622 // -------------------- CwiseTernaryOp --------------------
623 
624 // this is a ternary expression
625 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
626 struct evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
627   : public ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
628 {
629   typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
630   typedef ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> > Base;
631 
632   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr) : Base(xpr) {}
633 };
634 
635 template<typename TernaryOp, typename Arg1, typename Arg2, typename Arg3>
636 struct ternary_evaluator<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3>, IndexBased, IndexBased>
637   : evaluator_base<CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> >
638 {
639   typedef CwiseTernaryOp<TernaryOp, Arg1, Arg2, Arg3> XprType;
640 
641   enum {
642     CoeffReadCost = int(evaluator<Arg1>::CoeffReadCost) + int(evaluator<Arg2>::CoeffReadCost) + int(evaluator<Arg3>::CoeffReadCost) + int(functor_traits<TernaryOp>::Cost),
643 
644     Arg1Flags = evaluator<Arg1>::Flags,
645     Arg2Flags = evaluator<Arg2>::Flags,
646     Arg3Flags = evaluator<Arg3>::Flags,
647     SameType = is_same<typename Arg1::Scalar,typename Arg2::Scalar>::value && is_same<typename Arg1::Scalar,typename Arg3::Scalar>::value,
648     StorageOrdersAgree = (int(Arg1Flags)&RowMajorBit)==(int(Arg2Flags)&RowMajorBit) && (int(Arg1Flags)&RowMajorBit)==(int(Arg3Flags)&RowMajorBit),
649     Flags0 = (int(Arg1Flags) | int(Arg2Flags) | int(Arg3Flags)) & (
650         HereditaryBits
651         | (int(Arg1Flags) & int(Arg2Flags) & int(Arg3Flags) &
652            ( (StorageOrdersAgree ? LinearAccessBit : 0)
653            | (functor_traits<TernaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
654            )
655         )
656      ),
657     Flags = (Flags0 & ~RowMajorBit) | (Arg1Flags & RowMajorBit),
658     Alignment = EIGEN_PLAIN_ENUM_MIN(
659         EIGEN_PLAIN_ENUM_MIN(evaluator<Arg1>::Alignment, evaluator<Arg2>::Alignment),
660         evaluator<Arg3>::Alignment)
661   };
662 
663   EIGEN_DEVICE_FUNC explicit ternary_evaluator(const XprType& xpr) : m_d(xpr)
664   {
665     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<TernaryOp>::Cost);
666     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
667   }
668 
669   typedef typename XprType::CoeffReturnType CoeffReturnType;
670 
671   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
672   CoeffReturnType coeff(Index row, Index col) const
673   {
674     return m_d.func()(m_d.arg1Impl.coeff(row, col), m_d.arg2Impl.coeff(row, col), m_d.arg3Impl.coeff(row, col));
675   }
676 
677   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
678   CoeffReturnType coeff(Index index) const
679   {
680     return m_d.func()(m_d.arg1Impl.coeff(index), m_d.arg2Impl.coeff(index), m_d.arg3Impl.coeff(index));
681   }
682 
683   template<int LoadMode, typename PacketType>
684   EIGEN_STRONG_INLINE
685   PacketType packet(Index row, Index col) const
686   {
687     return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(row, col),
688                                m_d.arg2Impl.template packet<LoadMode,PacketType>(row, col),
689                                m_d.arg3Impl.template packet<LoadMode,PacketType>(row, col));
690   }
691 
692   template<int LoadMode, typename PacketType>
693   EIGEN_STRONG_INLINE
694   PacketType packet(Index index) const
695   {
696     return m_d.func().packetOp(m_d.arg1Impl.template packet<LoadMode,PacketType>(index),
697                                m_d.arg2Impl.template packet<LoadMode,PacketType>(index),
698                                m_d.arg3Impl.template packet<LoadMode,PacketType>(index));
699   }
700 
701 protected:
702   // this helper permits completely eliminating the functor if it is empty
703   struct Data
704   {
705     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
706     Data(const XprType& xpr) : op(xpr.functor()), arg1Impl(xpr.arg1()), arg2Impl(xpr.arg2()), arg3Impl(xpr.arg3()) {}
707     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
708     const TernaryOp& func() const { return op; }
709     TernaryOp op;
710     evaluator<Arg1> arg1Impl;
711     evaluator<Arg2> arg2Impl;
712     evaluator<Arg3> arg3Impl;
713   };
714 
715   Data m_d;
716 };
717 
718 // -------------------- CwiseBinaryOp --------------------
719 
720 // this is a binary expression
721 template<typename BinaryOp, typename Lhs, typename Rhs>
722 struct evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
723   : public binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
724 {
725   typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
726   typedef binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs> > Base;
727 
728   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
729   explicit evaluator(const XprType& xpr) : Base(xpr) {}
730 };
731 
732 template<typename BinaryOp, typename Lhs, typename Rhs>
733 struct binary_evaluator<CwiseBinaryOp<BinaryOp, Lhs, Rhs>, IndexBased, IndexBased>
734   : evaluator_base<CwiseBinaryOp<BinaryOp, Lhs, Rhs> >
735 {
736   typedef CwiseBinaryOp<BinaryOp, Lhs, Rhs> XprType;
737 
738   enum {
739     CoeffReadCost = int(evaluator<Lhs>::CoeffReadCost) + int(evaluator<Rhs>::CoeffReadCost) + int(functor_traits<BinaryOp>::Cost),
740 
741     LhsFlags = evaluator<Lhs>::Flags,
742     RhsFlags = evaluator<Rhs>::Flags,
743     SameType = is_same<typename Lhs::Scalar,typename Rhs::Scalar>::value,
744     StorageOrdersAgree = (int(LhsFlags)&RowMajorBit)==(int(RhsFlags)&RowMajorBit),
745     Flags0 = (int(LhsFlags) | int(RhsFlags)) & (
746         HereditaryBits
747       | (int(LhsFlags) & int(RhsFlags) &
748            ( (StorageOrdersAgree ? LinearAccessBit : 0)
749            | (functor_traits<BinaryOp>::PacketAccess && StorageOrdersAgree && SameType ? PacketAccessBit : 0)
750            )
751         )
752      ),
753     Flags = (Flags0 & ~RowMajorBit) | (LhsFlags & RowMajorBit),
754     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<Lhs>::Alignment,evaluator<Rhs>::Alignment)
755   };
756 
757   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
758   explicit binary_evaluator(const XprType& xpr) : m_d(xpr)
759   {
760     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<BinaryOp>::Cost);
761     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
762   }
763 
764   typedef typename XprType::CoeffReturnType CoeffReturnType;
765 
766   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
767   CoeffReturnType coeff(Index row, Index col) const
768   {
769     return m_d.func()(m_d.lhsImpl.coeff(row, col), m_d.rhsImpl.coeff(row, col));
770   }
771 
772   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
773   CoeffReturnType coeff(Index index) const
774   {
775     return m_d.func()(m_d.lhsImpl.coeff(index), m_d.rhsImpl.coeff(index));
776   }
777 
778   template<int LoadMode, typename PacketType>
779   EIGEN_STRONG_INLINE
780   PacketType packet(Index row, Index col) const
781   {
782     return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(row, col),
783                                m_d.rhsImpl.template packet<LoadMode,PacketType>(row, col));
784   }
785 
786   template<int LoadMode, typename PacketType>
787   EIGEN_STRONG_INLINE
788   PacketType packet(Index index) const
789   {
790     return m_d.func().packetOp(m_d.lhsImpl.template packet<LoadMode,PacketType>(index),
791                                m_d.rhsImpl.template packet<LoadMode,PacketType>(index));
792   }
793 
794 protected:
795 
796   // this helper permits completely eliminating the functor if it is empty
797   struct Data
798   {
799     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
800     Data(const XprType& xpr) : op(xpr.functor()), lhsImpl(xpr.lhs()), rhsImpl(xpr.rhs()) {}
801     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
802     const BinaryOp& func() const { return op; }
803     BinaryOp op;
804     evaluator<Lhs> lhsImpl;
805     evaluator<Rhs> rhsImpl;
806   };
807 
808   Data m_d;
809 };
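//
// For illustration (hypothetical usage): for a sum expression, each coefficient or packet is formed
// on the fly by combining the two nested evaluators through the functor; nothing is materialized.
//
//   Eigen::VectorXd a = Eigen::VectorXd::Random(8), b = Eigen::VectorXd::Random(8);
//   auto s = a + b;                                   // CwiseBinaryOp expression
//   Eigen::internal::evaluator<decltype(s)> ev(s);
//   // ev.coeff(i) == a(i) + b(i); packet(i) likewise combines one SIMD packet from each side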
810 
811 // -------------------- CwiseUnaryView --------------------
812 
813 template<typename UnaryOp, typename ArgType>
814 struct unary_evaluator<CwiseUnaryView<UnaryOp, ArgType>, IndexBased>
815   : evaluator_base<CwiseUnaryView<UnaryOp, ArgType> >
816 {
817   typedef CwiseUnaryView<UnaryOp, ArgType> XprType;
818 
819   enum {
820     CoeffReadCost = int(evaluator<ArgType>::CoeffReadCost) + int(functor_traits<UnaryOp>::Cost),
821 
822     Flags = (evaluator<ArgType>::Flags & (HereditaryBits | LinearAccessBit | DirectAccessBit)),
823 
824     Alignment = 0 // FIXME it is not very clear why alignment is necessarily lost...
825   };
826 
827   EIGEN_DEVICE_FUNC explicit unary_evaluator(const XprType& op) : m_d(op)
828   {
829     EIGEN_INTERNAL_CHECK_COST_VALUE(functor_traits<UnaryOp>::Cost);
830     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
831   }
832 
833   typedef typename XprType::Scalar Scalar;
834   typedef typename XprType::CoeffReturnType CoeffReturnType;
835 
836   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
837   CoeffReturnType coeff(Index row, Index col) const
838   {
839     return m_d.func()(m_d.argImpl.coeff(row, col));
840   }
841 
842   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
843   CoeffReturnType coeff(Index index) const
844   {
845     return m_d.func()(m_d.argImpl.coeff(index));
846   }
847 
848   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
849   Scalar& coeffRef(Index row, Index col)
850   {
851     return m_d.func()(m_d.argImpl.coeffRef(row, col));
852   }
853 
854   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
855   Scalar& coeffRef(Index index)
856   {
857     return m_d.func()(m_d.argImpl.coeffRef(index));
858   }
859 
860 protected:
861 
862   // this helper permits completely eliminating the functor if it is empty
863   struct Data
864   {
865     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
866     Data(const XprType& xpr) : op(xpr.functor()), argImpl(xpr.nestedExpression()) {}
867     EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
868     const UnaryOp& func() const { return op; }
869     UnaryOp op;
870     evaluator<ArgType> argImpl;
871   };
872 
873   Data m_d;
874 };
875 
876 // -------------------- Map --------------------
877 
878 // FIXME perhaps the PlainObjectType could be provided by Derived::PlainObject ?
879 // but that might complicate template specialization
880 template<typename Derived, typename PlainObjectType>
881 struct mapbase_evaluator;
882 
883 template<typename Derived, typename PlainObjectType>
884 struct mapbase_evaluator : evaluator_base<Derived>
885 {
886   typedef Derived  XprType;
887   typedef typename XprType::PointerType PointerType;
888   typedef typename XprType::Scalar Scalar;
889   typedef typename XprType::CoeffReturnType CoeffReturnType;
890 
891   enum {
892     IsRowMajor = XprType::RowsAtCompileTime,
893     ColsAtCompileTime = XprType::ColsAtCompileTime,
894     CoeffReadCost = NumTraits<Scalar>::ReadCost
895   };
896 
897   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
898   explicit mapbase_evaluator(const XprType& map)
899     : m_data(const_cast<PointerType>(map.data())),
900       m_innerStride(map.innerStride()),
901       m_outerStride(map.outerStride())
902   {
903     EIGEN_STATIC_ASSERT(EIGEN_IMPLIES(evaluator<Derived>::Flags&PacketAccessBit, internal::inner_stride_at_compile_time<Derived>::ret==1),
904                         PACKET_ACCESS_REQUIRES_TO_HAVE_INNER_STRIDE_FIXED_TO_1);
905     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
906   }
907 
908   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
909   CoeffReturnType coeff(Index row, Index col) const
910   {
911     return m_data[col * colStride() + row * rowStride()];
912   }
913 
914   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
915   CoeffReturnType coeff(Index index) const
916   {
917     return m_data[index * m_innerStride.value()];
918   }
919 
920   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
921   Scalar& coeffRef(Index row, Index col)
922   {
923     return m_data[col * colStride() + row * rowStride()];
924   }
925 
926   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
927   Scalar& coeffRef(Index index)
928   {
929     return m_data[index * m_innerStride.value()];
930   }
931 
932   template<int LoadMode, typename PacketType>
933   EIGEN_STRONG_INLINE
934   PacketType packet(Index row, Index col) const
935   {
936     PointerType ptr = m_data + row * rowStride() + col * colStride();
937     return internal::ploadt<PacketType, LoadMode>(ptr);
938   }
939 
940   template<int LoadMode, typename PacketType>
941   EIGEN_STRONG_INLINE
942   PacketType packet(Index index) const
943   {
944     return internal::ploadt<PacketType, LoadMode>(m_data + index * m_innerStride.value());
945   }
946 
947   template<int StoreMode, typename PacketType>
948   EIGEN_STRONG_INLINE
949   void writePacket(Index row, Index col, const PacketType& x)
950   {
951     PointerType ptr = m_data + row * rowStride() + col * colStride();
952     return internal::pstoret<Scalar, PacketType, StoreMode>(ptr, x);
953   }
954 
955   template<int StoreMode, typename PacketType>
956   EIGEN_STRONG_INLINE
957   void writePacket(Index index, const PacketType& x)
958   {
959     internal::pstoret<Scalar, PacketType, StoreMode>(m_data + index * m_innerStride.value(), x);
960   }
961 protected:
962   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
963   Index rowStride() const EIGEN_NOEXCEPT {
964     return XprType::IsRowMajor ? m_outerStride.value() : m_innerStride.value();
965   }
966   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
967   Index colStride() const EIGEN_NOEXCEPT {
968      return XprType::IsRowMajor ? m_innerStride.value() : m_outerStride.value();
969   }
970 
971   PointerType m_data;
972   const internal::variable_if_dynamic<Index, XprType::InnerStrideAtCompileTime> m_innerStride;
973   const internal::variable_if_dynamic<Index, XprType::OuterStrideAtCompileTime> m_outerStride;
974 };
975 
976 template<typename PlainObjectType, int MapOptions, typename StrideType>
977 struct evaluator<Map<PlainObjectType, MapOptions, StrideType> >
978   : public mapbase_evaluator<Map<PlainObjectType, MapOptions, StrideType>, PlainObjectType>
979 {
980   typedef Map<PlainObjectType, MapOptions, StrideType> XprType;
981   typedef typename XprType::Scalar Scalar;
982   // TODO: should check for smaller packet types once we can handle multi-sized packet types
983   typedef typename packet_traits<Scalar>::type PacketScalar;
984 
985   enum {
986     InnerStrideAtCompileTime = StrideType::InnerStrideAtCompileTime == 0
987                              ? int(PlainObjectType::InnerStrideAtCompileTime)
988                              : int(StrideType::InnerStrideAtCompileTime),
989     OuterStrideAtCompileTime = StrideType::OuterStrideAtCompileTime == 0
990                              ? int(PlainObjectType::OuterStrideAtCompileTime)
991                              : int(StrideType::OuterStrideAtCompileTime),
992     HasNoInnerStride = InnerStrideAtCompileTime == 1,
993     HasNoOuterStride = StrideType::OuterStrideAtCompileTime == 0,
994     HasNoStride = HasNoInnerStride && HasNoOuterStride,
995     IsDynamicSize = PlainObjectType::SizeAtCompileTime==Dynamic,
996 
997     PacketAccessMask = bool(HasNoInnerStride) ? ~int(0) : ~int(PacketAccessBit),
998     LinearAccessMask = bool(HasNoStride) || bool(PlainObjectType::IsVectorAtCompileTime) ? ~int(0) : ~int(LinearAccessBit),
999     Flags = int( evaluator<PlainObjectType>::Flags) & (LinearAccessMask&PacketAccessMask),
1000 
1001     Alignment = int(MapOptions)&int(AlignedMask)
1002   };
1003 
1004   EIGEN_DEVICE_FUNC explicit evaluator(const XprType& map)
1005     : mapbase_evaluator<XprType, PlainObjectType>(map)
1006   { }
1007 };
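//
// For illustration (hypothetical usage): the stride type determines which optimizations survive in
// the flags above. A Map with a non-unit inner stride keeps index-based access but loses
// PacketAccessBit, since packet loads require a contiguous inner dimension.
//
//   double buf[32] = {};
//   Eigen::Map<Eigen::VectorXd, Eigen::Unaligned, Eigen::InnerStride<2> > v(buf, 16);  // every other element
//   // evaluator<decltype(v)>::Flags has PacketAccessBit masked out (HasNoInnerStride is false)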
1008 
1009 // -------------------- Ref --------------------
1010 
1011 template<typename PlainObjectType, int RefOptions, typename StrideType>
1012 struct evaluator<Ref<PlainObjectType, RefOptions, StrideType> >
1013   : public mapbase_evaluator<Ref<PlainObjectType, RefOptions, StrideType>, PlainObjectType>
1014 {
1015   typedef Ref<PlainObjectType, RefOptions, StrideType> XprType;
1016 
1017   enum {
1018     Flags = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Flags,
1019     Alignment = evaluator<Map<PlainObjectType, RefOptions, StrideType> >::Alignment
1020   };
1021 
1022   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1023   explicit evaluator(const XprType& ref)
1024     : mapbase_evaluator<XprType, PlainObjectType>(ref)
1025   { }
1026 };
1027 
1028 // -------------------- Block --------------------
1029 
1030 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel,
1031          bool HasDirectAccess = internal::has_direct_access<ArgType>::ret> struct block_evaluator;
1032 
1033 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1034 struct evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1035   : block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel>
1036 {
1037   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1038   typedef typename XprType::Scalar Scalar;
1039   // TODO: should check for smaller packet types once we can handle multi-sized packet types
1040   typedef typename packet_traits<Scalar>::type PacketScalar;
1041 
1042   enum {
1043     CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
1044 
1045     RowsAtCompileTime = traits<XprType>::RowsAtCompileTime,
1046     ColsAtCompileTime = traits<XprType>::ColsAtCompileTime,
1047     MaxRowsAtCompileTime = traits<XprType>::MaxRowsAtCompileTime,
1048     MaxColsAtCompileTime = traits<XprType>::MaxColsAtCompileTime,
1049 
1050     ArgTypeIsRowMajor = (int(evaluator<ArgType>::Flags)&RowMajorBit) != 0,
1051     IsRowMajor = (MaxRowsAtCompileTime==1 && MaxColsAtCompileTime!=1) ? 1
1052                : (MaxColsAtCompileTime==1 && MaxRowsAtCompileTime!=1) ? 0
1053                : ArgTypeIsRowMajor,
1054     HasSameStorageOrderAsArgType = (IsRowMajor == ArgTypeIsRowMajor),
1055     InnerSize = IsRowMajor ? int(ColsAtCompileTime) : int(RowsAtCompileTime),
1056     InnerStrideAtCompileTime = HasSameStorageOrderAsArgType
1057                              ? int(inner_stride_at_compile_time<ArgType>::ret)
1058                              : int(outer_stride_at_compile_time<ArgType>::ret),
1059     OuterStrideAtCompileTime = HasSameStorageOrderAsArgType
1060                              ? int(outer_stride_at_compile_time<ArgType>::ret)
1061                              : int(inner_stride_at_compile_time<ArgType>::ret),
1062     MaskPacketAccessBit = (InnerStrideAtCompileTime == 1 || HasSameStorageOrderAsArgType) ? PacketAccessBit : 0,
1063 
1064     FlagsLinearAccessBit = (RowsAtCompileTime == 1 || ColsAtCompileTime == 1 || (InnerPanel && (evaluator<ArgType>::Flags&LinearAccessBit))) ? LinearAccessBit : 0,
1065     FlagsRowMajorBit = XprType::Flags&RowMajorBit,
1066     Flags0 = evaluator<ArgType>::Flags & ( (HereditaryBits & ~RowMajorBit) |
1067                                            DirectAccessBit |
1068                                            MaskPacketAccessBit),
1069     Flags = Flags0 | FlagsLinearAccessBit | FlagsRowMajorBit,
1070 
1071     PacketAlignment = unpacket_traits<PacketScalar>::alignment,
1072     Alignment0 = (InnerPanel && (OuterStrideAtCompileTime!=Dynamic)
1073                              && (OuterStrideAtCompileTime!=0)
1074                              && (((OuterStrideAtCompileTime * int(sizeof(Scalar))) % int(PacketAlignment)) == 0)) ? int(PacketAlignment) : 0,
1075     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ArgType>::Alignment, Alignment0)
1076   };
1077   typedef block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel> block_evaluator_type;
1078   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1079   explicit evaluator(const XprType& block) : block_evaluator_type(block)
1080   {
1081     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1082   }
1083 };
1084 
1085 // no direct-access => dispatch to a unary evaluator
1086 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1087 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /*HasDirectAccess*/ false>
1088   : unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1089 {
1090   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1091 
1092   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1093   explicit block_evaluator(const XprType& block)
1094     : unary_evaluator<XprType>(block)
1095   {}
1096 };
1097 
1098 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1099 struct unary_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>, IndexBased>
1100   : evaluator_base<Block<ArgType, BlockRows, BlockCols, InnerPanel> >
1101 {
1102   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1103 
1104   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1105   explicit unary_evaluator(const XprType& block)
1106     : m_argImpl(block.nestedExpression()),
1107       m_startRow(block.startRow()),
1108       m_startCol(block.startCol()),
1109       m_linear_offset(ForwardLinearAccess?(ArgType::IsRowMajor ? block.startRow()*block.nestedExpression().cols() + block.startCol() : block.startCol()*block.nestedExpression().rows() + block.startRow()):0)
1110   { }
1111 
1112   typedef typename XprType::Scalar Scalar;
1113   typedef typename XprType::CoeffReturnType CoeffReturnType;
1114 
1115   enum {
1116     RowsAtCompileTime = XprType::RowsAtCompileTime,
1117     ForwardLinearAccess = (InnerPanel || int(XprType::IsRowMajor)==int(ArgType::IsRowMajor)) && bool(evaluator<ArgType>::Flags&LinearAccessBit)
1118   };
1119 
1120   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1121   CoeffReturnType coeff(Index row, Index col) const
1122   {
1123     return m_argImpl.coeff(m_startRow.value() + row, m_startCol.value() + col);
1124   }
1125 
1126   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1127   CoeffReturnType coeff(Index index) const
1128   {
1129     return linear_coeff_impl(index, bool_constant<ForwardLinearAccess>());
1130   }
1131 
1132   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1133   Scalar& coeffRef(Index row, Index col)
1134   {
1135     return m_argImpl.coeffRef(m_startRow.value() + row, m_startCol.value() + col);
1136   }
1137 
1138   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1139   Scalar& coeffRef(Index index)
1140   {
1141     return linear_coeffRef_impl(index, bool_constant<ForwardLinearAccess>());
1142   }
1143 
1144   template<int LoadMode, typename PacketType>
1145   EIGEN_STRONG_INLINE
1146   PacketType packet(Index row, Index col) const
1147   {
1148     return m_argImpl.template packet<LoadMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col);
1149   }
1150 
1151   template<int LoadMode, typename PacketType>
1152   EIGEN_STRONG_INLINE
1153   PacketType packet(Index index) const
1154   {
1155     if (ForwardLinearAccess)
1156       return m_argImpl.template packet<LoadMode,PacketType>(m_linear_offset.value() + index);
1157     else
1158       return packet<LoadMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1159                                          RowsAtCompileTime == 1 ? index : 0);
1160   }
1161 
1162   template<int StoreMode, typename PacketType>
1163   EIGEN_STRONG_INLINE
1164   void writePacket(Index row, Index col, const PacketType& x)
1165   {
1166     return m_argImpl.template writePacket<StoreMode,PacketType>(m_startRow.value() + row, m_startCol.value() + col, x);
1167   }
1168 
1169   template<int StoreMode, typename PacketType>
1170   EIGEN_STRONG_INLINE
1171   void writePacket(Index index, const PacketType& x)
1172   {
1173     if (ForwardLinearAccess)
1174       return m_argImpl.template writePacket<StoreMode,PacketType>(m_linear_offset.value() + index, x);
1175     else
1176       return writePacket<StoreMode,PacketType>(RowsAtCompileTime == 1 ? 0 : index,
1177                                               RowsAtCompileTime == 1 ? index : 0,
1178                                               x);
1179   }
1180 
1181 protected:
1182   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1183   CoeffReturnType linear_coeff_impl(Index index, internal::true_type /* ForwardLinearAccess */) const
1184   {
1185     return m_argImpl.coeff(m_linear_offset.value() + index);
1186   }
1187   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1188   CoeffReturnType linear_coeff_impl(Index index, internal::false_type /* not ForwardLinearAccess */) const
1189   {
1190     return coeff(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1191   }
1192 
1193   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1194   Scalar& linear_coeffRef_impl(Index index, internal::true_type /* ForwardLinearAccess */)
1195   {
1196     return m_argImpl.coeffRef(m_linear_offset.value() + index);
1197   }
1198   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1199   Scalar& linear_coeffRef_impl(Index index, internal::false_type /* not ForwardLinearAccess */)
1200   {
1201     return coeffRef(RowsAtCompileTime == 1 ? 0 : index, RowsAtCompileTime == 1 ? index : 0);
1202   }
1203 
1204   evaluator<ArgType> m_argImpl;
1205   const variable_if_dynamic<Index, (ArgType::RowsAtCompileTime == 1 && BlockRows==1) ? 0 : Dynamic> m_startRow;
1206   const variable_if_dynamic<Index, (ArgType::ColsAtCompileTime == 1 && BlockCols==1) ? 0 : Dynamic> m_startCol;
1207   const variable_if_dynamic<Index, ForwardLinearAccess ? Dynamic : 0> m_linear_offset;
1208 };
1209 
1210 // TODO: This evaluator does not actually use the child evaluator;
1211 // all action is via the data() as returned by the Block expression.
1212 
1213 template<typename ArgType, int BlockRows, int BlockCols, bool InnerPanel>
1214 struct block_evaluator<ArgType, BlockRows, BlockCols, InnerPanel, /* HasDirectAccess */ true>
1215   : mapbase_evaluator<Block<ArgType, BlockRows, BlockCols, InnerPanel>,
1216                       typename Block<ArgType, BlockRows, BlockCols, InnerPanel>::PlainObject>
1217 {
1218   typedef Block<ArgType, BlockRows, BlockCols, InnerPanel> XprType;
1219   typedef typename XprType::Scalar Scalar;
1220 
1221   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1222   explicit block_evaluator(const XprType& block)
1223     : mapbase_evaluator<XprType, typename XprType::PlainObject>(block)
1224   {
1225     // TODO: for the 3.3 release, this should be turned into an internal assertion, but let's keep it as is for the beta lifetime
1226     eigen_assert(((internal::UIntPtr(block.data()) % EIGEN_PLAIN_ENUM_MAX(1,evaluator<XprType>::Alignment)) == 0) && "data is not aligned");
1227   }
1228 };
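//
// For illustration (hypothetical usage) of the dispatch above: a block of a plain matrix has direct
// access and goes through mapbase_evaluator (pointer plus strides), whereas a block of a lazy
// expression has no data() and falls back to the index-offsetting unary_evaluator defined above.
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(4,4), B = Eigen::MatrixXd::Random(4,4);
//   auto b1 = A.block(1,1,2,2);          // direct access: mapbase_evaluator path
//   auto b2 = (A + B).block(1,1,2,2);    // no direct access: unary_evaluator path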
1229 
1230 
1231 // -------------------- Select --------------------
1232 // NOTE shall we introduce a ternary_evaluator?
1233 
1234 // TODO enable vectorization for Select
1235 template<typename ConditionMatrixType, typename ThenMatrixType, typename ElseMatrixType>
1236 struct evaluator<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1237   : evaluator_base<Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> >
1238 {
1239   typedef Select<ConditionMatrixType, ThenMatrixType, ElseMatrixType> XprType;
1240   enum {
1241     CoeffReadCost = evaluator<ConditionMatrixType>::CoeffReadCost
1242                   + EIGEN_PLAIN_ENUM_MAX(evaluator<ThenMatrixType>::CoeffReadCost,
1243                                          evaluator<ElseMatrixType>::CoeffReadCost),
1244 
1245     Flags = (unsigned int)evaluator<ThenMatrixType>::Flags & evaluator<ElseMatrixType>::Flags & HereditaryBits,
1246 
1247     Alignment = EIGEN_PLAIN_ENUM_MIN(evaluator<ThenMatrixType>::Alignment, evaluator<ElseMatrixType>::Alignment)
1248   };
1249 
1250   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1251   explicit evaluator(const XprType& select)
1252     : m_conditionImpl(select.conditionMatrix()),
1253       m_thenImpl(select.thenMatrix()),
1254       m_elseImpl(select.elseMatrix())
1255   {
1256     EIGEN_INTERNAL_CHECK_COST_VALUE(CoeffReadCost);
1257   }
1258 
1259   typedef typename XprType::CoeffReturnType CoeffReturnType;
1260 
1261   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1262   CoeffReturnType coeff(Index row, Index col) const
1263   {
1264     if (m_conditionImpl.coeff(row, col))
1265       return m_thenImpl.coeff(row, col);
1266     else
1267       return m_elseImpl.coeff(row, col);
1268   }
1269 
1270   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1271   CoeffReturnType coeff(Index index) const
1272   {
1273     if (m_conditionImpl.coeff(index))
1274       return m_thenImpl.coeff(index);
1275     else
1276       return m_elseImpl.coeff(index);
1277   }
1278 
1279 protected:
1280   evaluator<ConditionMatrixType> m_conditionImpl;
1281   evaluator<ThenMatrixType> m_thenImpl;
1282   evaluator<ElseMatrixType> m_elseImpl;
1283 };
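//
// For illustration (hypothetical usage): Select evaluates the condition coefficient-wise and picks
// from the then/else evaluators, which is why only coeff() is provided here (see the TODO above
// about vectorization).
//
//   Eigen::MatrixXd A = Eigen::MatrixXd::Random(3,3), B = Eigen::MatrixXd::Zero(3,3);
//   Eigen::MatrixXd C = (A.array() > 0.0).select(A, B);   // C(i,j) = A(i,j) > 0 ? A(i,j) : B(i,j)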
1284 
1285 
1286 // -------------------- Replicate --------------------
1287 
1288 template<typename ArgType, int RowFactor, int ColFactor>
1289 struct unary_evaluator<Replicate<ArgType, RowFactor, ColFactor> >
1290   : evaluator_base<Replicate<ArgType, RowFactor, ColFactor> >
1291 {
1292   typedef Replicate<ArgType, RowFactor, ColFactor> XprType;
1293   typedef typename XprType::CoeffReturnType CoeffReturnType;
1294   enum {
1295     Factor = (RowFactor==Dynamic || ColFactor==Dynamic) ? Dynamic : RowFactor*ColFactor
1296   };
1297   typedef typename internal::nested_eval<ArgType,Factor>::type ArgTypeNested;
1298   typedef typename internal::remove_all<ArgTypeNested>::type ArgTypeNestedCleaned;
1299 
1300   enum {
1301     CoeffReadCost = evaluator<ArgTypeNestedCleaned>::CoeffReadCost,
1302     LinearAccessMask = XprType::IsVectorAtCompileTime ? LinearAccessBit : 0,
1303     Flags = (evaluator<ArgTypeNestedCleaned>::Flags & (HereditaryBits|LinearAccessMask) & ~RowMajorBit) | (traits<XprType>::Flags & RowMajorBit),
1304 
1305     Alignment = evaluator<ArgTypeNestedCleaned>::Alignment
1306   };
1307 
1308   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1309   explicit unary_evaluator(const XprType& replicate)
1310     : m_arg(replicate.nestedExpression()),
1311       m_argImpl(m_arg),
1312       m_rows(replicate.nestedExpression().rows()),
1313       m_cols(replicate.nestedExpression().cols())
1314   {}
1315 
1316   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1317   CoeffReturnType coeff(Index row, Index col) const
1318   {
1319     // try to avoid using modulo; this is a pure optimization strategy
1320     const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
1321                            : RowFactor==1 ? row
1322                            : row % m_rows.value();
1323     const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
1324                            : ColFactor==1 ? col
1325                            : col % m_cols.value();
1326 
1327     return m_argImpl.coeff(actual_row, actual_col);
1328   }
1329 
1330   EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
1331   CoeffReturnType coeff(Index index) const
1332   {
1333     // try to avoid using modulo; this is a pure optimization strategy
    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
                                  ? (ColFactor==1 ?  index : index%m_cols.value())
                                  : (RowFactor==1 ?  index : index%m_rows.value());

    return m_argImpl.coeff(actual_index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    const Index actual_row = internal::traits<XprType>::RowsAtCompileTime==1 ? 0
                           : RowFactor==1 ? row
                           : row % m_rows.value();
    const Index actual_col = internal::traits<XprType>::ColsAtCompileTime==1 ? 0
                           : ColFactor==1 ? col
                           : col % m_cols.value();

    return m_argImpl.template packet<LoadMode,PacketType>(actual_row, actual_col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    const Index actual_index = internal::traits<XprType>::RowsAtCompileTime==1
                                  ? (ColFactor==1 ?  index : index%m_cols.value())
                                  : (RowFactor==1 ?  index : index%m_rows.value());

    return m_argImpl.template packet<LoadMode,PacketType>(actual_index);
  }

protected:
  const ArgTypeNested m_arg;
  evaluator<ArgTypeNestedCleaned> m_argImpl;
  const variable_if_dynamic<Index, ArgType::RowsAtCompileTime> m_rows;
  const variable_if_dynamic<Index, ArgType::ColsAtCompileTime> m_cols;
};

// -------------------- MatrixWrapper and ArrayWrapper --------------------
//
// evaluator_wrapper_base<T> is a common base class for the
// MatrixWrapper and ArrayWrapper evaluators.
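//
// Illustration (usage sketch, not part of this header's logic): these wrappers are what
// m.array() and a.matrix() return. Since they merely reinterpret the nested expression,
// the common base below forwards every coeff/coeffRef/packet/writePacket call unchanged
// to the nested evaluator.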

template<typename XprType>
struct evaluator_wrapper_base
  : evaluator_base<XprType>
{
  typedef typename remove_all<typename XprType::NestedExpressionType>::type ArgType;
  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,
    Flags = evaluator<ArgType>::Flags,
    Alignment = evaluator<ArgType>::Alignment
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit evaluator_wrapper_base(const ArgType& arg) : m_argImpl(arg) {}

  typedef typename ArgType::Scalar Scalar;
  typedef typename ArgType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(row, col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(row, col);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    return m_argImpl.template packet<LoadMode,PacketType>(index);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(row, col, x);
  }

  template<int StoreMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    m_argImpl.template writePacket<StoreMode>(index, x);
  }

protected:
  evaluator<ArgType> m_argImpl;
};

template<typename TArgType>
struct unary_evaluator<MatrixWrapper<TArgType> >
  : evaluator_wrapper_base<MatrixWrapper<TArgType> >
{
  typedef MatrixWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& wrapper)
    : evaluator_wrapper_base<MatrixWrapper<TArgType> >(wrapper.nestedExpression())
  { }
};

template<typename TArgType>
struct unary_evaluator<ArrayWrapper<TArgType> >
  : evaluator_wrapper_base<ArrayWrapper<TArgType> >
{
  typedef ArrayWrapper<TArgType> XprType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& wrapper)
    : evaluator_wrapper_base<ArrayWrapper<TArgType> >(wrapper.nestedExpression())
  { }
};


// -------------------- Reverse --------------------
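//
// Illustration (usage sketch, not part of this header's logic): A.reverse() flips both
// directions, A.colwise().reverse() flips each column (Vertical), and
// A.rowwise().reverse() flips each row (Horizontal). Coefficient (i, j) of the reversed
// expression therefore reads the argument at (rows-1-i, cols-1-j), with only the
// reversed direction(s) remapped.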

// defined in Reverse.h:
template<typename PacketType, bool ReversePacket> struct reverse_packet_cond;

template<typename ArgType, int Direction>
struct unary_evaluator<Reverse<ArgType, Direction> >
  : evaluator_base<Reverse<ArgType, Direction> >
{
  typedef Reverse<ArgType, Direction> XprType;
  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  enum {
    IsRowMajor = XprType::IsRowMajor,
    IsColMajor = !IsRowMajor,
    ReverseRow = (Direction == Vertical)   || (Direction == BothDirections),
    ReverseCol = (Direction == Horizontal) || (Direction == BothDirections),
    ReversePacket = (Direction == BothDirections)
                    || ((Direction == Vertical)   && IsColMajor)
                    || ((Direction == Horizontal) && IsRowMajor),

    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    // enable LinearAccess only when vectorization is possible, because computing the
    // reversed linear index involves a multiplication (see coeff(Index) below)
    // FIXME enable DirectAccess with negative strides?
    Flags0 = evaluator<ArgType>::Flags,
    LinearAccess = ( (Direction==BothDirections) && (int(Flags0)&PacketAccessBit) )
                  || ((ReverseRow && XprType::ColsAtCompileTime==1) || (ReverseCol && XprType::RowsAtCompileTime==1))
                 ? LinearAccessBit : 0,

    Flags = int(Flags0) & (HereditaryBits | PacketAccessBit | LinearAccess),

    Alignment = 0 // FIXME in some rare cases, Alignment could be preserved, like a Vector4f.
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit unary_evaluator(const XprType& reverse)
    : m_argImpl(reverse.nestedExpression()),
      m_rows(ReverseRow ? reverse.nestedExpression().rows() : 1),
      m_cols(ReverseCol ? reverse.nestedExpression().cols() : 1)
  { }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index col) const
  {
    return m_argImpl.coeff(ReverseRow ? m_rows.value() - row - 1 : row,
                           ReverseCol ? m_cols.value() - col - 1 : col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
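    // Linear reversal: the index-th coefficient of the reversed expression is the
    // (size-1-index)-th coefficient of the argument; note the multiplication involved
    // here, which motivates the LinearAccess gating above.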
    return m_argImpl.coeff(m_rows.value() * m_cols.value() - index - 1);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index col)
  {
    return m_argImpl.coeffRef(ReverseRow ? m_rows.value() - row - 1 : row,
                              ReverseCol ? m_cols.value() - col - 1 : col);
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(m_rows.value() * m_cols.value() - index - 1);
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index row, Index col) const
  {
    enum {
      PacketSize = unpacket_traits<PacketType>::size,
      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
      OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1
    };
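    // When the direction along which the packet is laid out is itself reversed, the load
    // starts PacketSize elements further back (OffsetRow/OffsetCol), and the elements
    // within the packet are then reordered by reverse_packet_cond.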
    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
    return reverse_packet::run(m_argImpl.template packet<LoadMode,PacketType>(
                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
                                  ReverseCol ? m_cols.value() - col - OffsetCol : col));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  PacketType packet(Index index) const
  {
    enum { PacketSize = unpacket_traits<PacketType>::size };
    return preverse(m_argImpl.template packet<LoadMode,PacketType>(m_rows.value() * m_cols.value() - index - PacketSize));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index row, Index col, const PacketType& x)
  {
    // FIXME we could factor out the code shared with packet(i,j)
    enum {
      PacketSize = unpacket_traits<PacketType>::size,
      OffsetRow  = ReverseRow && IsColMajor ? PacketSize : 1,
      OffsetCol  = ReverseCol && IsRowMajor ? PacketSize : 1
    };
    typedef internal::reverse_packet_cond<PacketType,ReversePacket> reverse_packet;
    m_argImpl.template writePacket<LoadMode>(
                                  ReverseRow ? m_rows.value() - row - OffsetRow : row,
                                  ReverseCol ? m_cols.value() - col - OffsetCol : col,
                                  reverse_packet::run(x));
  }

  template<int LoadMode, typename PacketType>
  EIGEN_STRONG_INLINE
  void writePacket(Index index, const PacketType& x)
  {
    enum { PacketSize = unpacket_traits<PacketType>::size };
    m_argImpl.template writePacket<LoadMode>
      (m_rows.value() * m_cols.value() - index - PacketSize, preverse(x));
  }

protected:
  evaluator<ArgType> m_argImpl;

  // If rows are not reversed, we do not need to know the number of rows; the same holds for columns.
  // Nonetheless, the value must then be set to 1 so that the coeff(index) method still works for vectors.
  const variable_if_dynamic<Index, ReverseRow ? ArgType::RowsAtCompileTime : 1> m_rows;
  const variable_if_dynamic<Index, ReverseCol ? ArgType::ColsAtCompileTime : 1> m_cols;
};


// -------------------- Diagonal --------------------
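//
// Illustration (usage sketch, not part of this header's logic): A.diagonal() refers to the
// main diagonal, A.diagonal(1) to the first super-diagonal and A.diagonal(-2) to the second
// sub-diagonal. Coefficient i of diagonal(k) reads the argument at
// (i + rowOffset, i + colOffset) with rowOffset = max(0,-k) and colOffset = max(0,k),
// which is exactly what rowOffset()/colOffset() below compute.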

template<typename ArgType, int DiagIndex>
struct evaluator<Diagonal<ArgType, DiagIndex> >
  : evaluator_base<Diagonal<ArgType, DiagIndex> >
{
  typedef Diagonal<ArgType, DiagIndex> XprType;

  enum {
    CoeffReadCost = evaluator<ArgType>::CoeffReadCost,

    Flags = (unsigned int)(evaluator<ArgType>::Flags & (HereditaryBits | DirectAccessBit) & ~RowMajorBit) | LinearAccessBit,

    Alignment = 0
  };

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  explicit evaluator(const XprType& diagonal)
    : m_argImpl(diagonal.nestedExpression()),
      m_index(diagonal.index())
  { }

  typedef typename XprType::Scalar Scalar;
  typedef typename XprType::CoeffReturnType CoeffReturnType;

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index row, Index) const
  {
    return m_argImpl.coeff(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  CoeffReturnType coeff(Index index) const
  {
    return m_argImpl.coeff(index + rowOffset(), index + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index row, Index)
  {
    return m_argImpl.coeffRef(row + rowOffset(), row + colOffset());
  }

  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE
  Scalar& coeffRef(Index index)
  {
    return m_argImpl.coeffRef(index + rowOffset(), index + colOffset());
  }

protected:
  evaluator<ArgType> m_argImpl;
  const internal::variable_if_dynamicindex<Index, XprType::DiagIndex> m_index;

private:
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
  Index rowOffset() const { return m_index.value() > 0 ? 0 : -m_index.value(); }
  EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE EIGEN_CONSTEXPR
  Index colOffset() const { return m_index.value() > 0 ? m_index.value() : 0; }
};


//----------------------------------------------------------------------
// deprecated code
//----------------------------------------------------------------------

// -------------------- EvalToTemp --------------------

// expression class for evaluating a nested expression into a temporary
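//
// Illustration (descriptive note, not part of the public API): evaluator<EvalToTemp<Xpr> >
// below evaluates the wrapped expression into a plain temporary of type Xpr::PlainObject
// and then behaves exactly like the evaluator of that temporary.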

template<typename ArgType> class EvalToTemp;

template<typename ArgType>
struct traits<EvalToTemp<ArgType> >
  : public traits<ArgType>
{ };

template<typename ArgType>
class EvalToTemp
  : public dense_xpr_base<EvalToTemp<ArgType> >::type
{
 public:

  typedef typename dense_xpr_base<EvalToTemp>::type Base;
  EIGEN_GENERIC_PUBLIC_INTERFACE(EvalToTemp)

  explicit EvalToTemp(const ArgType& arg)
    : m_arg(arg)
  { }

  const ArgType& arg() const
  {
    return m_arg;
  }

  EIGEN_CONSTEXPR Index rows() const EIGEN_NOEXCEPT
  {
    return m_arg.rows();
  }

  EIGEN_CONSTEXPR Index cols() const EIGEN_NOEXCEPT
  {
    return m_arg.cols();
  }

 private:
  const ArgType& m_arg;
};

template<typename ArgType>
struct evaluator<EvalToTemp<ArgType> >
  : public evaluator<typename ArgType::PlainObject>
{
  typedef EvalToTemp<ArgType>                   XprType;
  typedef typename ArgType::PlainObject         PlainObject;
  typedef evaluator<PlainObject> Base;

  EIGEN_DEVICE_FUNC explicit evaluator(const XprType& xpr)
    : m_result(xpr.arg())
  {
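    // m_result is fully initialized at this point, so the base evaluator can now be
    // (re)built on top of it via placement new; this is needed because base classes are
    // constructed before members, i.e. before m_result exists.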
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

  // This constructor is used when nesting an EvalToTemp evaluator inside another evaluator
  EIGEN_DEVICE_FUNC evaluator(const ArgType& arg)
    : m_result(arg)
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
  }

protected:
  PlainObject m_result;
};

} // namespace internal

} // end namespace Eigen

#endif // EIGEN_COREEVALUATORS_H