// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2009-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_SELFADJOINTVIEW_H
#define EIGEN_SPARSE_SELFADJOINTVIEW_H

namespace Eigen {

/** \ingroup SparseCore_Module
  * \class SparseSelfAdjointView
  *
  * \brief Pseudo expression to manipulate a triangular sparse matrix as a selfadjoint matrix.
  *
  * \param MatrixType the type of the sparse matrix storing the coefficients
  * \param Mode can be either \c #Lower or \c #Upper
  *
  * This class is an expression of a selfadjoint matrix from a triangular part of a matrix
  * with given sparse storage of the coefficients. It is the return type of SparseMatrixBase::selfadjointView()
  * and most of the time this is the only way it is used.
  *
  * \sa SparseMatrixBase::selfadjointView()
  */
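
// A minimal usage sketch (illustrative only; A, b, x and n are assumed names, not defined in this file):
//   SparseMatrix<double> A(n,n);         // only one triangular half is referenced
//   VectorXd b(n), x(n);
//   x = A.selfadjointView<Lower>() * b;  // selfadjoint sparse times dense product
//   SparseMatrix<double> full;
//   full = A.selfadjointView<Lower>();   // expand into a full sparse matrix
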
namespace internal {

template<typename MatrixType, unsigned int Mode>
struct traits<SparseSelfAdjointView<MatrixType,Mode> > : traits<MatrixType> {
};

template<int SrcMode,int DstMode,typename MatrixType,int DestOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm = 0);

} // end namespace internal

template<typename MatrixType, unsigned int _Mode> class SparseSelfAdjointView
  : public EigenBase<SparseSelfAdjointView<MatrixType,_Mode> >
{
  public:

    enum {
      Mode = _Mode,
      RowsAtCompileTime = internal::traits<SparseSelfAdjointView>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSelfAdjointView>::ColsAtCompileTime
    };

    typedef EigenBase<SparseSelfAdjointView> Base;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename internal::ref_selector<MatrixType>::non_const_type MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type _MatrixTypeNested;

    explicit inline SparseSelfAdjointView(MatrixType& matrix) : m_matrix(matrix)
    {
      eigen_assert(rows()==cols() && "SelfAdjointView is only for square matrices");
    }

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    /** \internal \returns a reference to the nested matrix */
    const _MatrixTypeNested& matrix() const { return m_matrix; }
    typename internal::remove_reference<MatrixTypeNested>::type& matrix() { return m_matrix; }

    /** \returns an expression of the matrix product between a sparse self-adjoint matrix \c *this and a sparse matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView, OtherDerived>
    operator*(const SparseMatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView, OtherDerived>(*this, rhs.derived());
    }

    /** \returns an expression of the matrix product between a sparse matrix \a lhs and a sparse self-adjoint matrix \a rhs.
      *
      * Note that there is no algorithmic advantage in performing such a product compared to a general sparse-sparse matrix product.
      * Indeed, the SparseSelfAdjointView operand is first copied into a temporary SparseMatrix before computing the product.
      */
    template<typename OtherDerived> friend
    Product<OtherDerived, SparseSelfAdjointView>
    operator*(const SparseMatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived, SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Efficient sparse self-adjoint matrix times dense vector/matrix product */
    template<typename OtherDerived>
    Product<SparseSelfAdjointView,OtherDerived>
    operator*(const MatrixBase<OtherDerived>& rhs) const
    {
      return Product<SparseSelfAdjointView,OtherDerived>(*this, rhs.derived());
    }

    /** Efficient dense vector/matrix times sparse self-adjoint matrix product */
    template<typename OtherDerived> friend
    Product<OtherDerived,SparseSelfAdjointView>
    operator*(const MatrixBase<OtherDerived>& lhs, const SparseSelfAdjointView& rhs)
    {
      return Product<OtherDerived,SparseSelfAdjointView>(lhs.derived(), rhs);
    }

    /** Performs a symmetric rank-K update of the selfadjoint matrix \c *this:
      * \f$ this = this + \alpha ( u u^* ) \f$ where \a u is a vector or matrix.
      *
      * \returns a reference to \c *this
      *
      * To perform \f$ this = this + \alpha ( u^* u ) \f$ you can simply
      * call this function with u.adjoint().
      */
    template<typename DerivedU>
    SparseSelfAdjointView& rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha = Scalar(1));

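    // A hedged usage sketch for rankUpdate (A and u are assumed names, not defined in this file):
    //   A.selfadjointView<Upper>().rankUpdate(u, 2.0);  // A += 2 * u * u^*  (stored half only)
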
    /** \returns an expression of P H P^-1 */
    // TODO implement twists in a more evaluator friendly fashion
    SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode> twistedBy(const PermutationMatrix<Dynamic,Dynamic,StorageIndex>& perm) const
    {
      return SparseSymmetricPermutationProduct<_MatrixTypeNested,Mode>(m_matrix, perm);
    }
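
    // Sketch (P is an assumed PermutationMatrix<Dynamic,Dynamic,StorageIndex>): applying a
    // symmetric permutation while keeping only one triangular half in the destination:
    //   destMat.selfadjointView<Lower>() = A.selfadjointView<Lower>().twistedBy(P);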

    template<typename SrcMatrixType,int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSymmetricPermutationProduct<SrcMatrixType,SrcMode>& permutedMatrix)
    {
      internal::call_assignment_no_alias_no_transpose(*this, permutedMatrix);
      return *this;
    }

    SparseSelfAdjointView& operator=(const SparseSelfAdjointView& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    template<typename SrcMatrixType,unsigned int SrcMode>
    SparseSelfAdjointView& operator=(const SparseSelfAdjointView<SrcMatrixType,SrcMode>& src)
    {
      PermutationMatrix<Dynamic,Dynamic,StorageIndex> pnull;
      return *this = src.twistedBy(pnull);
    }

    void resize(Index rows, Index cols)
    {
      EIGEN_ONLY_USED_FOR_DEBUG(rows);
      EIGEN_ONLY_USED_FOR_DEBUG(cols);
      eigen_assert(rows == this->rows() && cols == this->cols()
                && "SparseSelfAdjointView::resize() does not actually allow resizing.");
    }

  protected:

    MatrixTypeNested m_matrix;
    //mutable VectorI m_countPerRow;
    //mutable VectorI m_countPerCol;
  private:
    template<typename Dest> void evalTo(Dest &) const;
};

/***************************************************************************
* Implementation of SparseMatrixBase methods
***************************************************************************/

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template ConstSelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView() const
{
  return SparseSelfAdjointView<const Derived, UpLo>(derived());
}

template<typename Derived>
template<unsigned int UpLo>
typename SparseMatrixBase<Derived>::template SelfAdjointViewReturnType<UpLo>::Type SparseMatrixBase<Derived>::selfadjointView()
{
  return SparseSelfAdjointView<Derived, UpLo>(derived());
}

/***************************************************************************
* Implementation of SparseSelfAdjointView methods
***************************************************************************/

template<typename MatrixType, unsigned int Mode>
template<typename DerivedU>
SparseSelfAdjointView<MatrixType,Mode>&
SparseSelfAdjointView<MatrixType,Mode>::rankUpdate(const SparseMatrixBase<DerivedU>& u, const Scalar& alpha)
{
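  // Evaluate u * u^* into a temporary; only its Mode triangular part is used below.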
  SparseMatrix<Scalar,(MatrixType::Flags&RowMajorBit)?RowMajor:ColMajor> tmp = u * u.adjoint();
  if(alpha==Scalar(0))
    m_matrix = tmp.template triangularView<Mode>();
  else
    m_matrix += alpha * tmp.template triangularView<Mode>();

  return *this;
}

namespace internal {

// TODO currently a selfadjoint expression has the form SelfAdjointView<.,.>
//      in the future selfadjoint-ness should be defined by the expression traits
//      such that Transpose<SelfAdjointView<.,.> > is valid. (currently TriangularBase::transpose() is overloaded to make it work)
template<typename MatrixType, unsigned int Mode>
struct evaluator_traits<SparseSelfAdjointView<MatrixType,Mode> >
{
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseSelfAdjointShape Shape;
};

struct SparseSelfAdjoint2Sparse {};

template<> struct AssignmentKind<SparseShape,SparseSelfAdjointShape> { typedef SparseSelfAdjoint2Sparse Kind; };
template<> struct AssignmentKind<SparseSelfAdjointShape,SparseShape> { typedef Sparse2Sparse Kind; };

template< typename DstXprType, typename SrcXprType, typename Functor>
struct Assignment<DstXprType, SrcXprType, Functor, SparseSelfAdjoint2Sparse>
{
  typedef typename DstXprType::StorageIndex StorageIndex;
  typedef internal::assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar> AssignOpType;

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), dst);
  }

  // FIXME: the handling of += and -= in sparse matrices should be cleaned up so that the two overloads below (add/sub assign) could be reduced to the following generic one:
  template<typename DestScalar,int StorageOrder,typename AssignFunc>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src, const AssignFunc& func)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    call_assignment_no_alias_no_transpose(dst, tmp, func);
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::add_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst += tmp;
  }

  template<typename DestScalar,int StorageOrder>
  static void run(SparseMatrix<DestScalar,StorageOrder,StorageIndex> &dst, const SrcXprType &src,
                  const internal::sub_assign_op<typename DstXprType::Scalar,typename SrcXprType::Scalar>& /* func */)
  {
    SparseMatrix<DestScalar,StorageOrder,StorageIndex> tmp(src.rows(),src.cols());
    run(tmp, src, AssignOpType());
    dst -= tmp;
  }

  template<typename DestScalar>
  static void run(DynamicSparseMatrix<DestScalar,ColMajor,StorageIndex>& dst, const SrcXprType &src, const AssignOpType&/*func*/)
  {
    // TODO directly evaluate into dst;
    SparseMatrix<DestScalar,ColMajor,StorageIndex> tmp(dst.rows(),dst.cols());
    internal::permute_symm_to_fullsymm<SrcXprType::Mode>(src.matrix(), tmp);
    dst = tmp;
  }
};

} // end namespace internal
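
// A hedged illustration of what this assignment path enables (A and full are assumed names):
//   SparseMatrix<double> full;
//   full  = A.selfadjointView<Lower>();  // dispatches to permute_symm_to_fullsymm above
//   full += A.selfadjointView<Lower>();  // += and -= go through the temporary-based overloads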

/***************************************************************************
* Implementation of sparse self-adjoint times dense matrix products
***************************************************************************/

namespace internal {

template<int Mode, typename SparseLhsType, typename DenseRhsType, typename DenseResType, typename AlphaType>
inline void sparse_selfadjoint_time_dense_product(const SparseLhsType& lhs, const DenseRhsType& rhs, DenseResType& res, const AlphaType& alpha)
{
  EIGEN_ONLY_USED_FOR_DEBUG(alpha);

  typedef typename internal::nested_eval<SparseLhsType,DenseRhsType::MaxColsAtCompileTime>::type SparseLhsTypeNested;
  typedef typename internal::remove_all<SparseLhsTypeNested>::type SparseLhsTypeNestedCleaned;
  typedef evaluator<SparseLhsTypeNestedCleaned> LhsEval;
  typedef typename LhsEval::InnerIterator LhsIterator;
  typedef typename SparseLhsType::Scalar LhsScalar;

  enum {
    LhsIsRowMajor = (LhsEval::Flags&RowMajorBit)==RowMajorBit,
    ProcessFirstHalf =
              ((Mode&(Upper|Lower))==(Upper|Lower))
          || ( (Mode&Upper) && !LhsIsRowMajor)
          || ( (Mode&Lower) && LhsIsRowMajor),
    ProcessSecondHalf = !ProcessFirstHalf
  };
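
  // Only one triangular half of the selfadjoint lhs is actually stored. Depending on Mode
  // and on the storage order, the stored entries of an inner vector either precede the
  // diagonal (ProcessFirstHalf) or follow it (ProcessSecondHalf); the missing half is
  // reconstructed on the fly via conjugation in the loops below.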

  SparseLhsTypeNested lhs_nested(lhs);
  LhsEval lhsEval(lhs_nested);

  // work on one column at a time
  for (Index k=0; k<rhs.cols(); ++k)
  {
    for (Index j=0; j<lhs.outerSize(); ++j)
    {
      LhsIterator i(lhsEval,j);
      // handle the diagonal coefficient first when it comes first in the inner vector
      if (ProcessSecondHalf)
      {
        while (i && i.index()<j) ++i;
        if(i && i.index()==j)
        {
          res(j,k) += alpha * i.value() * rhs(j,k);
          ++i;
        }
      }

      // premultiplied rhs for scatters
      typename ScalarBinaryOpTraits<AlphaType, typename DenseRhsType::Scalar>::ReturnType rhs_j(alpha*rhs(j,k));
      // accumulator for partial scalar product
      typename DenseResType::Scalar res_j(0);
      for(; (ProcessFirstHalf ? i && i.index() < j : i) ; ++i)
      {
        LhsScalar lhs_ij = i.value();
        if(!LhsIsRowMajor) lhs_ij = numext::conj(lhs_ij);
        res_j += lhs_ij * rhs(i.index(),k);
        res(i.index(),k) += numext::conj(lhs_ij) * rhs_j;
      }
      res(j,k) += alpha * res_j;

      // handle the diagonal coefficient last when it comes last in the inner vector
      if (ProcessFirstHalf && i && (i.index()==j))
        res(j,k) += alpha * i.value() * rhs(j,k);
    }
  }
}


template<typename LhsView, typename Rhs, int ProductType>
struct generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType>
: generic_product_impl_base<LhsView, Rhs, generic_product_impl<LhsView, Rhs, SparseSelfAdjointShape, DenseShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const LhsView& lhsView, const Rhs& rhs, const typename Dest::Scalar& alpha)
  {
    typedef typename LhsView::_MatrixTypeNested Lhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhsView.matrix());
    RhsNested rhsNested(rhs);

    internal::sparse_selfadjoint_time_dense_product<LhsView::Mode>(lhsNested, rhsNested, dst, alpha);
  }
};

template<typename Lhs, typename RhsView, int ProductType>
struct generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType>
: generic_product_impl_base<Lhs, RhsView, generic_product_impl<Lhs, RhsView, DenseShape, SparseSelfAdjointShape, ProductType> >
{
  template<typename Dest>
  static void scaleAndAddTo(Dest& dst, const Lhs& lhs, const RhsView& rhsView, const typename Dest::Scalar& alpha)
  {
    typedef typename RhsView::_MatrixTypeNested Rhs;
    typedef typename nested_eval<Lhs,Dynamic>::type LhsNested;
    typedef typename nested_eval<Rhs,Dynamic>::type RhsNested;
    LhsNested lhsNested(lhs);
    RhsNested rhsNested(rhsView.matrix());

    // transpose everything: dst = lhs * S is computed as dst^T = S^T * lhs^T
    Transpose<Dest> dstT(dst);
    internal::sparse_selfadjoint_time_dense_product<RhsView::Mode>(rhsNested.transpose(), lhsNested.transpose(), dstT, alpha);
  }
};

// NOTE: these two overloads are needed to evaluate the sparse selfadjoint view into a full sparse matrix
// TODO: maybe the copy could be handled by generic_product_impl so that these overloads would not be needed anymore

template<typename LhsView, typename Rhs, int ProductTag>
struct product_evaluator<Product<LhsView, Rhs, DefaultProduct>, ProductTag, SparseSelfAdjointShape, SparseShape>
  : public evaluator<typename Product<typename Rhs::PlainObject, Rhs, DefaultProduct>::PlainObject>
{
  typedef Product<LhsView, Rhs, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_lhs(xpr.lhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<typename Rhs::PlainObject, Rhs, SparseShape, SparseShape, ProductTag>::evalTo(m_result, m_lhs, xpr.rhs());
  }

protected:
  typename Rhs::PlainObject m_lhs;
  PlainObject m_result;
};

template<typename Lhs, typename RhsView, int ProductTag>
struct product_evaluator<Product<Lhs, RhsView, DefaultProduct>, ProductTag, SparseShape, SparseSelfAdjointShape>
  : public evaluator<typename Product<Lhs, typename Lhs::PlainObject, DefaultProduct>::PlainObject>
{
  typedef Product<Lhs, RhsView, DefaultProduct> XprType;
  typedef typename XprType::PlainObject PlainObject;
  typedef evaluator<PlainObject> Base;

  product_evaluator(const XprType& xpr)
    : m_rhs(xpr.rhs()), m_result(xpr.rows(), xpr.cols())
  {
    ::new (static_cast<Base*>(this)) Base(m_result);
    generic_product_impl<Lhs, typename Lhs::PlainObject, SparseShape, SparseShape, ProductTag>::evalTo(m_result, xpr.lhs(), m_rhs);
  }

protected:
  typename Lhs::PlainObject m_rhs;
  PlainObject m_result;
};

} // end namespace internal

/***************************************************************************
* Implementation of symmetric copies and permutations
***************************************************************************/
namespace internal {

template<int Mode,typename MatrixType,int DestOrder>
void permute_symm_to_fullsymm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DestOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  typedef SparseMatrix<Scalar,DestOrder,StorageIndex> Dest;
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  MatEval matEval(mat);
  Dest& dest(_dest.derived());
  enum {
    StorageOrderMatch = int(Dest::IsRowMajor) == int(MatrixType::IsRowMajor)
  };

  Index size = mat.rows();
  VectorI count;
  count.resize(size);
  count.setZero();
  dest.resize(size,size);
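  // first pass: count the number of entries of each destination inner vector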
  for(Index j = 0; j<size; ++j)
  {
    Index jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      Index i = it.index();
      Index r = it.row();
      Index c = it.col();
      Index ip = perm ? perm[i] : i;
      if(Mode==(Upper|Lower))
        count[StorageOrderMatch ? jp : ip]++;
      else if(r==c)
        count[ip]++;
      else if(( Mode==Lower && r>c) || ( Mode==Upper && r<c))
      {
        count[ip]++;
        count[jp]++;
      }
    }
  }
  Index nnz = count.sum();

  // reserve space
  dest.resizeNonZeros(nnz);
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  // second pass: copy the data, duplicating off-diagonal entries into both halves
  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = internal::convert_index<StorageIndex>(it.index());
      Index r = it.row();
      Index c = it.col();

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      if(Mode==(Upper|Lower))
      {
        Index k = count[StorageOrderMatch ? jp : ip]++;
        dest.innerIndexPtr()[k] = StorageOrderMatch ? ip : jp;
        dest.valuePtr()[k] = it.value();
      }
      else if(r==c)
      {
        Index k = count[ip]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
      }
      else if(( (Mode&Lower)==Lower && r>c) || ( (Mode&Upper)==Upper && r<c))
      {
        if(!StorageOrderMatch)
          std::swap(ip,jp);
        Index k = count[jp]++;
        dest.innerIndexPtr()[k] = ip;
        dest.valuePtr()[k] = it.value();
        k = count[ip]++;
        dest.innerIndexPtr()[k] = jp;
        dest.valuePtr()[k] = numext::conj(it.value());
      }
    }
  }
}

template<int _SrcMode,int _DstMode,typename MatrixType,int DstOrder>
void permute_symm_to_symm(const MatrixType& mat, SparseMatrix<typename MatrixType::Scalar,DstOrder,typename MatrixType::StorageIndex>& _dest, const typename MatrixType::StorageIndex* perm)
{
  typedef typename MatrixType::StorageIndex StorageIndex;
  typedef typename MatrixType::Scalar Scalar;
  SparseMatrix<Scalar,DstOrder,StorageIndex>& dest(_dest.derived());
  typedef Matrix<StorageIndex,Dynamic,1> VectorI;
  typedef evaluator<MatrixType> MatEval;
  typedef typename evaluator<MatrixType>::InnerIterator MatIterator;

  enum {
    SrcOrder = MatrixType::IsRowMajor ? RowMajor : ColMajor,
    StorageOrderMatch = int(SrcOrder) == int(DstOrder),
    DstMode = DstOrder==RowMajor ? (_DstMode==Upper ? Lower : Upper) : _DstMode,
    SrcMode = SrcOrder==RowMajor ? (_SrcMode==Upper ? Lower : Upper) : _SrcMode
  };
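
  // A row-major triangular matrix is traversed here as the transpose of a column-major
  // one, which exchanges the roles of Upper and Lower; hence the mode swaps above.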

  MatEval matEval(mat);

  Index size = mat.rows();
  VectorI count(size);
  count.setZero();
  dest.resize(size,size);
  for(StorageIndex j = 0; j<size; ++j)
  {
    StorageIndex jp = perm ? perm[j] : j;
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex ip = perm ? perm[i] : i;
      count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
    }
  }
  dest.outerIndexPtr()[0] = 0;
  for(Index j=0; j<size; ++j)
    dest.outerIndexPtr()[j+1] = dest.outerIndexPtr()[j] + count[j];
  dest.resizeNonZeros(dest.outerIndexPtr()[size]);
  for(Index j=0; j<size; ++j)
    count[j] = dest.outerIndexPtr()[j];

  for(StorageIndex j = 0; j<size; ++j)
  {
    for(MatIterator it(matEval,j); it; ++it)
    {
      StorageIndex i = it.index();
      if((int(SrcMode)==int(Lower) && i<j) || (int(SrcMode)==int(Upper) && i>j))
        continue;

      StorageIndex jp = perm ? perm[j] : j;
      StorageIndex ip = perm ? perm[i] : i;

      Index k = count[int(DstMode)==int(Lower) ? (std::min)(ip,jp) : (std::max)(ip,jp)]++;
      dest.innerIndexPtr()[k] = int(DstMode)==int(Lower) ? (std::max)(ip,jp) : (std::min)(ip,jp);

      if(!StorageOrderMatch) std::swap(ip,jp);
      if( ((int(DstMode)==int(Lower) && ip<jp) || (int(DstMode)==int(Upper) && ip>jp)))
        dest.valuePtr()[k] = numext::conj(it.value());
      else
        dest.valuePtr()[k] = it.value();
    }
  }
}

} // end namespace internal

// TODO implement twists in a more evaluator friendly fashion

namespace internal {

template<typename MatrixType, int Mode>
struct traits<SparseSymmetricPermutationProduct<MatrixType,Mode> > : traits<MatrixType> {
};

} // end namespace internal

template<typename MatrixType,int Mode>
class SparseSymmetricPermutationProduct
  : public EigenBase<SparseSymmetricPermutationProduct<MatrixType,Mode> >
{
  public:
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    enum {
      RowsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::RowsAtCompileTime,
      ColsAtCompileTime = internal::traits<SparseSymmetricPermutationProduct>::ColsAtCompileTime
    };
  protected:
    typedef PermutationMatrix<Dynamic,Dynamic,StorageIndex> Perm;
  public:
    typedef Matrix<StorageIndex,Dynamic,1> VectorI;
    typedef typename MatrixType::Nested MatrixTypeNested;
    typedef typename internal::remove_all<MatrixTypeNested>::type NestedExpression;

    SparseSymmetricPermutationProduct(const MatrixType& mat, const Perm& perm)
      : m_matrix(mat), m_perm(perm)
    {}

    inline Index rows() const { return m_matrix.rows(); }
    inline Index cols() const { return m_matrix.cols(); }

    const NestedExpression& matrix() const { return m_matrix; }
    const Perm& perm() const { return m_perm; }

  protected:
    MatrixTypeNested m_matrix;
    const Perm& m_perm;
};

namespace internal {

template<typename DstXprType, typename MatrixType, int Mode, typename Scalar>
struct Assignment<DstXprType, SparseSymmetricPermutationProduct<MatrixType,Mode>, internal::assign_op<Scalar,typename MatrixType::Scalar>, Sparse2Sparse>
{
  typedef SparseSymmetricPermutationProduct<MatrixType,Mode> SrcXprType;
  typedef typename DstXprType::StorageIndex DstIndex;
  template<int Options>
  static void run(SparseMatrix<Scalar,Options,DstIndex> &dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    // internal::permute_symm_to_fullsymm<Mode>(m_matrix,_dest,m_perm.indices().data());
    SparseMatrix<Scalar,(Options&RowMajor)==RowMajor ? ColMajor : RowMajor, DstIndex> tmp;
    internal::permute_symm_to_fullsymm<Mode>(src.matrix(),tmp,src.perm().indices().data());
    dst = tmp;
  }

  template<typename DestType,unsigned int DestMode>
  static void run(SparseSelfAdjointView<DestType,DestMode>& dst, const SrcXprType &src, const internal::assign_op<Scalar,typename MatrixType::Scalar> &)
  {
    internal::permute_symm_to_symm<Mode,DestMode>(src.matrix(),dst.matrix(),src.perm().indices().data());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif // EIGEN_SPARSE_SELFADJOINTVIEW_H