// This file is part of Eigen, a lightweight C++ template library
// for linear algebra.
//
// Copyright (C) 2012-2013 Desire Nuentsa <desire.nuentsa_wakam@inria.fr>
// Copyright (C) 2012-2014 Gael Guennebaud <gael.guennebaud@inria.fr>
//
// This Source Code Form is subject to the terms of the Mozilla
// Public License v. 2.0. If a copy of the MPL was not distributed
// with this file, You can obtain one at http://mozilla.org/MPL/2.0/.

#ifndef EIGEN_SPARSE_QR_H
#define EIGEN_SPARSE_QR_H

namespace Eigen {

template<typename MatrixType, typename OrderingType> class SparseQR;
template<typename SparseQRType> struct SparseQRMatrixQReturnType;
template<typename SparseQRType> struct SparseQRMatrixQTransposeReturnType;
template<typename SparseQRType, typename Derived> struct SparseQR_QProduct;
namespace internal {
  template <typename SparseQRType> struct traits<SparseQRMatrixQReturnType<SparseQRType> >
  {
    typedef typename SparseQRType::MatrixType ReturnType;
    typedef typename ReturnType::StorageIndex StorageIndex;
    typedef typename ReturnType::StorageKind StorageKind;
    enum {
      RowsAtCompileTime = Dynamic,
      ColsAtCompileTime = Dynamic
    };
  };
  template <typename SparseQRType> struct traits<SparseQRMatrixQTransposeReturnType<SparseQRType> >
  {
    typedef typename SparseQRType::MatrixType ReturnType;
  };
  template <typename SparseQRType, typename Derived> struct traits<SparseQR_QProduct<SparseQRType, Derived> >
  {
    typedef typename Derived::PlainObject ReturnType;
  };
} // End namespace internal

/**
  * \ingroup SparseQR_Module
  * \class SparseQR
  * \brief Sparse left-looking rank-revealing QR factorization
  *
  * This class implements a left-looking rank-revealing QR decomposition
  * of sparse matrices. When a column has a norm less than a given tolerance
  * it is implicitly permuted to the end. The QR factorization thus obtained is
  * given by A*P = Q*R where R is upper triangular or trapezoidal.
  *
  * P is the column permutation which is the product of the fill-reducing and the
  * rank-revealing permutations. Use colsPermutation() to get it.
  *
  * Q is the orthogonal matrix represented as products of Householder reflectors.
  * Use matrixQ() to get an expression and matrixQ().transpose() to get the transpose.
  * You can then apply it to a vector.
  *
  * R is the sparse triangular or trapezoidal matrix. The latter occurs when A is rank-deficient.
  * matrixR().topLeftCorner(rank(), rank()) always returns a triangular factor of full rank.
  *
  * \tparam _MatrixType The type of the sparse matrix A, must be a column-major SparseMatrix<>
  * \tparam _OrderingType The fill-reducing ordering method. See the \link OrderingMethods_Module
  *  OrderingMethods \endlink module for the list of built-in and external ordering methods.
  *
  * \implsparsesolverconcept
  *
  * \warning The input sparse matrix A must be in compressed mode (see SparseMatrix::makeCompressed()).
  *
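  * A minimal usage sketch (illustrative only; the matrix \c A, the right-hand side \c b, and the
  * \c COLAMDOrdering choice below are assumptions, not prescribed by this class):
  * \code
  * Eigen::SparseMatrix<double> A;            // fill A, then compress it
  * Eigen::VectorXd b;                        // right-hand side with A.rows() entries
  * // ...
  * A.makeCompressed();
  * Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr(A);
  * if (qr.info() != Eigen::Success) {
  *   // factorization failed; see qr.lastErrorMessage()
  * }
  * Eigen::VectorXd x = qr.solve(b);          // least-squares solution of A*x = b
  * Eigen::Index r = qr.rank();               // numerical rank detected by the pivoting threshold
  * \endcode
  *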
  */
template<typename _MatrixType, typename _OrderingType>
class SparseQR : public SparseSolverBase<SparseQR<_MatrixType,_OrderingType> >
{
  protected:
    typedef SparseSolverBase<SparseQR<_MatrixType,_OrderingType> > Base;
    using Base::m_isInitialized;
  public:
    using Base::_solve_impl;
    typedef _MatrixType MatrixType;
    typedef _OrderingType OrderingType;
    typedef typename MatrixType::Scalar Scalar;
    typedef typename MatrixType::RealScalar RealScalar;
    typedef typename MatrixType::StorageIndex StorageIndex;
    typedef SparseMatrix<Scalar,ColMajor,StorageIndex> QRMatrixType;
    typedef Matrix<StorageIndex, Dynamic, 1> IndexVector;
    typedef Matrix<Scalar, Dynamic, 1> ScalarVector;
    typedef PermutationMatrix<Dynamic, Dynamic, StorageIndex> PermutationType;

    enum {
      ColsAtCompileTime = MatrixType::ColsAtCompileTime,
      MaxColsAtCompileTime = MatrixType::MaxColsAtCompileTime
    };

  public:
    SparseQR() : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true), m_isQSorted(false), m_isEtreeOk(false)
    { }

    /** Construct a QR factorization of the matrix \a mat.
      *
      * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
      *
      * \sa compute()
      */
    explicit SparseQR(const MatrixType& mat) : m_analysisIsok(false), m_lastError(""), m_useDefaultThreshold(true), m_isQSorted(false), m_isEtreeOk(false)
    {
      compute(mat);
    }

    /** Computes the QR factorization of the sparse matrix \a mat.
      *
      * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
      *
      * \sa analyzePattern(), factorize()
      */
    void compute(const MatrixType& mat)
    {
      analyzePattern(mat);
      factorize(mat);
    }
    void analyzePattern(const MatrixType& mat);
    void factorize(const MatrixType& mat);

    /** \returns the number of rows of the represented matrix.
      */
    inline Index rows() const { return m_pmat.rows(); }

    /** \returns the number of columns of the represented matrix.
      */
    inline Index cols() const { return m_pmat.cols(); }

    /** \returns a const reference to the \b sparse upper triangular matrix R of the QR factorization.
      * \warning The entries of the returned matrix are not sorted. This means that using it in algorithms
      *          expecting sorted entries will fail. This includes random coefficient accesses (SparseMatrix::coeff()),
      *          and coefficient-wise operations. Matrix products and triangular solves are fine though.
      *
      * To sort the entries, you can assign it to a row-major matrix, and if a column-major matrix
      * is required, you can copy it again:
      * \code
      * SparseMatrix<double>          R  = qr.matrixR();  // column-major, not sorted!
      * SparseMatrix<double,RowMajor> Rr = qr.matrixR();  // row-major, sorted
      * SparseMatrix<double>          Rc = Rr;            // column-major, sorted
      * \endcode
      */
    const QRMatrixType& matrixR() const { return m_R; }

    /** \returns the rank of the matrix, i.e., the number of linearly independent columns as determined
      * by the pivoting threshold.
      *
      * \sa setPivotThreshold()
      */
    Index rank() const
    {
      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
      return m_nonzeropivots;
    }

    /** \returns an expression of the matrix Q as products of sparse Householder reflectors.
    * The common usage of this function is to apply it to a dense matrix or vector, for instance:
    * \code
    * VectorXd B1, B2;
    * // Initialize B1
    * B2 = matrixQ() * B1;
    * \endcode
    *
    * To get a plain SparseMatrix representation of Q:
    * \code
    * SparseMatrix<double> Q;
    * Q = SparseQR<SparseMatrix<double> >(A).matrixQ();
    * \endcode
    * Internally, this call simply performs a sparse product between the matrix Q
    * and a sparse identity matrix. However, because the sparse reflectors are
    * stored unsorted, two transpositions are needed to sort them before
    * performing the product.
    */
    SparseQRMatrixQReturnType<SparseQR> matrixQ() const
    { return SparseQRMatrixQReturnType<SparseQR>(*this); }

    /** \returns a const reference to the column permutation P that was applied to A such that A*P = Q*R.
      * It is the combination of the fill-in reducing permutation and numerical column pivoting.
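      *
      * As an illustrative sketch (assuming the factorization of a matrix \c A has been computed
      * in \c qr; the names are placeholders), the identity A*P = Q*R can be checked on dense copies:
      * \code
      * Eigen::MatrixXd AP = Eigen::MatrixXd(A) * qr.colsPermutation();       // columns of A permuted by P
      * Eigen::MatrixXd QR = qr.matrixQ() * Eigen::MatrixXd(qr.matrixR());    // Q applied to a dense copy of R
      * double residual = (AP - QR).norm();                                   // should be at round-off level
      * \endcode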
      */
    const PermutationType& colsPermutation() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_outputPerm_c;
    }

    /** \returns A string describing the type of error.
      * This method is provided to ease debugging, not to handle errors.
      */
    std::string lastErrorMessage() const { return m_lastError; }

    /** \internal */
    template<typename Rhs, typename Dest>
    bool _solve_impl(const MatrixBase<Rhs> &B, MatrixBase<Dest> &dest) const
    {
      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");

      Index rank = this->rank();

      // Compute Q^T * b
      typename Dest::PlainObject y, b;
      y = this->matrixQ().transpose() * B;
      b = y;

      // Solve with the triangular matrix R
      y.resize((std::max<Index>)(cols(),y.rows()),y.cols());
      y.topRows(rank) = this->matrixR().topLeftCorner(rank, rank).template triangularView<Upper>().solve(b.topRows(rank));
      y.bottomRows(y.rows()-rank).setZero();

      // Apply the column permutation
      if (m_perm_c.size())  dest = colsPermutation() * y.topRows(cols());
      else                  dest = y.topRows(cols());

      m_info = Success;
      return true;
    }

    /** Sets the threshold that is used to determine linearly dependent columns during the factorization.
      *
      * In practice, if during the factorization the norm of the column that has to be eliminated is below
      * this threshold, then the entire column is treated as zero, and it is moved to the end.
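      *
      * A brief illustrative sketch (the value \c 1e-8 below is an arbitrary example, not a
      * recommendation); the threshold must be set before compute() or factorize() to take effect:
      * \code
      * Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
      * qr.setPivotThreshold(1e-8);  // override the default, MATLAB-like threshold
      * qr.compute(A);               // columns whose remaining norm falls below 1e-8 are treated as zero
      * \endcode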
      */
    void setPivotThreshold(const RealScalar& threshold)
    {
      m_useDefaultThreshold = false;
      m_threshold = threshold;
    }

    /** \returns the solution X of \f$ A X = B \f$ using the current decomposition of A.
      *
      * \sa compute()
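      *
      * A hedged sketch (assuming a factorization already held in \c qr and a dense right-hand
      * side matrix \c B with \c qr.rows() rows; the names are placeholders):
      * \code
      * Eigen::MatrixXd X = qr.solve(B);   // one least-squares solution per column of B
      * if (qr.info() != Eigen::Success) {
      *   // the solve step reported a numerical problem
      * }
      * \endcode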
      */
    template<typename Rhs>
    inline const Solve<SparseQR, Rhs> solve(const MatrixBase<Rhs>& B) const
    {
      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
      return Solve<SparseQR, Rhs>(*this, B.derived());
    }
    template<typename Rhs>
    inline const Solve<SparseQR, Rhs> solve(const SparseMatrixBase<Rhs>& B) const
    {
      eigen_assert(m_isInitialized && "The factorization should be called first, use compute()");
      eigen_assert(this->rows() == B.rows() && "SparseQR::solve() : invalid number of rows in the right hand side matrix");
      return Solve<SparseQR, Rhs>(*this, B.derived());
    }

    /** \brief Reports whether previous computation was successful.
      *
      * \returns \c Success if computation was successful,
      *          \c NumericalIssue if the QR factorization reports a numerical problem,
      *          \c InvalidInput if the input matrix is invalid.
      */
    ComputationInfo info() const
    {
      eigen_assert(m_isInitialized && "Decomposition is not initialized.");
      return m_info;
    }


    /** \internal */
    inline void _sort_matrix_Q()
    {
      if(this->m_isQSorted) return;
      // The matrix Q is sorted during the transposition
      SparseMatrix<Scalar, RowMajor, Index> mQrm(this->m_Q);
      this->m_Q = mQrm;
      this->m_isQSorted = true;
    }


  protected:
    bool m_analysisIsok;
    bool m_factorizationIsok;
    mutable ComputationInfo m_info;
    std::string m_lastError;
    QRMatrixType m_pmat;            // Temporary matrix
    QRMatrixType m_R;               // The triangular factor matrix
    QRMatrixType m_Q;               // The orthogonal reflectors
    ScalarVector m_hcoeffs;         // The Householder coefficients
    PermutationType m_perm_c;       // Fill-reducing column permutation
    PermutationType m_pivotperm;    // The permutation for rank revealing
    PermutationType m_outputPerm_c; // The final column permutation
    RealScalar m_threshold;         // Threshold to determine null Householder reflections
    bool m_useDefaultThreshold;     // Use default threshold
    Index m_nonzeropivots;          // Number of non-zero pivots found
    IndexVector m_etree;            // Column elimination tree
    IndexVector m_firstRowElt;      // First element in each row
    bool m_isQSorted;               // Whether Q is sorted or not
    bool m_isEtreeOk;               // Whether the elimination tree matches the initial input matrix

    template <typename, typename > friend struct SparseQR_QProduct;

};

/** \brief Preprocessing step of a QR factorization
  *
  * \warning The matrix \a mat must be in compressed mode (see SparseMatrix::makeCompressed()).
  *
  * In this step, the fill-reducing permutation is computed and applied to the columns of A
  * and the column elimination tree is computed as well. Only the sparsity pattern of \a mat is exploited.
  *
  * \note In this step it is assumed that there is no empty row in the matrix \a mat.
  */
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::analyzePattern(const MatrixType& mat)
{
  eigen_assert(mat.isCompressed() && "SparseQR requires a sparse matrix in compressed mode. Call .makeCompressed() before passing it to SparseQR");
  // Copy to a column-major matrix if the input is row-major
  typename internal::conditional<MatrixType::IsRowMajor,QRMatrixType,const MatrixType&>::type matCpy(mat);
  // Compute the column fill-reducing ordering
  OrderingType ord;
  ord(matCpy, m_perm_c);
  Index n = mat.cols();
  Index m = mat.rows();
  Index diagSize = (std::min)(m,n);

  if (!m_perm_c.size())
  {
    m_perm_c.resize(n);
    m_perm_c.indices().setLinSpaced(n, 0,StorageIndex(n-1));
  }

  // Compute the column elimination tree of the permuted matrix
  m_outputPerm_c = m_perm_c.inverse();
  internal::coletree(matCpy, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
  m_isEtreeOk = true;

  m_R.resize(m, n);
  m_Q.resize(m, diagSize);

  // Allocate space for nonzero elements: rough estimation
  m_R.reserve(2*mat.nonZeros()); //FIXME Get a more accurate estimation through symbolic factorization with the etree
  m_Q.reserve(2*mat.nonZeros());
  m_hcoeffs.resize(diagSize);
  m_analysisIsok = true;
}

/** \brief Performs the numerical QR factorization of the input matrix
  *
  * The function SparseQR::analyzePattern(const MatrixType&) must have been called beforehand with
  * a matrix having the same sparsity pattern as \a mat.
  *
  * \param mat The sparse column-major matrix
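  *
  * When several matrices share the same sparsity pattern, the symbolic analysis can be reused;
  * a brief illustrative sketch (the matrices \c A1 and \c A2 are placeholders):
  * \code
  * Eigen::SparseQR<Eigen::SparseMatrix<double>, Eigen::COLAMDOrdering<int> > qr;
  * qr.analyzePattern(A1);   // symbolic step, done once for the shared pattern
  * qr.factorize(A1);        // numerical step for A1
  * // ... later, A2 has the same pattern as A1 but different values:
  * qr.factorize(A2);        // reuse the analysis, redo only the numerical step
  * \endcode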
  */
template <typename MatrixType, typename OrderingType>
void SparseQR<MatrixType,OrderingType>::factorize(const MatrixType& mat)
{
  using std::abs;

  eigen_assert(m_analysisIsok && "analyzePattern() should be called before this step");
  StorageIndex m = StorageIndex(mat.rows());
  StorageIndex n = StorageIndex(mat.cols());
  StorageIndex diagSize = (std::min)(m,n);
  IndexVector mark((std::max)(m,n)); mark.setConstant(-1);  // Record the visited nodes
  IndexVector Ridx(n), Qidx(m);                             // Temporarily store the row indices of the current column of R and Q
  Index nzcolR, nzcolQ;                                     // Number of nonzeros in the current column of R and Q
  ScalarVector tval(m);                                     // The dense vector used to compute the current column
  RealScalar pivotThreshold = m_threshold;

  m_R.setZero();
  m_Q.setZero();
  m_pmat = mat;
  if(!m_isEtreeOk)
  {
    m_outputPerm_c = m_perm_c.inverse();
    internal::coletree(m_pmat, m_etree, m_firstRowElt, m_outputPerm_c.indices().data());
    m_isEtreeOk = true;
  }

  m_pmat.uncompress(); // To have the innerNonZeroPtr allocated

  // Apply the fill-in reducing permutation lazily:
  {
    // If the input is row-major, copy the original column indices,
    // otherwise directly use the input matrix
    IndexVector originalOuterIndicesCpy;
    const StorageIndex *originalOuterIndices = mat.outerIndexPtr();
    if(MatrixType::IsRowMajor)
    {
      originalOuterIndicesCpy = IndexVector::Map(m_pmat.outerIndexPtr(),n+1);
      originalOuterIndices = originalOuterIndicesCpy.data();
    }

    for (int i = 0; i < n; i++)
    {
      Index p = m_perm_c.size() ? m_perm_c.indices()(i) : i;
      m_pmat.outerIndexPtr()[p] = originalOuterIndices[i];
      m_pmat.innerNonZeroPtr()[p] = originalOuterIndices[i+1] - originalOuterIndices[i];
    }
  }

  /* Compute the default threshold as in MATLAB, see:
   * Tim Davis, "Algorithm 915, SuiteSparseQR: Multifrontal Multithreaded Rank-Revealing
   * Sparse QR Factorization", ACM Trans. on Math. Soft. 38(1), 2011, Page 8:3
   */
  if(m_useDefaultThreshold)
  {
    RealScalar max2Norm = 0.0;
    for (int j = 0; j < n; j++) max2Norm = numext::maxi(max2Norm, m_pmat.col(j).norm());
    if(max2Norm==RealScalar(0))
      max2Norm = RealScalar(1);
    pivotThreshold = 20 * (m + n) * max2Norm * NumTraits<RealScalar>::epsilon();
  }

  // Initialize the numerical permutation
  m_pivotperm.setIdentity(n);

  StorageIndex nonzeroCol = 0; // Record the number of valid pivots
  m_Q.startVec(0);

  // Left-looking rank-revealing QR factorization: compute a column of R and Q at a time
  for (StorageIndex col = 0; col < n; ++col)
  {
    mark.setConstant(-1);
    m_R.startVec(col);
    mark(nonzeroCol) = col;
    Qidx(0) = nonzeroCol;
    nzcolR = 0; nzcolQ = 1;
    bool found_diag = nonzeroCol>=m;
    tval.setZero();

    // Symbolic factorization: find the nonzero locations of column k of the factors R and Q, i.e.,
    // all the nodes (with indices lower than rank) reachable through the column elimination tree (etree) rooted at node k.
    // Note: if the diagonal entry does not exist, then its contribution must be explicitly added,
    // hence the trick with found_diag, which permits one more iteration on the diagonal element if it has not been found.
    for (typename QRMatrixType::InnerIterator itp(m_pmat, col); itp || !found_diag; ++itp)
    {
      StorageIndex curIdx = nonzeroCol;
      if(itp) curIdx = StorageIndex(itp.row());
      if(curIdx == nonzeroCol) found_diag = true;

      // Get the nonzero indices of the current column of R
      StorageIndex st = m_firstRowElt(curIdx); // The traversal of the etree starts here
      if (st < 0 )
      {
        m_lastError = "Empty row found during numerical factorization";
        m_info = InvalidInput;
        return;
      }

      // Traverse the etree
      Index bi = nzcolR;
      for (; mark(st) != col; st = m_etree(st))
      {
        Ridx(nzcolR) = st;  // Add this row to the list,
        mark(st) = col;     // and mark this row as visited
        nzcolR++;
      }

      // Reverse the list to get the topological ordering
      Index nt = nzcolR-bi;
      for(Index i = 0; i < nt/2; i++) std::swap(Ridx(bi+i), Ridx(nzcolR-i-1));

      // Copy the current (curIdx,col) value of the input matrix
      if(itp) tval(curIdx) = itp.value();
      else    tval(curIdx) = Scalar(0);

      // Compute the pattern of Q(:,k)
      if(curIdx > nonzeroCol && mark(curIdx) != col )
      {
        Qidx(nzcolQ) = curIdx;  // Add this row to the pattern of Q,
        mark(curIdx) = col;     // and mark it as visited
        nzcolQ++;
      }
    }

    // Browse all the indices of R(:,col) in reverse order
    for (Index i = nzcolR-1; i >= 0; i--)
    {
      Index curIdx = Ridx(i);

      // Apply the curIdx-th Householder vector to the current column (temporarily stored into tval)
      Scalar tdot(0);

      // First compute q' * tval
      tdot = m_Q.col(curIdx).dot(tval);

      tdot *= m_hcoeffs(curIdx);

      // Then update tval = tval - q * tau
      // FIXME: tval -= tdot * m_Q.col(curIdx) should amount to the same (need to check/add support for efficient "dense ?= sparse")
      for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
        tval(itq.row()) -= itq.value() * tdot;

      // Detect fill-in for the current column of Q
      if(m_etree(Ridx(i)) == nonzeroCol)
      {
        for (typename QRMatrixType::InnerIterator itq(m_Q, curIdx); itq; ++itq)
        {
          StorageIndex iQ = StorageIndex(itq.row());
          if (mark(iQ) != col)
          {
            Qidx(nzcolQ++) = iQ;  // Add this row to the pattern of Q,
            mark(iQ) = col;       // and mark it as visited
          }
        }
      }
    } // End update current column

    Scalar tau = RealScalar(0);
    RealScalar beta = 0;

    if(nonzeroCol < diagSize)
    {
      // Compute the Householder reflection that eliminates the current column
      // FIXME this step should call the Householder module.
      Scalar c0 = nzcolQ ? tval(Qidx(0)) : Scalar(0);

      // First, the squared norm of Q((col+1):m, col)
      RealScalar sqrNorm = 0.;
      for (Index itq = 1; itq < nzcolQ; ++itq) sqrNorm += numext::abs2(tval(Qidx(itq)));
      if(sqrNorm == RealScalar(0) && numext::imag(c0) == RealScalar(0))
      {
        beta = numext::real(c0);
        tval(Qidx(0)) = 1;
      }
      else
      {
        using std::sqrt;
        beta = sqrt(numext::abs2(c0) + sqrNorm);
        if(numext::real(c0) >= RealScalar(0))
          beta = -beta;
        tval(Qidx(0)) = 1;
        for (Index itq = 1; itq < nzcolQ; ++itq)
          tval(Qidx(itq)) /= (c0 - beta);
        tau = numext::conj((beta-c0) / beta);
      }
    }

    // Insert values in R
    for (Index i = nzcolR-1; i >= 0; i--)
    {
      Index curIdx = Ridx(i);
      if(curIdx < nonzeroCol)
      {
        m_R.insertBackByOuterInnerUnordered(col, curIdx) = tval(curIdx);
        tval(curIdx) = Scalar(0.);
      }
    }

    if(nonzeroCol < diagSize && abs(beta) >= pivotThreshold)
    {
      m_R.insertBackByOuterInner(col, nonzeroCol) = beta;
      // The Householder coefficient
      m_hcoeffs(nonzeroCol) = tau;
      // Record the Householder reflections
      for (Index itq = 0; itq < nzcolQ; ++itq)
      {
        Index iQ = Qidx(itq);
        m_Q.insertBackByOuterInnerUnordered(nonzeroCol,iQ) = tval(iQ);
        tval(iQ) = Scalar(0.);
      }
      nonzeroCol++;
      if(nonzeroCol<diagSize)
        m_Q.startVec(nonzeroCol);
    }
    else
    {
      // Zero pivot found: implicitly move this column to the end
      for (Index j = nonzeroCol; j < n-1; j++)
        std::swap(m_pivotperm.indices()(j), m_pivotperm.indices()[j+1]);

      // Recompute the column elimination tree
      internal::coletree(m_pmat, m_etree, m_firstRowElt, m_pivotperm.indices().data());
      m_isEtreeOk = false;
    }
  }

  m_hcoeffs.tail(diagSize-nonzeroCol).setZero();

  // Finalize the column pointers of the sparse matrices R and Q
  m_Q.finalize();
  m_Q.makeCompressed();
  m_R.finalize();
  m_R.makeCompressed();
  m_isQSorted = false;

  m_nonzeropivots = nonzeroCol;

  if(nonzeroCol<n)
  {
    // Permute the triangular factor to put the 'dead' columns to the end
    QRMatrixType tempR(m_R);
    m_R = tempR * m_pivotperm;

    // Update the column permutation
    m_outputPerm_c = m_outputPerm_c * m_pivotperm;
  }

  m_isInitialized = true;
  m_factorizationIsok = true;
  m_info = Success;
}

template <typename SparseQRType, typename Derived>
struct SparseQR_QProduct : ReturnByValue<SparseQR_QProduct<SparseQRType, Derived> >
{
  typedef typename SparseQRType::QRMatrixType MatrixType;
  typedef typename SparseQRType::Scalar Scalar;
  // Get the references
  SparseQR_QProduct(const SparseQRType& qr, const Derived& other, bool transpose) :
  m_qr(qr),m_other(other),m_transpose(transpose) {}
  inline Index rows() const { return m_transpose ? m_qr.rows() : m_qr.cols(); }
  inline Index cols() const { return m_other.cols(); }

  // Assign to a vector
  template<typename DesType>
  void evalTo(DesType& res) const
  {
    Index m = m_qr.rows();
    Index n = m_qr.cols();
    Index diagSize = (std::min)(m,n);
    res = m_other;
    if (m_transpose)
    {
      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
      // Compute res = Q' * other column by column
      for(Index j = 0; j < res.cols(); j++)
      {
        for (Index k = 0; k < diagSize; k++)
        {
          Scalar tau = Scalar(0);
          tau = m_qr.m_Q.col(k).dot(res.col(j));
          if(tau==Scalar(0)) continue;
          tau = tau * m_qr.m_hcoeffs(k);
          res.col(j) -= tau * m_qr.m_Q.col(k);
        }
      }
    }
    else
    {
      eigen_assert(m_qr.m_Q.rows() == m_other.rows() && "Non conforming object sizes");
      // Compute res = Q * other column by column
      for(Index j = 0; j < res.cols(); j++)
      {
        for (Index k = diagSize-1; k >=0; k--)
        {
          Scalar tau = Scalar(0);
          tau = m_qr.m_Q.col(k).dot(res.col(j));
          if(tau==Scalar(0)) continue;
          tau = tau * m_qr.m_hcoeffs(k);
          res.col(j) -= tau * m_qr.m_Q.col(k);
        }
      }
    }
  }

  const SparseQRType& m_qr;
  const Derived& m_other;
  bool m_transpose;
};

template<typename SparseQRType>
struct SparseQRMatrixQReturnType : public EigenBase<SparseQRMatrixQReturnType<SparseQRType> >
{
  typedef typename SparseQRType::Scalar Scalar;
  typedef Matrix<Scalar,Dynamic,Dynamic> DenseMatrix;
  enum {
    RowsAtCompileTime = Dynamic,
    ColsAtCompileTime = Dynamic
  };
  explicit SparseQRMatrixQReturnType(const SparseQRType& qr) : m_qr(qr) {}
  template<typename Derived>
  SparseQR_QProduct<SparseQRType, Derived> operator*(const MatrixBase<Derived>& other)
  {
    return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(),false);
  }
  SparseQRMatrixQTransposeReturnType<SparseQRType> adjoint() const
  {
    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
  }
  inline Index rows() const { return m_qr.rows(); }
  inline Index cols() const { return (std::min)(m_qr.rows(),m_qr.cols()); }
  // To use for operations with the transpose of Q
  SparseQRMatrixQTransposeReturnType<SparseQRType> transpose() const
  {
    return SparseQRMatrixQTransposeReturnType<SparseQRType>(m_qr);
  }
  const SparseQRType& m_qr;
};

template<typename SparseQRType>
struct SparseQRMatrixQTransposeReturnType
{
  explicit SparseQRMatrixQTransposeReturnType(const SparseQRType& qr) : m_qr(qr) {}
  template<typename Derived>
  SparseQR_QProduct<SparseQRType,Derived> operator*(const MatrixBase<Derived>& other)
  {
    return SparseQR_QProduct<SparseQRType,Derived>(m_qr,other.derived(), true);
  }
  const SparseQRType& m_qr;
};

namespace internal {

template<typename SparseQRType>
struct evaluator_traits<SparseQRMatrixQReturnType<SparseQRType> >
{
  typedef typename SparseQRType::MatrixType MatrixType;
  typedef typename storage_kind_to_evaluator_kind<typename MatrixType::StorageKind>::Kind Kind;
  typedef SparseShape Shape;
};

template< typename DstXprType, typename SparseQRType>
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Sparse>
{
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
  typedef typename DstXprType::Scalar Scalar;
  typedef typename DstXprType::StorageIndex StorageIndex;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
  {
    typename DstXprType::PlainObject idMat(src.m_qr.rows(), src.m_qr.rows());
    idMat.setIdentity();
    // Sort the sparse householder reflectors if needed
    const_cast<SparseQRType *>(&src.m_qr)->_sort_matrix_Q();
    dst = SparseQR_QProduct<SparseQRType, DstXprType>(src.m_qr, idMat, false);
  }
};

template< typename DstXprType, typename SparseQRType>
struct Assignment<DstXprType, SparseQRMatrixQReturnType<SparseQRType>, internal::assign_op<typename DstXprType::Scalar,typename DstXprType::Scalar>, Sparse2Dense>
{
  typedef SparseQRMatrixQReturnType<SparseQRType> SrcXprType;
  typedef typename DstXprType::Scalar Scalar;
  typedef typename DstXprType::StorageIndex StorageIndex;
  static void run(DstXprType &dst, const SrcXprType &src, const internal::assign_op<Scalar,Scalar> &/*func*/)
  {
    dst = src.m_qr.matrixQ() * DstXprType::Identity(src.m_qr.rows(), src.m_qr.rows());
  }
};

} // end namespace internal

} // end namespace Eigen

#endif