// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)
//
// A simple C++ interface to the SuiteSparse and CHOLMOD libraries.

#ifndef CERES_INTERNAL_SUITESPARSE_H_
#define CERES_INTERNAL_SUITESPARSE_H_

#ifndef CERES_NO_SUITESPARSE

#include <cstring>
#include <string>
#include <vector>

#include <glog/logging.h>
#include "cholmod.h"
#include "ceres/internal/port.h"

namespace ceres {
namespace internal {

class CompressedRowSparseMatrix;
class TripletSparseMatrix;

// The raw CHOLMOD and SuiteSparseQR libraries have a slightly
// cumbersome C-like calling format. This object abstracts it away and
// provides the user with a simpler interface. The methods here cannot
// be static as a cholmod_common object serves as a global variable
// for all cholmod function calls.
class SuiteSparse {
 public:
  SuiteSparse()  { cholmod_start(&cc_); }
  ~SuiteSparse() { cholmod_finish(&cc_); }

  // Functions for building cholmod_sparse objects from sparse
  // matrices stored in triplet form. The matrix A is not
  // modified. Caller owns the result.
  cholmod_sparse* CreateSparseMatrix(TripletSparseMatrix* A);

  // This function works like CreateSparseMatrix, except that the
  // return value corresponds to A' rather than A.
  cholmod_sparse* CreateSparseMatrixTranspose(TripletSparseMatrix* A);

  // Create a cholmod_sparse wrapper around the contents of A. This is
  // a shallow object, which refers to the contents of A and does not
  // use the SuiteSparse machinery to allocate memory; this object
  // should be disposed of with a delete and not a call to Free, as is
  // the case for objects returned by CreateSparseMatrixTranspose.
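  //
  // A hypothetical sketch of the ownership difference (ss is a
  // SuiteSparse instance and crsm an existing CompressedRowSparseMatrix,
  // both assumed to be set up elsewhere):
  //
  //   cholmod_sparse* view = ss.CreateSparseMatrixTransposeView(&crsm);
  //   // ... use view ...
  //   delete view;  // Shallow wrapper: delete, do not call ss.Free(view).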
  cholmod_sparse* CreateSparseMatrixTransposeView(CompressedRowSparseMatrix* A);

  // Given a vector x, build a cholmod_dense vector of size out_size
  // with the first in_size entries copied from x. If x is NULL, then
  // an all zeros vector is returned. Caller owns the result.
  cholmod_dense* CreateDenseVector(const double* x, int in_size, int out_size);

  // The matrix A is scaled using the matrix whose diagonal is the
  // vector scale. mode describes how scaling is applied. Possible
  // values are CHOLMOD_ROW for row scaling - diag(scale) * A,
  // CHOLMOD_COL for column scaling - A * diag(scale), and CHOLMOD_SYM
  // for symmetric scaling, which scales both the rows and the columns
  // - diag(scale) * A * diag(scale).
  void Scale(cholmod_dense* scale, int mode, cholmod_sparse* A) {
    cholmod_scale(scale, mode, A, &cc_);
  }

  // Create and return a matrix m = A * A'. Caller owns the
  // result. The matrix A is not modified.
  cholmod_sparse* AATranspose(cholmod_sparse* A) {
    cholmod_sparse* m = cholmod_aat(A, NULL, A->nrow, 1, &cc_);
    m->stype = 1;  // Pay attention to the upper triangular part.
    return m;
  }

  // y = alpha * A * x + beta * y. Only y is modified.
  void SparseDenseMultiply(cholmod_sparse* A, double alpha, double beta,
                           cholmod_dense* x, cholmod_dense* y) {
    double alpha_[2] = {alpha, 0};
    double beta_[2] = {beta, 0};
    cholmod_sdmult(A, 0, alpha_, beta_, x, y, &cc_);
  }

  // Find an ordering of A or AA' (if A is unsymmetric) that minimizes
  // the fill-in in the Cholesky factorization of the corresponding
  // matrix. This is done by using the AMD algorithm.
  //
  // Using this ordering, the symbolic Cholesky factorization of A (or
  // AA') is computed and returned.
  //
  // A is not modified; only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholesky(cholmod_sparse* A);

  cholmod_factor* BlockAnalyzeCholesky(cholmod_sparse* A,
                                       const vector<int>& row_blocks,
                                       const vector<int>& col_blocks);

  // If A is symmetric, then compute the symbolic Cholesky
  // factorization of A(ordering, ordering). If A is unsymmetric, then
  // compute the symbolic factorization of
  // A(ordering,:) A(ordering,:)'.
  //
  // A is not modified; only the pattern of non-zeros of A is used,
  // the actual numerical values in A are of no consequence.
  //
  // Caller owns the result.
  cholmod_factor* AnalyzeCholeskyWithUserOrdering(cholmod_sparse* A,
                                                  const vector<int>& ordering);

  // Use the symbolic factorization in L to find the numerical
  // factorization for the matrix A or AA^T. Return true if
  // successful, false otherwise. L contains the numeric factorization
  // on return.
  bool Cholesky(cholmod_sparse* A, cholmod_factor* L);

  // Given a Cholesky factorization of a matrix A = LL^T, solve the
  // linear system Ax = b, and return the result. If the Solve fails,
  // NULL is returned. Caller owns the result.
  cholmod_dense* Solve(cholmod_factor* L, cholmod_dense* b);

  // Combine the calls to Cholesky and Solve into a single call. If
  // the Cholesky factorization or the solve fails, return NULL.
  // Caller owns the result.
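  //
  // A minimal end-to-end sketch (illustrative only; the triplet matrix
  // lhs, the array rhs and its size num_rows are assumed to be built
  // elsewhere):
  //
  //   SuiteSparse ss;
  //   cholmod_sparse* A = ss.CreateSparseMatrix(&lhs);
  //   cholmod_dense*  b = ss.CreateDenseVector(rhs, num_rows, num_rows);
  //   cholmod_factor* L = ss.AnalyzeCholesky(A);      // symbolic factorization
  //   cholmod_dense*  x = ss.SolveCholesky(A, L, b);  // numeric factor + solve
  //   ss.Free(A); ss.Free(b); ss.Free(L); ss.Free(x);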
  cholmod_dense* SolveCholesky(cholmod_sparse* A,
                               cholmod_factor* L,
                               cholmod_dense* b);

  // By virtue of the modeling layer in Ceres being block oriented,
  // all the matrices used by Ceres are also block oriented. When
  // doing sparse direct factorization of these matrices, the
  // fill-reducing ordering algorithms (in particular AMD) can either
  // be run on the block or the scalar form of these matrices. The two
  // SuiteSparse::AnalyzeCholesky methods allow the client to
  // compute the symbolic factorization of a matrix by either using
  // AMD on the matrix or a user provided ordering of the rows.
  //
  // But since the underlying matrices are block oriented, it is worth
  // running AMD on just the block structure of these matrices and then
  // lifting these block orderings to a full scalar ordering. This
  // preserves the block structure of the permuted matrix, and exposes
  // more of the super-nodal structure of the matrix to the numerical
  // factorization routines.
  //
  // Find the block oriented AMD ordering of a matrix A, whose row and
  // column blocks are given by row_blocks and col_blocks
  // respectively. The matrix may or may not be symmetric. The entries
  // of col_blocks do not need to sum to the number of columns in
  // A. If this is the case, only the first sum(col_blocks) columns are
  // used to compute the ordering.
  bool BlockAMDOrdering(const cholmod_sparse* A,
                        const vector<int>& row_blocks,
                        const vector<int>& col_blocks,
                        vector<int>* ordering);

  // Given a set of blocks and a permutation of these blocks, compute
  // the corresponding "scalar" ordering, i.e. an ordering of size
  // sum(blocks).
  static void BlockOrderingToScalarOrdering(const vector<int>& blocks,
                                            const vector<int>& block_ordering,
                                            vector<int>* scalar_ordering);

  // Extract the block sparsity pattern of the scalar sparse matrix
  // A and return it in compressed column form. The compressed column
  // form is stored in two vectors block_rows and block_cols, which
  // correspond to the row and column arrays in a compressed column sparse
  // matrix.
  //
  // If c_ij is the block in the matrix A corresponding to row block i
  // and column block j, then it is expected that A contains at least
  // one non-zero entry corresponding to the top left entry of c_ij,
  // as that entry is used to detect the presence of a non-zero c_ij.
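  //
  // As an illustrative sketch (assumed example, not taken from the
  // implementation): for a 4x4 matrix with row_blocks = col_blocks =
  // {2, 2} and non-zero blocks c_00, c_10 and c_11, the expected
  // output would be block_cols = {0, 2, 3} (column pointers) and
  // block_rows = {0, 1, 1} (block row indices of the non-zero blocks).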
  static void ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
                                        const vector<int>& row_blocks,
                                        const vector<int>& col_blocks,
                                        vector<int>* block_rows,
                                        vector<int>* block_cols);

  void Free(cholmod_sparse* m) { cholmod_free_sparse(&m, &cc_); }
  void Free(cholmod_dense* m)  { cholmod_free_dense(&m, &cc_); }
  void Free(cholmod_factor* m) { cholmod_free_factor(&m, &cc_); }

  void Print(cholmod_sparse* m, const string& name) {
    cholmod_print_sparse(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_dense* m, const string& name) {
    cholmod_print_dense(m, const_cast<char*>(name.c_str()), &cc_);
  }

  void Print(cholmod_triplet* m, const string& name) {
    cholmod_print_triplet(m, const_cast<char*>(name.c_str()), &cc_);
  }

  cholmod_common* mutable_cc() { return &cc_; }

 private:
  cholmod_common cc_;
};

}  // namespace internal
}  // namespace ceres

#endif  // CERES_NO_SUITESPARSE

#endif  // CERES_INTERNAL_SUITESPARSE_H_