// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#ifndef CERES_NO_SUITESPARSE
#include "ceres/suitesparse.h"

#include <algorithm>  // for std::lower_bound.
#include <cstring>    // for memcpy.
#include <vector>

#include "cholmod.h"
#include "ceres/compressed_row_sparse_matrix.h"
#include "ceres/triplet_sparse_matrix.h"
#include "glog/logging.h"  // for the CHECK_* and LOG macros.

namespace ceres {
namespace internal {
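
// Wraps the contents of the triplet matrix A in a cholmod_triplet
// view (no copy is made at this stage) and converts it into a
// compressed column (cholmod_sparse) matrix. The result is newly
// allocated by CHOLMOD and should be freed with cholmod_free_sparse.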
cholmod_sparse* SuiteSparse::CreateSparseMatrix(TripletSparseMatrix* A) {
  cholmod_triplet triplet;

  triplet.nrow = A->num_rows();
  triplet.ncol = A->num_cols();
  triplet.nzmax = A->max_num_nonzeros();
  triplet.nnz = A->num_nonzeros();
  triplet.i = reinterpret_cast<void*>(A->mutable_rows());
  triplet.j = reinterpret_cast<void*>(A->mutable_cols());
  triplet.x = reinterpret_cast<void*>(A->mutable_values());
  triplet.stype = 0;  // Matrix is not symmetric.
  triplet.itype = CHOLMOD_INT;
  triplet.xtype = CHOLMOD_REAL;
  triplet.dtype = CHOLMOD_DOUBLE;

  return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
}

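// Same as CreateSparseMatrix, except that the row and column index
// arrays are swapped, so the returned compressed column matrix is the
// transpose of A.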
cholmod_sparse* SuiteSparse::CreateSparseMatrixTranspose(
    TripletSparseMatrix* A) {
  cholmod_triplet triplet;

  triplet.ncol = A->num_rows();  // Swap rows and columns.
  triplet.nrow = A->num_cols();
  triplet.nzmax = A->max_num_nonzeros();
  triplet.nnz = A->num_nonzeros();

  // Swap the row and column index arrays as well.
  triplet.j = reinterpret_cast<void*>(A->mutable_rows());
  triplet.i = reinterpret_cast<void*>(A->mutable_cols());
  triplet.x = reinterpret_cast<void*>(A->mutable_values());
  triplet.stype = 0;  // Matrix is not symmetric.
  triplet.itype = CHOLMOD_INT;
  triplet.xtype = CHOLMOD_REAL;
  triplet.dtype = CHOLMOD_DOUBLE;

  return cholmod_triplet_to_sparse(&triplet, triplet.nnz, &cc_);
}

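// A CompressedRowSparseMatrix has the same memory layout as the
// transpose of a compressed column (cholmod_sparse) matrix, so this
// view simply aliases the storage of A without copying it. The
// returned struct was allocated with new and must be freed with
// delete (not cholmod_free_sparse); A must outlive the view.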
cholmod_sparse* SuiteSparse::CreateSparseMatrixTransposeView(
    CompressedRowSparseMatrix* A) {
  cholmod_sparse* m = new cholmod_sparse_struct;
  m->nrow = A->num_cols();
  m->ncol = A->num_rows();
  m->nzmax = A->num_nonzeros();

  m->p = reinterpret_cast<void*>(A->mutable_rows());
  m->i = reinterpret_cast<void*>(A->mutable_cols());
  m->x = reinterpret_cast<void*>(A->mutable_values());

  m->stype = 0;  // Matrix is not symmetric.
  m->itype = CHOLMOD_INT;
  m->xtype = CHOLMOD_REAL;
  m->dtype = CHOLMOD_DOUBLE;
  m->sorted = 1;
  m->packed = 1;

  return m;
}

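// Allocates a zeroed dense vector of size out_size and copies the
// first in_size entries of x into it. If x is NULL, the vector is
// left as all zeros.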
cholmod_dense* SuiteSparse::CreateDenseVector(const double* x,
                                              int in_size,
                                              int out_size) {
  CHECK_LE(in_size, out_size);
  cholmod_dense* v = cholmod_zeros(out_size, 1, CHOLMOD_REAL, &cc_);
  if (x != NULL) {
    memcpy(v->x, x, in_size * sizeof(*x));
  }
  return v;
}

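// Computes the symbolic factorization (fill reducing ordering plus
// analysis) of A. The caller owns the returned factor.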
cholmod_factor* SuiteSparse::AnalyzeCholesky(cholmod_sparse* A) {
  // Cholmod can try multiple re-ordering strategies to find a fill
  // reducing ordering. Here we just tell it to use AMD, with an
  // automatic (matrix dependent) choice of supernodal versus
  // simplicial factorization.
  cc_.nmethods = 1;
  cc_.method[0].ordering = CHOLMOD_AMD;
  cc_.supernodal = CHOLMOD_AUTO;
  cholmod_factor* factor = cholmod_analyze(A, &cc_);
  CHECK_EQ(cc_.status, CHOLMOD_OK)
      << "Cholmod symbolic analysis failed " << cc_.status;
  CHECK_NOTNULL(factor);
  return factor;
}

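// Computes a fill reducing ordering on the block sparsity structure
// of A, as described by row_blocks and col_blocks, and then performs
// the symbolic analysis using that ordering. Returns NULL if the
// block ordering could not be computed.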
cholmod_factor* SuiteSparse::BlockAnalyzeCholesky(
    cholmod_sparse* A,
    const vector<int>& row_blocks,
    const vector<int>& col_blocks) {
  vector<int> ordering;
  if (!BlockAMDOrdering(A, row_blocks, col_blocks, &ordering)) {
    return NULL;
  }
  return AnalyzeCholeskyWithUserOrdering(A, ordering);
}

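// Performs the symbolic analysis with a caller supplied fill reducing
// ordering instead of letting CHOLMOD compute one; ordering must
// contain one entry per row of A.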
cholmod_factor* SuiteSparse::AnalyzeCholeskyWithUserOrdering(
    cholmod_sparse* A,
    const vector<int>& ordering) {
  CHECK_EQ(ordering.size(), A->nrow);
  cc_.nmethods = 1;
  cc_.method[0].ordering = CHOLMOD_GIVEN;
  cholmod_factor* factor =
      cholmod_analyze_p(A, const_cast<int*>(&ordering[0]), NULL, 0, &cc_);
  CHECK_EQ(cc_.status, CHOLMOD_OK)
      << "Cholmod symbolic analysis failed " << cc_.status;
  CHECK_NOTNULL(factor);
  return factor;
}

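// Computes a fill reducing ordering by running AMD on the (much
// smaller) block sparsity pattern of A and then expanding the
// resulting block permutation into a permutation of the underlying
// scalar rows. Returns false if cholmod_amd fails.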
bool SuiteSparse::BlockAMDOrdering(const cholmod_sparse* A,
                                   const vector<int>& row_blocks,
                                   const vector<int>& col_blocks,
                                   vector<int>* ordering) {
  const int num_row_blocks = row_blocks.size();
  const int num_col_blocks = col_blocks.size();

  // Arrays storing the compressed column structure of the matrix
  // encoding the block sparsity of A.
  vector<int> block_cols;
  vector<int> block_rows;

  ScalarMatrixToBlockMatrix(A,
                            row_blocks,
                            col_blocks,
                            &block_rows,
                            &block_cols);

  cholmod_sparse_struct block_matrix;
  block_matrix.nrow = num_row_blocks;
  block_matrix.ncol = num_col_blocks;
  block_matrix.nzmax = block_rows.size();
  block_matrix.p = reinterpret_cast<void*>(&block_cols[0]);
  block_matrix.i = reinterpret_cast<void*>(&block_rows[0]);
  block_matrix.x = NULL;
  block_matrix.stype = A->stype;
  block_matrix.itype = CHOLMOD_INT;
  block_matrix.xtype = CHOLMOD_PATTERN;
  block_matrix.dtype = CHOLMOD_DOUBLE;
  block_matrix.sorted = 1;
  block_matrix.packed = 1;

  vector<int> block_ordering(num_row_blocks);
  if (!cholmod_amd(&block_matrix, NULL, 0, &block_ordering[0], &cc_)) {
    return false;
  }

  BlockOrderingToScalarOrdering(row_blocks, block_ordering, ordering);
  return true;
}

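// Extracts the block sparsity structure of the scalar matrix A, as
// described by row_blocks and col_blocks, in compressed column form:
// block_cols plays the role of A->p and block_rows the role of A->i,
// with one entry per block instead of one per scalar non-zero.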
void SuiteSparse::ScalarMatrixToBlockMatrix(const cholmod_sparse* A,
                                            const vector<int>& row_blocks,
                                            const vector<int>& col_blocks,
                                            vector<int>* block_rows,
                                            vector<int>* block_cols) {
  CHECK_NOTNULL(block_rows)->clear();
  CHECK_NOTNULL(block_cols)->clear();
  const int num_row_blocks = row_blocks.size();
  const int num_col_blocks = col_blocks.size();

  vector<int> row_block_starts(num_row_blocks);
  for (int i = 0, cursor = 0; i < num_row_blocks; ++i) {
    row_block_starts[i] = cursor;
    cursor += row_blocks[i];
  }

  // The reinterpret_cast is needed here because CHOLMOD stores arrays
  // as void*.
  const int* scalar_cols = reinterpret_cast<const int*>(A->p);
  const int* scalar_rows = reinterpret_cast<const int*>(A->i);

  // This loop extracts the block sparsity of the scalar sparse matrix
  // A. It does so by iterating over the columns, but only considering
  // the columns corresponding to the first element of each column
  // block. Within each column, the inner loop iterates over the rows,
  // and detects the presence of a row block by checking for the
  // presence of a non-zero entry corresponding to its first element.
  block_cols->push_back(0);
  int c = 0;
  for (int col_block = 0; col_block < num_col_blocks; ++col_block) {
    int column_size = 0;
    for (int idx = scalar_cols[c]; idx < scalar_cols[c + 1]; ++idx) {
      vector<int>::const_iterator it =
          std::lower_bound(row_block_starts.begin(),
                           row_block_starts.end(),
                           scalar_rows[idx]);
      // Since row_block_starts is sorted, lower_bound returns the
      // start of the row block containing scalar_rows[idx] if and
      // only if scalar_rows[idx] is the first row of that block; for
      // every other row of the block it returns the start of the next
      // block. Only the first row is needed to detect the presence of
      // a block, so the other rows can be skipped.
      //
      // For all rows but the first one in the last row block,
      // lower_bound returns row_block_starts.end(); those rows are
      // skipped as well.
      if (it == row_block_starts.end() || *it != scalar_rows[idx]) {
        continue;
      }

      block_rows->push_back(it - row_block_starts.begin());
      ++column_size;
    }
    block_cols->push_back(block_cols->back() + column_size);
    c += col_blocks[col_block];
  }
}

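// Expands a permutation of the blocks into a permutation of the
// underlying scalar rows: each block id in block_ordering is replaced
// by the contiguous range of scalar indices making up that block.
//
// For example (illustrative values): blocks = [2, 3] and
// block_ordering = [1, 0] yield scalar_ordering = [2, 3, 4, 0, 1].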
void SuiteSparse::BlockOrderingToScalarOrdering(
    const vector<int>& blocks,
    const vector<int>& block_ordering,
    vector<int>* scalar_ordering) {
  CHECK_EQ(blocks.size(), block_ordering.size());
  const int num_blocks = blocks.size();

  // block_starts = [0, block1, block1 + block2, ...]
  vector<int> block_starts(num_blocks);
  for (int i = 0, cursor = 0; i < num_blocks; ++i) {
    block_starts[i] = cursor;
    cursor += blocks[i];
  }

  scalar_ordering->resize(block_starts.back() + blocks.back());
  int cursor = 0;
  for (int i = 0; i < num_blocks; ++i) {
    const int block_id = block_ordering[i];
    const int block_size = blocks[block_id];
    int block_position = block_starts[block_id];
    for (int j = 0; j < block_size; ++j) {
      (*scalar_ordering)[cursor++] = block_position++;
    }
  }
}

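// Performs the numeric factorization of A using the symbolic
// factorization stored in L, mapping each CHOLMOD status code to a
// warning and a boolean indicating success.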
bool SuiteSparse::Cholesky(cholmod_sparse* A, cholmod_factor* L) {
  CHECK_NOTNULL(A);
  CHECK_NOTNULL(L);

  cc_.quick_return_if_not_posdef = 1;
  int status = cholmod_factorize(A, L, &cc_);
  switch (cc_.status) {
    case CHOLMOD_NOT_INSTALLED:
      LOG(WARNING) << "Cholmod failure: method not installed.";
      return false;
    case CHOLMOD_OUT_OF_MEMORY:
      LOG(WARNING) << "Cholmod failure: out of memory.";
      return false;
    case CHOLMOD_TOO_LARGE:
      LOG(WARNING) << "Cholmod failure: integer overflow occurred.";
      return false;
    case CHOLMOD_INVALID:
      LOG(WARNING) << "Cholmod failure: invalid input.";
      return false;
    case CHOLMOD_NOT_POSDEF:
      // TODO(sameeragarwal): These two warnings require more
      // sophisticated handling going forward. For now we will be
      // strict and treat them as failures.
      LOG(WARNING) << "Cholmod warning: matrix not positive definite.";
      return false;
    case CHOLMOD_DSMALL:
      LOG(WARNING) << "Cholmod warning: D for LDL' or diag(L) for "
                   << "LL' has tiny absolute value.";
      return false;
    case CHOLMOD_OK:
      if (status != 0) {
        return true;
      }
      LOG(WARNING) << "Cholmod failure: cholmod_factorize returned zero "
                   << "but cholmod_common::status is CHOLMOD_OK. "
                   << "Please report this to ceres-solver@googlegroups.com.";
      return false;
    default:
      LOG(WARNING) << "Unknown cholmod return code. "
                   << "Please report this to ceres-solver@googlegroups.com.";
      return false;
  }
  return false;
}

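// Solves A x = b using the factorization stored in L. Returns NULL if
// the CHOLMOD state indicates an earlier failure; otherwise the
// caller owns the returned vector.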
cholmod_dense* SuiteSparse::Solve(cholmod_factor* L,
                                  cholmod_dense* b) {
  if (cc_.status != CHOLMOD_OK) {
    LOG(WARNING) << "CHOLMOD status NOT OK";
    return NULL;
  }

  return cholmod_solve(CHOLMOD_A, L, b, &cc_);
}

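// Convenience wrapper combining the numeric factorization and the
// back substitution; returns NULL if the factorization fails.
//
// A minimal usage sketch with hypothetical variable names, assuming
// matching Free* style cleanup helpers are declared in suitesparse.h:
//
//   SuiteSparse ss;
//   cholmod_sparse* lhs = ss.CreateSparseMatrix(&A);
//   cholmod_factor* factor = ss.AnalyzeCholesky(lhs);
//   cholmod_dense* rhs = ss.CreateDenseVector(b, num_rows, num_rows);
//   cholmod_dense* solution = ss.SolveCholesky(lhs, factor, rhs);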
cholmod_dense* SuiteSparse::SolveCholesky(cholmod_sparse* A,
                                          cholmod_factor* L,
                                          cholmod_dense* b) {
  CHECK_NOTNULL(A);
  CHECK_NOTNULL(L);
  CHECK_NOTNULL(b);

  if (Cholesky(A, L)) {
    return Solve(L, b);
  }

  return NULL;
}

}  // namespace internal
}  // namespace ceres

#endif  // CERES_NO_SUITESPARSE