// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: sameeragarwal@google.com (Sameer Agarwal)

#ifndef CERES_PUBLIC_SOLVER_H_
#define CERES_PUBLIC_SOLVER_H_

#include <cmath>
#include <string>
#include <vector>
#include "ceres/crs_matrix.h"
#include "ceres/internal/macros.h"
#include "ceres/internal/port.h"
#include "ceres/iteration_callback.h"
#include "ceres/ordered_groups.h"
#include "ceres/types.h"

namespace ceres {

class Problem;

// Interface for non-linear least squares solvers.
class Solver {
 public:
  virtual ~Solver();

  // The options structure contains, not surprisingly, options that control how
  // the solver operates. The defaults should be suitable for a wide range of
  // problems; however, better performance is often obtainable with tweaking.
  //
  // The constants are defined inside types.h
  struct Options {
    // Default constructor that sets up a generic sparse problem.
    Options() {
      trust_region_strategy_type = LEVENBERG_MARQUARDT;
      dogleg_type = TRADITIONAL_DOGLEG;
      use_nonmonotonic_steps = false;
      max_consecutive_nonmonotonic_steps = 5;
      max_num_iterations = 50;
      max_solver_time_in_seconds = 1e9;
      num_threads = 1;
      initial_trust_region_radius = 1e4;
      max_trust_region_radius = 1e16;
      min_trust_region_radius = 1e-32;
      min_relative_decrease = 1e-3;
      lm_min_diagonal = 1e-6;
      lm_max_diagonal = 1e32;
      max_num_consecutive_invalid_steps = 5;
      function_tolerance = 1e-6;
      gradient_tolerance = 1e-10;
      parameter_tolerance = 1e-8;

#if defined(CERES_NO_SUITESPARSE) && defined(CERES_NO_CXSPARSE)
      linear_solver_type = DENSE_QR;
#else
      linear_solver_type = SPARSE_NORMAL_CHOLESKY;
#endif

      preconditioner_type = JACOBI;

      sparse_linear_algebra_library = SUITE_SPARSE;
#if defined(CERES_NO_SUITESPARSE) && !defined(CERES_NO_CXSPARSE)
      sparse_linear_algebra_library = CX_SPARSE;
#endif

      num_linear_solver_threads = 1;

#if defined(CERES_NO_SUITESPARSE)
      use_block_amd = false;
#else
      use_block_amd = true;
#endif
      linear_solver_ordering = NULL;
      use_inner_iterations = false;
      inner_iteration_ordering = NULL;
      linear_solver_min_num_iterations = 1;
      linear_solver_max_num_iterations = 500;
      eta = 1e-1;
      jacobi_scaling = true;
      logging_type = PER_MINIMIZER_ITERATION;
      minimizer_progress_to_stdout = false;
      return_initial_residuals = false;
      return_initial_gradient = false;
      return_initial_jacobian = false;
      return_final_residuals = false;
      return_final_gradient = false;
      return_final_jacobian = false;
      lsqp_dump_directory = "/tmp";
      lsqp_dump_format_type = TEXTFILE;
      check_gradients = false;
      gradient_check_relative_precision = 1e-8;
      numeric_derivative_relative_step_size = 1e-6;
      update_state_every_iteration = false;
    }

    ~Options();
    // Minimizer options ----------------------------------------

    TrustRegionStrategyType trust_region_strategy_type;

    // Type of dogleg strategy to use.
    DoglegType dogleg_type;

    // The classical trust region methods are descent methods, in that
    // they only accept a point if it strictly reduces the value of
    // the objective function.
    //
    // Relaxing this requirement allows the algorithm to be more
    // efficient in the long term at the cost of some local increase
    // in the value of the objective function.
    //
    // This is because allowing for non-decreasing objective function
    // values in a principled manner allows the algorithm to "jump over
    // boulders", as the method is not restricted to moving through narrow
    // valleys, while still preserving its convergence properties.
    //
    // Setting use_nonmonotonic_steps to true enables the
    // non-monotonic trust region algorithm as described by Conn,
    // Gould & Toint in "Trust Region Methods", Section 10.1.
    //
    // The parameter max_consecutive_nonmonotonic_steps controls the
    // window size used by the step selection algorithm to accept
    // non-monotonic steps.
    //
    // Even though the value of the objective function may be larger
    // than the minimum value encountered over the course of the
    // optimization, the final parameters returned to the user are the
    // ones corresponding to the minimum cost over all iterations.
    bool use_nonmonotonic_steps;
    int max_consecutive_nonmonotonic_steps;
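
    // For example, a minimal sketch of enabling the non-monotonic
    // trust region algorithm (the window size shown is illustrative):
    //
    //   ceres::Solver::Options options;
    //   options.use_nonmonotonic_steps = true;
    //   options.max_consecutive_nonmonotonic_steps = 10;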

    // Maximum number of iterations for which the minimizer should run.
    int max_num_iterations;

    // Maximum amount of time for which the minimizer should run.
    double max_solver_time_in_seconds;

    // Number of threads used by Ceres for evaluating the cost and
    // jacobians.
    int num_threads;

    // Trust region minimizer settings.
    double initial_trust_region_radius;
    double max_trust_region_radius;

    // Minimizer terminates when the trust region radius becomes
    // smaller than this value.
    double min_trust_region_radius;

    // Lower bound for the relative decrease before a step is
    // accepted.
    double min_relative_decrease;

    // For the Levenberg-Marquardt algorithm, the scaled diagonal of
    // the normal equations J'J is used to control the size of the
    // trust region. Extremely small and large values along the
    // diagonal can make this regularization scheme
    // fail. lm_min_diagonal and lm_max_diagonal clamp the values of
    // diag(J'J) from below and above. In the normal course of
    // operation, the user should not have to modify these parameters.
    double lm_min_diagonal;
    double lm_max_diagonal;

    // Sometimes due to numerical conditioning problems or linear
    // solver flakiness, the trust region strategy may return a
    // numerically invalid step that can be fixed by reducing the
    // trust region size. So the TrustRegionMinimizer allows for a few
    // successive invalid steps before it declares NUMERICAL_FAILURE.
    int max_num_consecutive_invalid_steps;

    // Minimizer terminates when
    //
    //   (new_cost - old_cost) < function_tolerance * old_cost;
    //
    double function_tolerance;

    // Minimizer terminates when
    //
    //   max_i |gradient_i| < gradient_tolerance * max_i|initial_gradient_i|
    //
    // This value should typically be 1e-4 * function_tolerance.
    double gradient_tolerance;

    // Minimizer terminates when
    //
    //   |step|_2 <= parameter_tolerance * ( |x|_2 +  parameter_tolerance)
    //
    double parameter_tolerance;
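
    // For example, a minimal sketch of tightening these convergence
    // tolerances for a higher accuracy solve (illustrative values):
    //
    //   ceres::Solver::Options options;
    //   options.function_tolerance = 1e-12;
    //   options.gradient_tolerance = 1e-16;   // ~1e-4 * function_tolerance
    //   options.parameter_tolerance = 1e-14;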

    // Linear least squares solver options -------------------------------------

    LinearSolverType linear_solver_type;

    // Type of preconditioner to use with the iterative linear solvers.
    PreconditionerType preconditioner_type;

    // Ceres supports using multiple sparse linear algebra libraries
    // for sparse matrix ordering and factorizations. Currently,
    // SUITE_SPARSE and CX_SPARSE are the valid choices, depending on
    // whether they are linked into Ceres at build time.
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;

    // Number of threads used by Ceres to solve the Newton
    // step. Currently only the SPARSE_SCHUR solver is capable of
    // using this setting.
    int num_linear_solver_threads;

    // The order in which variables are eliminated in a linear solver
    // can have a significant impact on the efficiency and accuracy
    // of the method. e.g., when doing sparse Cholesky factorization,
    // there are matrices for which a good ordering will give a
    // Cholesky factor with O(n) storage, whereas a bad ordering will
    // result in a completely dense factor.
    //
    // Ceres allows the user to provide varying amounts of hints to
    // the solver about the variable elimination ordering to use. This
    // can range from no hints, where the solver is free to decide the
    // best possible ordering based on the user's choices like the
    // linear solver being used, to an exact order in which the
    // variables should be eliminated, and a variety of possibilities
    // in between.
    //
    // Instances of the ParameterBlockOrdering class are used to
    // communicate this information to Ceres.
    //
    // Formally an ordering is an ordered partitioning of the
    // parameter blocks, i.e., each parameter block belongs to exactly
    // one group, and each group has a unique non-negative integer
    // associated with it that determines its order in the set of
    // groups.
    //
    // Given such an ordering, Ceres ensures that the parameter blocks in
    // the lowest numbered group are eliminated first, and then the
    // parameter blocks in the next lowest numbered group and so on. Within
    // each group, Ceres is free to order the parameter blocks as it
    // chooses.
    //
    // If NULL, then all parameter blocks are assumed to be in the
    // same group and the solver is free to decide the best
    // ordering.
    //
    // e.g. Consider the linear system
    //
    //   x + y = 3
    //   2x + 3y = 7
    //
    // It can be solved in two ways: first eliminate x from the two
    // equations, solve for y and then back substitute for x; or first
    // eliminate y, solve for x and back substitute for y. The user
    // can construct three orderings here.
    //
    //   {0: x}, {1: y} - eliminate x first.
    //   {0: y}, {1: x} - eliminate y first.
    //   {0: x, y}      - Solver gets to decide the elimination order.
    //
    // Thus, to have Ceres determine the ordering automatically using
    // heuristics, put all the variables in group 0; to control the
    // ordering for every variable, create groups 0..N-1, one per
    // variable, in the desired order.
    //
    // Bundle Adjustment
    // -----------------
    //
    // A particular case of interest is bundle adjustment, where the user
    // has two options. The default is to not specify an ordering at all;
    // the solver will see that the user wants to use a Schur type solver
    // and figure out the right elimination ordering.
    //
    // But if the user already knows which parameter blocks are points and
    // which are cameras, they can save preprocessing time by partitioning
    // the parameter blocks into two groups, one for the points and one
    // for the cameras, where the group containing the points has an id
    // smaller than the group containing the cameras.
    //
    // Once assigned, Solver::Options owns this pointer and will
    // deallocate the memory when destroyed.
    ParameterBlockOrdering* linear_solver_ordering;
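
    // For example, in a bundle adjustment problem with parameter
    // blocks point0, point1, camera0 and camera1 (names here are
    // illustrative, each a double* registered with the Problem),
    // eliminating the points before the cameras can be requested with
    // a sketch like:
    //
    //   ceres::Solver::Options options;
    //   options.linear_solver_ordering = new ceres::ParameterBlockOrdering;
    //   options.linear_solver_ordering->AddElementToGroup(point0, 0);
    //   options.linear_solver_ordering->AddElementToGroup(point1, 0);
    //   options.linear_solver_ordering->AddElementToGroup(camera0, 1);
    //   options.linear_solver_ordering->AddElementToGroup(camera1, 1);
    //
    // Since Solver::Options takes ownership of the ordering, the
    // caller does not delete it.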

    // By virtue of the modeling layer in Ceres being block oriented,
    // all the matrices used by Ceres are also block oriented. When
    // doing sparse direct factorization of these matrices (for
    // SPARSE_NORMAL_CHOLESKY, SPARSE_SCHUR and ITERATIVE in
    // conjunction with CLUSTER_TRIDIAGONAL and CLUSTER_JACOBI
    // preconditioners), the fill-reducing ordering algorithms can
    // either be run on the block or the scalar form of these matrices.
    // Running it on the block form exposes more of the super-nodal
    // structure of the matrix to the factorization routines. Setting
    // this parameter to true runs the ordering algorithms in block
    // form. Currently this option only makes sense with
    // sparse_linear_algebra_library = SUITE_SPARSE.
    bool use_block_amd;

    // Some non-linear least squares problems have additional
    // structure in the way the parameter blocks interact, such that
    // it is beneficial to modify the way the trust region step is
    // computed.
    //
    // e.g., consider the following regression problem
    //
    //   y = a_1 exp(b_1 x) + a_2 exp(b_2 x^2 + c_1)
    //
    // Given a set of pairs {(x_i, y_i)}, the user wishes to estimate
    // a_1, a_2, b_1, b_2, and c_1.
    //
    // Notice here that the expression on the right hand side is
    // linear in a_1 and a_2, and given any value for b_1, b_2 and
    // c_1, it is possible to use linear regression to estimate the
    // optimal values of a_1 and a_2. Indeed, it is possible to
    // analytically eliminate the variables a_1 and a_2 from the
    // problem altogether. Problems like these are known as separable
    // least squares problems and the most famous algorithm for
    // solving them is the Variable Projection algorithm invented by
    // Golub & Pereyra.
    //
    // Similar structure can be found in the matrix factorization with
    // missing data problem. There the corresponding algorithm is
    // known as Wiberg's algorithm.
    //
    // Ruhe & Wedin (Algorithms for Separable Nonlinear Least Squares
    // Problems, SIAM Reviews, 22(3), 1980) present an analysis of
    // various algorithms for solving separable non-linear least
    // squares problems and refer to "Variable Projection" as
    // Algorithm I in their paper.
    //
    // Implementing Variable Projection is tedious and expensive, and
    // they present a simpler algorithm, which they refer to as
    // Algorithm II, where once the Newton/Trust Region step has been
    // computed for the whole problem (a_1, a_2, b_1, b_2, c_1), an
    // additional optimization step is performed to estimate a_1 and
    // a_2 exactly.
    //
    // This idea can be generalized to cases where the residual is not
    // linear in a_1 and a_2, i.e., solve for the trust region step
    // for the full problem, and then use it as the starting point to
    // further optimize just a_1 and a_2. For the linear case, this
    // amounts to doing a single linear least squares solve. For
    // non-linear problems, any method for solving the a_1 and a_2
    // optimization problems will do. The only constraint on a_1 and
    // a_2 is that they do not co-occur in any residual block.
    //
    // This idea can be further generalized, by not just optimizing
    // (a_1, a_2), but decomposing the graph corresponding to the
    // Hessian matrix's sparsity structure into a collection of
    // non-overlapping independent sets and optimizing each of them.
    //
    // Setting "use_inner_iterations" to true enables the use of this
    // non-linear generalization of Ruhe & Wedin's Algorithm II. This
    // method has a higher cost per iteration, but also displays
    // better convergence behaviour per iteration. Setting
    // Solver::Options::num_threads to the maximum number possible is
    // highly recommended.
    bool use_inner_iterations;

    // If use_inner_iterations is true, then the user has two choices.
    //
    // 1. Let the solver heuristically decide which parameter blocks
    //    to optimize in each inner iteration. To do this leave
    //    Solver::Options::inner_iteration_ordering untouched.
    //
    // 2. Specify a collection of ordered independent sets, where the
    //    lower numbered groups are optimized before the higher
    //    numbered groups. Each group must be an independent set.
    ParameterBlockOrdering* inner_iteration_ordering;
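
    // For example, a minimal sketch of enabling inner iterations and
    // letting Ceres pick the independent sets on its own:
    //
    //   ceres::Solver::Options options;
    //   options.use_inner_iterations = true;
    //
    // Alternatively, an explicit ordering can be supplied; e.g. with
    // two illustrative parameter blocks a and b (each a double*),
    // where singleton groups are trivially independent sets:
    //
    //   options.inner_iteration_ordering = new ceres::ParameterBlockOrdering;
    //   options.inner_iteration_ordering->AddElementToGroup(a, 0);
    //   options.inner_iteration_ordering->AddElementToGroup(b, 1);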

    // Minimum number of iterations for which the linear solver should
    // run, even if the convergence criterion is satisfied.
    int linear_solver_min_num_iterations;

    // Maximum number of iterations for which the linear solver should
    // run. If the solver does not converge within
    // linear_solver_max_num_iterations, then it returns
    // MAX_ITERATIONS as its termination type.
    int linear_solver_max_num_iterations;

    // Forcing sequence parameter. The truncated Newton solver uses
    // this number to control the relative accuracy with which the
    // Newton step is computed.
    //
    // This constant is passed to ConjugateGradientsSolver which uses
    // it to terminate the iterations when
    //
    //  (Q_i - Q_{i-1})/Q_i < eta/i
    double eta;

    // Normalize the jacobian using Jacobi scaling before calling
    // the linear least squares solver.
    bool jacobi_scaling;

    // Logging options ---------------------------------------------------------

    LoggingType logging_type;

    // By default the Minimizer progress is logged to VLOG(1), which
    // is sent to STDERR depending on the vlog level. If this flag is
    // set to true, and logging_type is not SILENT, the logging output
    // is sent to STDOUT.
    bool minimizer_progress_to_stdout;

    bool return_initial_residuals;
    bool return_initial_gradient;
    bool return_initial_jacobian;

    bool return_final_residuals;
    bool return_final_gradient;
    bool return_final_jacobian;

    // List of iterations at which the optimizer should dump the
    // linear least squares problem to disk. Useful for testing and
    // benchmarking. If empty (default), no problems are dumped.
    //
    // This is ignored if protocol buffers are disabled.
    vector<int> lsqp_iterations_to_dump;
    string lsqp_dump_directory;
    DumpFormatType lsqp_dump_format_type;

    // Finite differences options ----------------------------------------------

    // Check all jacobians computed by each residual block with finite
    // differences. This is expensive since it involves computing the
    // derivative by normal means (e.g. user specified, autodiff,
    // etc), then also computing it using finite differences. The
    // results are compared, and if they differ substantially, details
    // are printed to the log.
    bool check_gradients;

    // Relative precision to check for in the gradient checker. If the
    // relative difference between an element of the user supplied
    // jacobian and its finite difference estimate exceeds this
    // number, then the jacobian for that cost term is dumped.
    double gradient_check_relative_precision;
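
    // A minimal configuration sketch (illustrative tolerance value):
    //
    //   ceres::Solver::Options options;
    //   options.check_gradients = true;
    //   options.gradient_check_relative_precision = 1e-6;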

    // Relative shift used for taking numeric derivatives. For finite
    // differencing, each dimension is evaluated at slightly shifted
    // values; for the case of central difference, this is what gets
    // evaluated:
    //
    //   delta = numeric_derivative_relative_step_size;
    //   f_initial  = f(x)
    //   f_forward  = f((1 + delta) * x)
    //   f_backward = f((1 - delta) * x)
    //
    // The finite differencing is done along each dimension. The
    // reason to use a relative (rather than absolute) step size is
    // that this way, numeric differentiation works for functions where
    // the arguments are typically large (e.g. 1e9) and when the
    // values are small (e.g. 1e-5). It is possible to construct
    // "torture cases" which break this finite difference heuristic,
    // but they do not come up often in practice.
    //
    // TODO(keir): Pick a smarter number than the default above! In
    // theory a good choice is sqrt(eps) * x, which for doubles means
    // about 1e-8 * x. However, I have found this number too
    // optimistic. This number should be exposed for users to change.
    double numeric_derivative_relative_step_size;

    // If true, the user's parameter blocks are updated at the end of
    // every Minimizer iteration; otherwise they are updated when the
    // Minimizer terminates. This is useful if, for example, the user
    // wishes to visualize the state of the optimization every
    // iteration.
    bool update_state_every_iteration;

    // Callbacks that are executed at the end of each iteration of the
    // Minimizer. An iteration may terminate midway, either due to
    // numerical failures or because one of the convergence tests has
    // been satisfied. In this case none of the callbacks are
    // executed.

    // Callbacks are executed in the order that they are specified in
    // this vector. By default, parameter blocks are updated only at
    // the end of the optimization, i.e. when the Minimizer
    // terminates. This behaviour is controlled by
    // update_state_every_iteration. If the user wishes to have access
    // to the updated parameter blocks when his/her callbacks are
    // executed, then set update_state_every_iteration to true.
    //
    // The solver does NOT take ownership of these pointers.
    vector<IterationCallback*> callbacks;
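
    // A minimal illustrative callback that logs the cost at the end
    // of every iteration and always lets the solver continue:
    //
    //   class LoggingCallback : public ceres::IterationCallback {
    //    public:
    //     virtual ceres::CallbackReturnType operator()(
    //         const ceres::IterationSummary& summary) {
    //       std::cout << "iteration: " << summary.iteration
    //                 << " cost: " << summary.cost << std::endl;
    //       return ceres::SOLVER_CONTINUE;
    //     }
    //   };
    //
    //   LoggingCallback logging_callback;
    //   options.callbacks.push_back(&logging_callback);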

    // If non-empty, a summary of the execution of the solver is
    // recorded to this file.
    string solver_log;
  };

  struct Summary {
    Summary();

    // A brief one line description of the state of the solver after
    // termination.
    string BriefReport() const;

    // A full multiline description of the state of the solver after
    // termination.
    string FullReport() const;

    // Minimizer summary -------------------------------------------------
    SolverTerminationType termination_type;

    // If the solver did not run, or there was a failure, a
    // description of the error.
    string error;

    // Cost of the problem before and after the optimization. See
    // problem.h for definition of the cost of a problem.
    double initial_cost;
    double final_cost;

    // The part of the total cost that comes from residual blocks that
    // were held fixed by the preprocessor because all the parameter
    // blocks that they depend on were fixed.
    double fixed_cost;

    // Vectors of residuals before and after the optimization. The
    // entries of these vectors are in the order in which
    // ResidualBlocks were added to the Problem object.
    //
    // Whether the residual vectors are populated with values is
    // controlled by Solver::Options::return_initial_residuals and
    // Solver::Options::return_final_residuals respectively.
    vector<double> initial_residuals;
    vector<double> final_residuals;

    // Gradient vectors, before and after the optimization. The rows
    // are in the same order in which the ParameterBlocks were added
    // to the Problem object.
    //
    // NOTE: Since AddResidualBlock adds ParameterBlocks to the
    // Problem automatically if they do not already exist, if you wish
    // to have explicit control over the ordering of the vectors, then
    // use Problem::AddParameterBlock to explicitly add the
    // ParameterBlocks in the order desired.
    //
    // Whether the vectors are populated with values is controlled by
    // Solver::Options::return_initial_gradient and
    // Solver::Options::return_final_gradient respectively.
    vector<double> initial_gradient;
    vector<double> final_gradient;

    // Jacobian matrices before and after the optimization. The rows
    // of these matrices are in the same order in which the
    // ResidualBlocks were added to the Problem object. The columns
    // are in the same order in which the ParameterBlocks were added
    // to the Problem object.
    //
    // NOTE: Since AddResidualBlock adds ParameterBlocks to the
    // Problem automatically if they do not already exist, if you wish
    // to have explicit control over the column ordering of the
    // matrix, then use Problem::AddParameterBlock to explicitly add
    // the ParameterBlocks in the order desired.
    //
    // The Jacobian matrices are stored as compressed row sparse
    // matrices. Please see ceres/crs_matrix.h for more details of the
    // format.
    //
    // Whether the Jacobian matrices are populated with values is
    // controlled by Solver::Options::return_initial_jacobian and
    // Solver::Options::return_final_jacobian respectively.
    CRSMatrix initial_jacobian;
    CRSMatrix final_jacobian;

    vector<IterationSummary> iterations;

    int num_successful_steps;
    int num_unsuccessful_steps;

    // When the user calls Solve, before the actual optimization
    // occurs, Ceres performs a number of preprocessing steps. These
    // include error checks, memory allocations, and reorderings. This
    // time is accounted for as preprocessing time.
    double preprocessor_time_in_seconds;

    // Time spent in the TrustRegionMinimizer.
    double minimizer_time_in_seconds;

    // After the Minimizer is finished, some time is spent in
    // re-evaluating residuals etc. This time is accounted for in the
    // postprocessor time.
    double postprocessor_time_in_seconds;

    // Sum total of all time spent inside Ceres when Solve is called.
    double total_time_in_seconds;

    // Preprocessor summary.
    int num_parameter_blocks;
    int num_parameters;
    int num_residual_blocks;
    int num_residuals;

    int num_parameter_blocks_reduced;
    int num_parameters_reduced;
    int num_residual_blocks_reduced;
    int num_residuals_reduced;

    int num_eliminate_blocks_given;
    int num_eliminate_blocks_used;

    int num_threads_given;
    int num_threads_used;

    int num_linear_solver_threads_given;
    int num_linear_solver_threads_used;

    LinearSolverType linear_solver_type_given;
    LinearSolverType linear_solver_type_used;

    PreconditionerType preconditioner_type;

    TrustRegionStrategyType trust_region_strategy_type;
    DoglegType dogleg_type;
    SparseLinearAlgebraLibraryType sparse_linear_algebra_library;
  };

  // Once a least squares problem has been built, this function takes
  // the problem and optimizes it based on the values of the options
  // parameters. Upon return, a detailed summary of the work performed
  // by the preprocessor, the non-linear minimizer and the linear
  // solver is reported in the summary object.
  virtual void Solve(const Options& options,
                     Problem* problem,
                     Solver::Summary* summary);
};

// Helper function which avoids going through the interface.
void Solve(const Solver::Options& options,
           Problem* problem,
           Solver::Summary* summary);
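
// A minimal end-to-end usage sketch. The cost functor, loss function
// choice and starting value below are illustrative examples, not part
// of this header:
//
//   struct CostFunctor {
//     template <typename T>
//     bool operator()(const T* const x, T* residual) const {
//       residual[0] = T(10.0) - x[0];
//       return true;
//     }
//   };
//
//   double x = 0.5;
//   ceres::Problem problem;
//   problem.AddResidualBlock(
//       new ceres::AutoDiffCostFunction<CostFunctor, 1, 1>(new CostFunctor),
//       NULL,  // No loss function, i.e. squared loss.
//       &x);
//
//   ceres::Solver::Options options;
//   options.max_num_iterations = 25;
//   ceres::Solver::Summary summary;
//   ceres::Solve(options, &problem, &summary);
//   std::cout << summary.BriefReport() << std::endl;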

}  // namespace ceres

#endif  // CERES_PUBLIC_SOLVER_H_