// Ceres Solver - A fast non-linear least squares minimizer
// Copyright 2010, 2011, 2012 Google Inc. All rights reserved.
// http://code.google.com/p/ceres-solver/
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
//   this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
//   this list of conditions and the following disclaimer in the documentation
//   and/or other materials provided with the distribution.
// * Neither the name of Google Inc. nor the names of its contributors may be
//   used to endorse or promote products derived from this software without
//   specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
//
// Author: keir@google.com (Keir Mierle)
//
// Based on the templated version in public/numeric_diff_cost_function.h.

#include "ceres/runtime_numeric_diff_cost_function.h"

#include <algorithm>
#include <cstring>  // for memcpy.
#include <numeric>
#include <vector>
#include "Eigen/Dense"
#include "ceres/cost_function.h"
#include "ceres/internal/scoped_ptr.h"
#include "glog/logging.h"

namespace ceres {
namespace internal {
namespace {

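// Compute a finite-difference approximation of the jacobian of |function|
// with respect to the parameter block at index |parameter_block|, writing it
// into jacobians[parameter_block]. Column j approximates d(residuals)/dx_j as
// (f(x + h e_j) - f(x)) / h for forward differences, or
// (f(x + h e_j) - f(x - h e_j)) / (2 h) for CENTRAL differences, using the
// per-coordinate step h = |x_j| * relative_step_size (falling back to the
// mean step over the block, or to relative_step_size itself, when x_j == 0).
// residuals_at_eval_point holds f(x) and is only read in the forward case.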
bool EvaluateJacobianForParameterBlock(const CostFunction* function,
                                       int parameter_block_size,
                                       int parameter_block,
                                       RuntimeNumericDiffMethod method,
                                       double relative_step_size,
                                       double const* residuals_at_eval_point,
                                       double** parameters,
                                       double** jacobians) {
  using Eigen::Map;
  using Eigen::Matrix;
  using Eigen::Dynamic;
  using Eigen::RowMajor;

  typedef Matrix<double, Dynamic, 1> ResidualVector;
  typedef Matrix<double, Dynamic, 1> ParameterVector;
  typedef Matrix<double, Dynamic, Dynamic, RowMajor> JacobianMatrix;

  int num_residuals = function->num_residuals();

  Map<JacobianMatrix> parameter_jacobian(jacobians[parameter_block],
                                         num_residuals,
                                         parameter_block_size);

  // Mutate one element at a time and then restore.
  Map<ParameterVector> x_plus_delta(parameters[parameter_block],
                                    parameter_block_size);
  ParameterVector x(x_plus_delta);
  ParameterVector step_size = x.array().abs() * relative_step_size;

  // To handle cases where a parameter is exactly zero, instead use the mean
  // step_size for the other dimensions.
  double fallback_step_size = step_size.sum() / step_size.rows();
  if (fallback_step_size == 0.0) {
    // If all the parameters are zero, there's no good answer. Use the given
    // relative step_size as absolute step_size and hope for the best.
    fallback_step_size = relative_step_size;
  }

  // For each parameter in the parameter block, use finite differences to
  // compute the derivative for that parameter.
  for (int j = 0; j < parameter_block_size; ++j) {
    if (step_size(j) == 0.0) {
      // The parameter is exactly zero, so compromise and use the mean step_size
      // from the other parameters. This can break in many cases, but it's hard
      // to pick a good number without problem specific knowledge.
      step_size(j) = fallback_step_size;
    }
    x_plus_delta(j) = x(j) + step_size(j);

    ResidualVector residuals(num_residuals);
    if (!function->Evaluate(parameters, &residuals[0], NULL)) {
      // Something went wrong; bail.
      return false;
    }

    // Compute this column of the jacobian in 3 steps:
    // 1. Store residuals for the forward part.
    // 2. Subtract residuals for the backward (or 0) part.
    // 3. Divide out the run.
    parameter_jacobian.col(j) = residuals;

    double one_over_h = 1 / step_size(j);
    if (method == CENTRAL) {
      // Compute the function on the other side of x(j).
      x_plus_delta(j) = x(j) - step_size(j);

      if (!function->Evaluate(parameters, &residuals[0], NULL)) {
        // Something went wrong; bail.
        return false;
      }
      parameter_jacobian.col(j) -= residuals;
      one_over_h /= 2;
    } else {
      // Forward difference only; reuse existing residuals evaluation.
      parameter_jacobian.col(j) -=
          Map<const ResidualVector>(residuals_at_eval_point, num_residuals);
    }
    x_plus_delta(j) = x(j);  // Restore x_plus_delta.

    // Divide out the run to get slope.
    parameter_jacobian.col(j) *= one_over_h;
  }
  return true;
}

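// Adapter that wraps another CostFunction and implements Evaluate() by
// forwarding the residual computation to the wrapped function, then filling
// any requested jacobian blocks with the finite differencing helper above.
// The wrapped cost function is not owned and must outlive this adapter.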
class RuntimeNumericDiffCostFunction : public CostFunction {
 public:
  RuntimeNumericDiffCostFunction(const CostFunction* function,
                                 RuntimeNumericDiffMethod method,
                                 double relative_step_size)
      : function_(function),
        method_(method),
        relative_step_size_(relative_step_size) {
    *mutable_parameter_block_sizes() = function->parameter_block_sizes();
    set_num_residuals(function->num_residuals());
  }

  virtual ~RuntimeNumericDiffCostFunction() { }

  virtual bool Evaluate(double const* const* parameters,
                        double* residuals,
                        double** jacobians) const {
    // Get the function value (residuals) at the point to evaluate.
    bool success = function_->Evaluate(parameters, residuals, NULL);
    if (!success) {
      // Something went wrong; ignore the jacobian.
      return false;
    }
    if (!jacobians) {
      // Nothing to do; just forward.
      return true;
    }

    const vector<int16>& block_sizes = function_->parameter_block_sizes();
    CHECK(!block_sizes.empty());

    // Create local space for a copy of the parameters which will get mutated.
    int parameters_size = accumulate(block_sizes.begin(), block_sizes.end(), 0);
    vector<double> parameters_copy(parameters_size);
    vector<double*> parameters_references_copy(block_sizes.size());
    parameters_references_copy[0] = &parameters_copy[0];
    for (int block = 1; block < block_sizes.size(); ++block) {
      parameters_references_copy[block] = parameters_references_copy[block - 1]
          + block_sizes[block - 1];
    }

    // Copy the parameters into the local temp space.
    for (int block = 0; block < block_sizes.size(); ++block) {
      memcpy(parameters_references_copy[block],
             parameters[block],
             block_sizes[block] * sizeof(*parameters[block]));
    }

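    // Numerically differentiate one parameter block at a time. The wrapped
    // function is only ever asked for residuals (jacobians == NULL above);
    // every non-NULL jacobians[block] below is filled by finite differencing
    // against the mutable parameter copy.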
    for (int block = 0; block < block_sizes.size(); ++block) {
      if (!jacobians[block]) {
        // No jacobian requested for this parameter / residual pair.
        continue;
      }
      if (!EvaluateJacobianForParameterBlock(function_,
                                             block_sizes[block],
                                             block,
                                             method_,
                                             relative_step_size_,
                                             residuals,
                                             &parameters_references_copy[0],
                                             jacobians)) {
        return false;
      }
    }
    return true;
  }

 private:
  const CostFunction* function_;
  RuntimeNumericDiffMethod method_;
  double relative_step_size_;
};

}  // namespace

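// Public factory. The returned cost function forwards residual evaluation to
// |cost_function| and fills every requested jacobian block by numeric
// differentiation with the given method and relative step size. It does not
// take ownership of |cost_function|.
//
// A rough usage sketch (MyAnalyticCostFunction and the 1e-6 step are made-up
// placeholders, not part of this file):
//
//   CostFunction* residuals_only = new MyAnalyticCostFunction;
//   scoped_ptr<CostFunction> with_numeric_jacobians(
//       CreateRuntimeNumericDiffCostFunction(residuals_only, CENTRAL, 1e-6));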
CostFunction* CreateRuntimeNumericDiffCostFunction(
    const CostFunction* cost_function,
    RuntimeNumericDiffMethod method,
    double relative_step_size) {
  return new RuntimeNumericDiffCostFunction(cost_function,
                                            method,
                                            relative_step_size);
}

}  // namespace internal
}  // namespace ceres