#include "common/math/levenberg_marquardt.h"

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#include "common/math/macros.h"
#include "common/math/mat.h"
#include "common/math/vec.h"

// FORWARD DECLARATIONS
////////////////////////////////////////////////////////////////////////
static bool checkRelativeStepSize(const float *step, const float *state,
                                  size_t dim, float relative_error_threshold);

static bool computeResidualAndGradients(ResidualAndJacobianFunction func,
                                        const float *state, const void *f_data,
                                        float *jacobian,
                                        float gradient_threshold,
                                        size_t state_dim, size_t meas_dim,
                                        float *residual, float *gradient,
                                        float *hessian);

static bool computeStep(const float *gradient, float *hessian, float *L,
                        float damping_factor, size_t dim, float *step);

// Forward declaration so that lmSolverSolve() below can call this function
// before its definition.
float computeGainRatio(const float *residual, const float *residual_new,
                       const float *step, const float *gradient,
                       float damping_factor, size_t state_dim,
                       size_t meas_dim);

static const float kEps = 1e-10f;

// FUNCTION IMPLEMENTATIONS
////////////////////////////////////////////////////////////////////////
void lmSolverInit(struct LmSolver *solver, const struct LmParams *params,
                  ResidualAndJacobianFunction func) {
  ASSERT_NOT_NULL(solver);
  ASSERT_NOT_NULL(params);
  ASSERT_NOT_NULL(func);
  memset(solver, 0, sizeof(struct LmSolver));
  memcpy(&solver->params, params, sizeof(struct LmParams));
  solver->func = func;
  solver->num_iter = 0;
}

void lmSolverSetData(struct LmSolver *solver, struct LmData *data) {
  ASSERT_NOT_NULL(solver);
  ASSERT_NOT_NULL(data);
  solver->data = data;
}
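
/*
 * Example usage: a minimal sketch of driving this solver. The error function
 * myResidualAndJacobianFunc, the dimensions MY_STATE_DIM / MY_MEAS_DIM, and
 * all parameter values below are hypothetical; only the LmParams field names
 * are taken from their usage in this file.
 *
 *   struct LmSolver solver;
 *   struct LmData data;
 *   const struct LmParams params = {
 *       .max_iterations = 20,             // hypothetical values
 *       .initial_u_scale = 1e-3f,
 *       .gradient_threshold = 1e-8f,
 *       .relative_step_threshold = 1e-6f,
 *   };
 *   lmSolverInit(&solver, &params, myResidualAndJacobianFunc);
 *   lmSolverSetData(&solver, &data);
 *
 *   float state[MY_STATE_DIM];
 *   // Pass NULL for f_data when the error function needs no extra data.
 *   const enum LmStatus status = lmSolverSolve(&solver, initial_state, NULL,
 *                                              MY_STATE_DIM, MY_MEAS_DIM,
 *                                              state);
 */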

enum LmStatus lmSolverSolve(struct LmSolver *solver, const float *initial_state,
                            void *f_data, size_t state_dim, size_t meas_dim,
                            float *state) {
  // Initialize parameters.
  float damping_factor = 0.0f;
  float v = 2.0f;

  // Check dimensions.
  if (meas_dim > MAX_LM_MEAS_DIMENSION || state_dim > MAX_LM_STATE_DIMENSION) {
    return INVALID_DATA_DIMENSIONS;
  }

  // Check pointers (note that f_data can be null if no additional data is
  // required by the error function).
  ASSERT_NOT_NULL(solver);
  ASSERT_NOT_NULL(initial_state);
  ASSERT_NOT_NULL(state);
  ASSERT_NOT_NULL(solver->data);

  // Allocate memory for intermediate variables.
  float state_new[MAX_LM_STATE_DIMENSION];
  struct LmData *data = solver->data;

  // state = initial_state, num_iter = 0.
  memcpy(state, initial_state, sizeof(float) * state_dim);
  solver->num_iter = 0;

  // Compute the initial cost function gradient and return early if it is
  // already small enough to constitute a solution.
  if (computeResidualAndGradients(solver->func, state, f_data, data->temp,
                                  solver->params.gradient_threshold, state_dim,
                                  meas_dim, data->residual, data->gradient,
                                  data->hessian)) {
    return GRADIENT_SUFFICIENTLY_SMALL;
  }

  // Initialize the damping parameter.
  damping_factor = solver->params.initial_u_scale *
                   matMaxDiagonalElement(data->hessian, state_dim);

  // Iterate solution.
  for (solver->num_iter = 0; solver->num_iter < solver->params.max_iterations;
       ++solver->num_iter) {
    // Compute new solver step.
    if (!computeStep(data->gradient, data->hessian, data->temp, damping_factor,
                     state_dim, data->step)) {
      return CHOLESKY_FAIL;
    }

    // If the new step is already sufficiently small, we have a solution.
    if (checkRelativeStepSize(data->step, state, state_dim,
                              solver->params.relative_step_threshold)) {
      return RELATIVE_STEP_SUFFICIENTLY_SMALL;
    }

    // state_new = state + step.
    vecAdd(state_new, state, data->step, state_dim);

    // Compute new cost function residual.
    solver->func(state_new, f_data, data->residual_new, NULL);

    // Compute the ratio of actual to predicted cost function gain for this
    // step.
    const float gain_ratio = computeGainRatio(data->residual,
                                              data->residual_new, data->step,
                                              data->gradient, damping_factor,
                                              state_dim, meas_dim);

    // If the gain ratio is positive, the step is good; otherwise adjust the
    // damping factor and compute a new step.
    if (gain_ratio > 0.0f) {
      // Set state to the new state vector: state = state_new.
      memcpy(state, state_new, sizeof(float) * state_dim);

      // Check if the cost function gradient is now sufficiently small,
      // in which case we have a local solution.
      if (computeResidualAndGradients(solver->func, state, f_data, data->temp,
                                      solver->params.gradient_threshold,
                                      state_dim, meas_dim, data->residual,
                                      data->gradient, data->hessian)) {
        return GRADIENT_SUFFICIENTLY_SMALL;
      }

      // Update the damping factor based on the gain ratio.
      // Note, this update logic comes from Equation 2.21 in:
      // [Madsen, Kaj, Hans Bruun Nielsen, and Ole Tingleff.
      // "Methods for non-linear least squares problems." (2004)].
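      // i.e. damping_factor *= max(1/3, 1 - (2 * gain_ratio - 1)^3), and the
      // growth factor v is reset to 2.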
      const float tmp = 2.f * gain_ratio - 1.f;
      damping_factor *= NANO_MAX(0.33333f, 1.f - tmp * tmp * tmp);
      v = 2.f;
    } else {
      // Update the damping factor and try again.
      damping_factor *= v;
      v *= 2.f;
    }
  }

  return HIT_MAX_ITERATIONS;
}

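/*
 * Computes the ratio of the actual cost function reduction achieved by the
 * proposed step to the reduction predicted by the local linear model:
 *
 *   gain_ratio = (residual' residual - residual_new' residual_new) /
 *                (0.5 * step' * (damping_factor * step + gradient))
 *
 * A positive ratio means the step reduced the cost and should be accepted.
 */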
float computeGainRatio(const float *residual, const float *residual_new,
                       const float *step, const float *gradient,
                       float damping_factor, size_t state_dim,
                       size_t meas_dim) {
  // Compute true_gain = residual' residual - residual_new' residual_new.
  const float true_gain = vecDot(residual, residual, meas_dim) -
                          vecDot(residual_new, residual_new, meas_dim);

  // predicted_gain = 0.5 * step' * (damping_factor * step + gradient).
  float tmp[MAX_LM_STATE_DIMENSION];
  vecScalarMul(tmp, step, damping_factor, state_dim);
  vecAddInPlace(tmp, gradient, state_dim);
  const float predicted_gain = 0.5f * vecDot(step, tmp, state_dim);

  // Check that we don't divide by zero! If the denominator is too small,
  // set gain_ratio = 1 to use the current step.
  if (predicted_gain < kEps) {
    return 1.f;
  }

  return true_gain / predicted_gain;
}

/*
 * Tests if a solution is found based on the size of the step relative to the
 * current state magnitude. Returns true if a solution is found.
 *
 * TODO(dvitus): consider optimization of this function to use squared norm
 * rather than norm for relative error computation to avoid square root.
 */
bool checkRelativeStepSize(const float *step, const float *state,
                           size_t dim, float relative_error_threshold) {
  // r = eps * (||x|| + eps)
  const float relative_error = relative_error_threshold *
      (vecNorm(state, dim) + relative_error_threshold);

  // Solved if ||step|| <= r.
  // Use the squared version of this comparison to avoid a square root.
  return (vecNormSquared(step, dim) <= relative_error * relative_error);
}
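
/*
 * Note on the TODO above: the step comparison already avoids one square root
 * by comparing squared norms, but vecNorm(state, dim) still takes one.
 * Expanding the squared threshold,
 *
 *   r^2 = eps^2 * (||x||^2 + 2 * eps * ||x|| + eps^2),
 *
 * the cross term still depends on ||x||, so removing the remaining square
 * root exactly is not possible; a sqrt-free variant (e.g. testing
 * ||step||^2 <= eps^2 * (||x||^2 + eps^2)) would slightly change the
 * threshold semantics.
 */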

/*
 * Computes the residual, f(x), as well as the gradient and hessian of the cost
 * function for the given state.
 *
 * Returns a boolean indicating whether the computed gradient is sufficiently
 * small to indicate that a solution has been found.
 *
 * INPUTS:
 * state: state estimate (x) for which to compute the gradient & hessian.
 * f_data: pointer to parameter data needed for the residual or jacobian.
 * jacobian: pointer to temporary memory for storing the jacobian.
 *           Must be at least MAX_LM_STATE_DIMENSION * MAX_LM_MEAS_DIMENSION.
 * gradient_threshold: if the gradient is below this threshold, the function
 *                     returns true.
 *
 * OUTPUTS:
 * residual: f(x).
 * gradient: -J' f(x), where J = df(x)/dx is the Jacobian.
 * hessian: J' J, the Gauss-Newton approximation of the cost function Hessian.
 */
bool computeResidualAndGradients(ResidualAndJacobianFunction func,
                                 const float *state, const void *f_data,
                                 float *jacobian, float gradient_threshold,
                                 size_t state_dim, size_t meas_dim,
                                 float *residual, float *gradient,
                                 float *hessian) {
  // Compute the residual and Jacobian.
  ASSERT_NOT_NULL(state);
  ASSERT_NOT_NULL(residual);
  ASSERT_NOT_NULL(gradient);
  ASSERT_NOT_NULL(hessian);
  func(state, f_data, residual, jacobian);

  // Compute the cost function hessian = jacobian' jacobian and
  // gradient = -jacobian' residual.
  matTransposeMultiplyMat(hessian, jacobian, meas_dim, state_dim);
  matTransposeMultiplyVec(gradient, jacobian, residual, meas_dim, state_dim);
  vecScalarMulInPlace(gradient, -1.f, state_dim);

  // Check if a solution is found (cost function gradient is sufficiently
  // small).
  return (vecMaxAbsoluteValue(gradient, state_dim) < gradient_threshold);
}
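
/*
 * Example ResidualAndJacobianFunction: a minimal sketch for fitting a line
 * y = a * t + b to measured points. Everything here (the struct, the names,
 * MY_MEAS_DIM, and the assumption that the jacobian is stored row-major with
 * one state_dim-wide row per measurement) is illustrative, not part of this
 * library; see the header for the authoritative signature.
 *
 *   struct LineFitData {
 *     float t[MY_MEAS_DIM];  // sample points (hypothetical)
 *     float y[MY_MEAS_DIM];  // measurements (hypothetical)
 *   };
 *
 *   void lineFitFunc(const float *state, const void *f_data,
 *                    float *residual, float *jacobian) {
 *     const struct LineFitData *d = (const struct LineFitData *)f_data;
 *     const float a = state[0];
 *     const float b = state[1];
 *     for (size_t i = 0; i < MY_MEAS_DIM; ++i) {
 *       residual[i] = a * d->t[i] + b - d->y[i];
 *       if (jacobian != NULL) {  // jacobian may be NULL (residual-only call).
 *         jacobian[i * 2 + 0] = d->t[i];  // d(residual_i)/da
 *         jacobian[i * 2 + 1] = 1.f;      // d(residual_i)/db
 *       }
 *     }
 *   }
 */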

/*
 * Computes the Levenberg-Marquardt solver step to satisfy the following:
 *    (J'J + uI) * step = -J' f
 *
 * INPUTS:
 * gradient: -J' f.
 * hessian: J' J.
 * L: temp memory of at least MAX_LM_STATE_DIMENSION * MAX_LM_STATE_DIMENSION.
 * damping_factor: u.
 * dim: state dimension.
 *
 * OUTPUTS:
 * step: solution to the above equation.
 * Returns false if the solve fails (due to a Cholesky decomposition failure),
 * otherwise returns true.
 *
 * Note that the hessian is modified in this function in order to reduce
 * local memory requirements.
 */
bool computeStep(const float *gradient, float *hessian, float *L,
                 float damping_factor, size_t dim, float *step) {
  // 1) A = hessian + damping_factor * Identity.
  matAddConstantDiagonal(hessian, damping_factor, dim);

  // 2) Solve A * step = gradient for step:
  //    a) compute the Cholesky decomposition A = L L'.
  if (!matCholeskyDecomposition(L, hessian, dim)) {
    return false;
  }

  //    b) solve for step via back-substitution.
  return matLinearSolveCholesky(step, L, gradient, dim);
}
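
/*
 * Sanity check for the scalar case (dim == 1): the system above reduces to
 * (hessian + damping_factor) * step = gradient, i.e.
 *
 *   step = gradient / (hessian + damping_factor),
 *
 * which approaches a Gauss-Newton step as damping_factor -> 0 and a short
 * gradient-descent step when damping_factor is large, the standard
 * Levenberg-Marquardt interpolation between the two.
 */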