1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
8 //
9 //
10 // License Agreement
11 // For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
22 //
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
26 //
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42
43 #include "precomp.hpp"
44 #include "opencv2/imgproc/imgproc_c.h"
45 #include "opencv2/calib3d/calib3d_c.h"
46 #include <stdio.h>
47 #include <iterator>
48
49 /*
50    This is a straightforward port (v3) of the Matlab calibration engine by Jean-Yves Bouguet
51    that is (to a large extent) based on the paper:
52 Z. Zhang. "A flexible new technique for camera calibration".
53 IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
54
55    The initial port was done by Valery Mosyagin.
56 */
57
58 using namespace cv;
59
60 // reimplementation of dAB.m
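// Computes the derivatives of the matrix product C = A*B with respect to the
// entries of A and B. With all matrices flattened row-wise, for an MxL matrix A
// and an LxN matrix B:
//   dABdA is (M*N) x (M*L) with d C(i1,i2) / d A(i1,j) = B(j,i2),
//   dABdB is (M*N) x (L*N) with d C(i1,i2) / d B(j,i2) = A(i1,j),
// and all other entries are zero. Either output may be NULL if it is not needed.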
61 CV_IMPL void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )
62 {
63 int i, j, M, N, L;
64 int bstep;
65
66 CV_Assert( CV_IS_MAT(A) && CV_IS_MAT(B) );
67 CV_Assert( CV_ARE_TYPES_EQ(A, B) &&
68 (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) );
69 CV_Assert( A->cols == B->rows );
70
71 M = A->rows;
72 L = A->cols;
73 N = B->cols;
74 bstep = B->step/CV_ELEM_SIZE(B->type);
75
76 if( dABdA )
77 {
78 CV_Assert( CV_ARE_TYPES_EQ(A, dABdA) &&
79 dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols );
80 }
81
82 if( dABdB )
83 {
84 CV_Assert( CV_ARE_TYPES_EQ(A, dABdB) &&
85 dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols );
86 }
87
88 if( CV_MAT_TYPE(A->type) == CV_32F )
89 {
90 for( i = 0; i < M*N; i++ )
91 {
92 int i1 = i / N, i2 = i % N;
93
94 if( dABdA )
95 {
96 float* dcda = (float*)(dABdA->data.ptr + dABdA->step*i);
97 const float* b = (const float*)B->data.ptr + i2;
98
99 for( j = 0; j < M*L; j++ )
100 dcda[j] = 0;
101 for( j = 0; j < L; j++ )
102 dcda[i1*L + j] = b[j*bstep];
103 }
104
105 if( dABdB )
106 {
107 float* dcdb = (float*)(dABdB->data.ptr + dABdB->step*i);
108 const float* a = (const float*)(A->data.ptr + A->step*i1);
109
110 for( j = 0; j < L*N; j++ )
111 dcdb[j] = 0;
112 for( j = 0; j < L; j++ )
113 dcdb[j*N + i2] = a[j];
114 }
115 }
116 }
117 else
118 {
119 for( i = 0; i < M*N; i++ )
120 {
121 int i1 = i / N, i2 = i % N;
122
123 if( dABdA )
124 {
125 double* dcda = (double*)(dABdA->data.ptr + dABdA->step*i);
126 const double* b = (const double*)B->data.ptr + i2;
127
128 for( j = 0; j < M*L; j++ )
129 dcda[j] = 0;
130 for( j = 0; j < L; j++ )
131 dcda[i1*L + j] = b[j*bstep];
132 }
133
134 if( dABdB )
135 {
136 double* dcdb = (double*)(dABdB->data.ptr + dABdB->step*i);
137 const double* a = (const double*)(A->data.ptr + A->step*i1);
138
139 for( j = 0; j < L*N; j++ )
140 dcdb[j] = 0;
141 for( j = 0; j < L; j++ )
142 dcdb[j*N + i2] = a[j];
143 }
144 }
145 }
146 }
147
148 // reimplementation of compose_motion.m
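// Composes two rigid-body transforms given as rotation vectors and translation
// vectors: R3 = R2*R1 and t3 = R2*t1 + t2, i.e. (rvec1, tvec1) is applied first
// and (rvec2, tvec2) second. The optional outputs are the Jacobians of rvec3 and
// tvec3 with respect to each input, obtained by chaining cvRodrigues2 and
// cvCalcMatMulDeriv; dr3dt1, dr3dt2 and dt3dr1 are identically zero and dt3dt2 is
// the identity.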
149 CV_IMPL void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
150 const CvMat* _rvec2, const CvMat* _tvec2,
151 CvMat* _rvec3, CvMat* _tvec3,
152 CvMat* dr3dr1, CvMat* dr3dt1,
153 CvMat* dr3dr2, CvMat* dr3dt2,
154 CvMat* dt3dr1, CvMat* dt3dt1,
155 CvMat* dt3dr2, CvMat* dt3dt2 )
156 {
157 double _r1[3], _r2[3];
158 double _R1[9], _d1[9*3], _R2[9], _d2[9*3];
159 CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2);
160 CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2);
161 CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2);
162
163 CV_Assert( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) );
164
165 CV_Assert( CV_MAT_TYPE(_rvec1->type) == CV_32F ||
166 CV_MAT_TYPE(_rvec1->type) == CV_64F );
167
168 CV_Assert( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) );
169
170 cvConvert( _rvec1, &r1 );
171 cvConvert( _rvec2, &r2 );
172
173 cvRodrigues2( &r1, &R1, &dR1dr1 );
174 cvRodrigues2( &r2, &R2, &dR2dr2 );
175
176 if( _rvec3 || dr3dr1 || dr3dr2 )
177 {
178 double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];
179 double _W1[9*3], _W2[3*3];
180 CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3);
181 CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2);
182 CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3);
183 CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2);
184
185 cvMatMul( &R2, &R1, &R3 );
186 cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 );
187
188 cvRodrigues2( &R3, &r3, &dr3dR3 );
189
190 if( _rvec3 )
191 cvConvert( &r3, _rvec3 );
192
193 if( dr3dr1 )
194 {
195 cvMatMul( &dr3dR3, &dR3dR1, &W1 );
196 cvMatMul( &W1, &dR1dr1, &W2 );
197 cvConvert( &W2, dr3dr1 );
198 }
199
200 if( dr3dr2 )
201 {
202 cvMatMul( &dr3dR3, &dR3dR2, &W1 );
203 cvMatMul( &W1, &dR2dr2, &W2 );
204 cvConvert( &W2, dr3dr2 );
205 }
206 }
207
208 if( dr3dt1 )
209 cvZero( dr3dt1 );
210 if( dr3dt2 )
211 cvZero( dr3dt2 );
212
213 if( _tvec3 || dt3dr2 || dt3dt1 )
214 {
215 double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3];
216 CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2);
217 CvMat t3 = cvMat(3,1,CV_64F,_t3);
218 CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2);
219 CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1);
220 CvMat W3 = cvMat(3, 3, CV_64F, _W3);
221
222 CV_Assert( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) );
223 CV_Assert( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) );
224
225 cvConvert( _tvec1, &t1 );
226 cvConvert( _tvec2, &t2 );
227 cvMatMulAdd( &R2, &t1, &t2, &t3 );
228
229 if( _tvec3 )
230 cvConvert( &t3, _tvec3 );
231
232 if( dt3dr2 || dt3dt1 )
233 {
234 cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 );
235 if( dt3dr2 )
236 {
237 cvMatMul( &dxdR2, &dR2dr2, &W3 );
238 cvConvert( &W3, dt3dr2 );
239 }
240 if( dt3dt1 )
241 cvConvert( &dxdt1, dt3dt1 );
242 }
243 }
244
245 if( dt3dt2 )
246 cvSetIdentity( dt3dt2 );
247 if( dt3dr1 )
248 cvZero( dt3dr1 );
249 }
250
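// Converts a rotation vector into a 3x3 rotation matrix (when src is 3x1 or 1x3)
// or a rotation matrix back into a rotation vector (when src is 3x3), optionally
// returning the 3x9 or 9x3 Jacobian of the output with respect to the input.
//
// A minimal usage sketch (hypothetical values), covering both directions:
//   double rv[3] = { 0.1, 0.2, 0.3 }, Rm[9];
//   CvMat _rv = cvMat( 3, 1, CV_64F, rv ), _Rm = cvMat( 3, 3, CV_64F, Rm );
//   cvRodrigues2( &_rv, &_Rm );   // rotation vector -> rotation matrix
//   cvRodrigues2( &_Rm, &_rv );   // rotation matrix -> rotation vector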
251 CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
252 {
253 int depth, elem_size;
254 int i, k;
255 double J[27];
256 CvMat matJ = cvMat( 3, 9, CV_64F, J );
257
258 if( !CV_IS_MAT(src) )
259 CV_Error( !src ? CV_StsNullPtr : CV_StsBadArg, "Input argument is not a valid matrix" );
260
261 if( !CV_IS_MAT(dst) )
262 CV_Error( !dst ? CV_StsNullPtr : CV_StsBadArg,
263 "The first output argument is not a valid matrix" );
264
265 depth = CV_MAT_DEPTH(src->type);
266 elem_size = CV_ELEM_SIZE(depth);
267
268 if( depth != CV_32F && depth != CV_64F )
269 CV_Error( CV_StsUnsupportedFormat, "The matrices must have 32f or 64f data type" );
270
271 if( !CV_ARE_DEPTHS_EQ(src, dst) )
272 CV_Error( CV_StsUnmatchedFormats, "All the matrices must have the same data type" );
273
274 if( jacobian )
275 {
276 if( !CV_IS_MAT(jacobian) )
277 CV_Error( CV_StsBadArg, "Jacobian is not a valid matrix" );
278
279 if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 )
280 CV_Error( CV_StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" );
281
282 if( (jacobian->rows != 9 || jacobian->cols != 3) &&
283 (jacobian->rows != 3 || jacobian->cols != 9))
284 CV_Error( CV_StsBadSize, "Jacobian must be 3x9 or 9x3" );
285 }
286
287 if( src->cols == 1 || src->rows == 1 )
288 {
289 double rx, ry, rz, theta;
290 int step = src->rows > 1 ? src->step / elem_size : 1;
291
292 if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 )
293 CV_Error( CV_StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" );
294
295 if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 )
296 CV_Error( CV_StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" );
297
298 if( depth == CV_32F )
299 {
300 rx = src->data.fl[0];
301 ry = src->data.fl[step];
302 rz = src->data.fl[step*2];
303 }
304 else
305 {
306 rx = src->data.db[0];
307 ry = src->data.db[step];
308 rz = src->data.db[step*2];
309 }
310 theta = std::sqrt(rx*rx + ry*ry + rz*rz);
311
312 if( theta < DBL_EPSILON )
313 {
314 cvSetIdentity( dst );
315
316 if( jacobian )
317 {
318 memset( J, 0, sizeof(J) );
319 J[5] = J[15] = J[19] = -1;
320 J[7] = J[11] = J[21] = 1;
321 }
322 }
323 else
324 {
325 const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
326
327 double c = cos(theta);
328 double s = sin(theta);
329 double c1 = 1. - c;
330 double itheta = theta ? 1./theta : 0.;
331
332 rx *= itheta; ry *= itheta; rz *= itheta;
333
334 double rrt[] = { rx*rx, rx*ry, rx*rz, rx*ry, ry*ry, ry*rz, rx*rz, ry*rz, rz*rz };
335 double _r_x_[] = { 0, -rz, ry, rz, 0, -rx, -ry, rx, 0 };
336 double R[9];
337 CvMat matR = cvMat( 3, 3, CV_64F, R );
338
339 // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
340 // where [r_x] is [0 -rz ry; rz 0 -rx; -ry rx 0]
341 for( k = 0; k < 9; k++ )
342 R[k] = c*I[k] + c1*rrt[k] + s*_r_x_[k];
343
344 cvConvert( &matR, dst );
345
346 if( jacobian )
347 {
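// The Jacobian dR/dr is assembled row by row as
//   J[i] = a0*I + a1*r*r^T + a2*drrt[i] + a3*[r]_x + a4*d_r_x_[i],
// where the coefficients a0..a4 come from differentiating
// cos(theta)*I + (1-cos(theta))*r*r^T + sin(theta)*[r]_x with respect to the
// i-th component of the (unnormalized) rotation vector, using d(theta)/dr_i = r_i.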
348 double drrt[] = { rx+rx, ry, rz, ry, 0, 0, rz, 0, 0,
349 0, rx, 0, rx, ry+ry, rz, 0, rz, 0,
350 0, 0, rx, 0, 0, ry, rx, ry, rz+rz };
351 double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0,
352 0, 0, 1, 0, 0, 0, -1, 0, 0,
353 0, -1, 0, 1, 0, 0, 0, 0, 0 };
354 for( i = 0; i < 3; i++ )
355 {
356 double ri = i == 0 ? rx : i == 1 ? ry : rz;
357 double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta;
358 double a3 = (c - s*itheta)*ri, a4 = s*itheta;
359 for( k = 0; k < 9; k++ )
360 J[i*9+k] = a0*I[k] + a1*rrt[k] + a2*drrt[i*9+k] +
361 a3*_r_x_[k] + a4*d_r_x_[i*9+k];
362 }
363 }
364 }
365 }
366 else if( src->cols == 3 && src->rows == 3 )
367 {
368 double R[9], U[9], V[9], W[3], rx, ry, rz;
369 CvMat matR = cvMat( 3, 3, CV_64F, R );
370 CvMat matU = cvMat( 3, 3, CV_64F, U );
371 CvMat matV = cvMat( 3, 3, CV_64F, V );
372 CvMat matW = cvMat( 3, 1, CV_64F, W );
373 double theta, s, c;
374 int step = dst->rows > 1 ? dst->step / elem_size : 1;
375
376 if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) &&
377 (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1))
378 CV_Error( CV_StsBadSize, "Output matrix must be 1x3 or 3x1" );
379
380 cvConvert( src, &matR );
381 if( !cvCheckArr( &matR, CV_CHECK_RANGE+CV_CHECK_QUIET, -100, 100 ) )
382 {
383 cvZero(dst);
384 if( jacobian )
385 cvZero(jacobian);
386 return 0;
387 }
388
389 cvSVD( &matR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
390 cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T );
391
392 rx = R[7] - R[5];
393 ry = R[2] - R[6];
394 rz = R[3] - R[1];
395
396 s = std::sqrt((rx*rx + ry*ry + rz*rz)*0.25);
397 c = (R[0] + R[4] + R[8] - 1)*0.5;
398 c = c > 1. ? 1. : c < -1. ? -1. : c;
399 theta = acos(c);
400
401 if( s < 1e-5 )
402 {
403 double t;
404
405 if( c > 0 )
406 rx = ry = rz = 0;
407 else
408 {
409 t = (R[0] + 1)*0.5;
410 rx = std::sqrt(MAX(t,0.));
411 t = (R[4] + 1)*0.5;
412 ry = std::sqrt(MAX(t,0.))*(R[1] < 0 ? -1. : 1.);
413 t = (R[8] + 1)*0.5;
414 rz = std::sqrt(MAX(t,0.))*(R[2] < 0 ? -1. : 1.);
415 if( fabs(rx) < fabs(ry) && fabs(rx) < fabs(rz) && (R[5] > 0) != (ry*rz > 0) )
416 rz = -rz;
417 theta /= std::sqrt(rx*rx + ry*ry + rz*rz);
418 rx *= theta;
419 ry *= theta;
420 rz *= theta;
421 }
422
423 if( jacobian )
424 {
425 memset( J, 0, sizeof(J) );
426 if( c > 0 )
427 {
428 J[5] = J[15] = J[19] = -0.5;
429 J[7] = J[11] = J[21] = 0.5;
430 }
431 }
432 }
433 else
434 {
435 double vth = 1/(2*s);
436
437 if( jacobian )
438 {
439 double t, dtheta_dtr = -1./s;
440 // var1 = [vth;theta]
441 // var = [om1;var1] = [om1;vth;theta]
442 double dvth_dtheta = -vth*c/s;
443 double d1 = 0.5*dvth_dtheta*dtheta_dtr;
444 double d2 = 0.5*dtheta_dtr;
445 // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR
446 double dvardR[5*9] =
447 {
448 0, 0, 0, 0, 0, 1, 0, -1, 0,
449 0, 0, -1, 0, 0, 0, 1, 0, 0,
450 0, 1, 0, -1, 0, 0, 0, 0, 0,
451 d1, 0, 0, 0, d1, 0, 0, 0, d1,
452 d2, 0, 0, 0, d2, 0, 0, 0, d2
453 };
454 // var2 = [om;theta]
455 double dvar2dvar[] =
456 {
457 vth, 0, 0, rx, 0,
458 0, vth, 0, ry, 0,
459 0, 0, vth, rz, 0,
460 0, 0, 0, 0, 1
461 };
462 double domegadvar2[] =
463 {
464 theta, 0, 0, rx*vth,
465 0, theta, 0, ry*vth,
466 0, 0, theta, rz*vth
467 };
468
469 CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR );
470 CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar );
471 CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 );
472 double t0[3*5];
473 CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 );
474
475 cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 );
476 cvMatMul( &_t0, &_dvardR, &matJ );
477
478 // transpose every row of matJ (treat the rows as 3x3 matrices)
479 CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t);
480 CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t);
481 CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t);
482 }
483
484 vth *= theta;
485 rx *= vth; ry *= vth; rz *= vth;
486 }
487
488 if( depth == CV_32F )
489 {
490 dst->data.fl[0] = (float)rx;
491 dst->data.fl[step] = (float)ry;
492 dst->data.fl[step*2] = (float)rz;
493 }
494 else
495 {
496 dst->data.db[0] = rx;
497 dst->data.db[step] = ry;
498 dst->data.db[step*2] = rz;
499 }
500 }
501
502 if( jacobian )
503 {
504 if( depth == CV_32F )
505 {
506 if( jacobian->rows == matJ.rows )
507 cvConvert( &matJ, jacobian );
508 else
509 {
510 float Jf[3*9];
511 CvMat _Jf = cvMat( matJ.rows, matJ.cols, CV_32FC1, Jf );
512 cvConvert( &matJ, &_Jf );
513 cvTranspose( &_Jf, jacobian );
514 }
515 }
516 else if( jacobian->rows == matJ.rows )
517 cvCopy( &matJ, jacobian );
518 else
519 cvTranspose( &matJ, jacobian );
520 }
521
522 return 1;
523 }
524
525
526 static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12 or 12x1 floating-point vector";
527
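// Projects 3D object points into the image using the rotation vector r_vec, the
// translation t_vec, the camera matrix A and the distortion coefficients,
// optionally producing the Jacobians of the image points with respect to the
// rotation, translation, focal lengths, principal point and distortion parameters.
// The distortion vector k is laid out as
//   (k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4):
// k1..k6 are the radial terms of the rational model (k4..k6 in the denominator),
// p1, p2 the tangential terms and s1..s4 the thin-prism terms; shorter vectors
// simply leave the remaining entries at zero.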
528 CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
529 const CvMat* r_vec,
530 const CvMat* t_vec,
531 const CvMat* A,
532 const CvMat* distCoeffs,
533 CvMat* imagePoints, CvMat* dpdr,
534 CvMat* dpdt, CvMat* dpdf,
535 CvMat* dpdc, CvMat* dpdk,
536 double aspectRatio )
537 {
538 Ptr<CvMat> matM, _m;
539 Ptr<CvMat> _dpdr, _dpdt, _dpdc, _dpdf, _dpdk;
540
541 int i, j, count;
542 int calc_derivatives;
543 const CvPoint3D64f* M;
544 CvPoint2D64f* m;
545 double r[3], R[9], dRdr[27], t[3], a[9], k[12] = {0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
546 CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
547 CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
548 double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
549 int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0;
550 bool fixedAspectRatio = aspectRatio > FLT_EPSILON;
551
552 if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) ||
553 !CV_IS_MAT(t_vec) || !CV_IS_MAT(A) ||
554 /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) )
555 CV_Error( CV_StsBadArg, "One of the required arguments is not a valid matrix" );
556
557 int total = objectPoints->rows * objectPoints->cols * CV_MAT_CN(objectPoints->type);
558 if(total % 3 != 0)
559 {
560 // we have dropped support for homogeneous coordinates because they cause ambiguity in the interpretation of the input data
561 CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
562 }
563 count = total / 3;
564
565 if( CV_IS_CONT_MAT(objectPoints->type) &&
566 (CV_MAT_DEPTH(objectPoints->type) == CV_32F || CV_MAT_DEPTH(objectPoints->type) == CV_64F)&&
567 ((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) ||
568 (objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3) ||
569 (objectPoints->rows == 3 && CV_MAT_CN(objectPoints->type) == 1 && objectPoints->cols == count)))
570 {
571 matM.reset(cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) ));
572 cvConvert(objectPoints, matM);
573 }
574 else
575 {
576 // matM = cvCreateMat( 1, count, CV_64FC3 );
577 // cvConvertPointsHomogeneous( objectPoints, matM );
578 CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
579 }
580
581 if( CV_IS_CONT_MAT(imagePoints->type) &&
582 (CV_MAT_DEPTH(imagePoints->type) == CV_32F || CV_MAT_DEPTH(imagePoints->type) == CV_64F) &&
583 ((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) ||
584 (imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2) ||
585 (imagePoints->rows == 2 && CV_MAT_CN(imagePoints->type) == 1 && imagePoints->cols == count)))
586 {
587 _m.reset(cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) ));
588 cvConvert(imagePoints, _m);
589 }
590 else
591 {
592 // _m = cvCreateMat( 1, count, CV_64FC2 );
593 CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
594 }
595
596 M = (CvPoint3D64f*)matM->data.db;
597 m = (CvPoint2D64f*)_m->data.db;
598
599 if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) ||
600 (((r_vec->rows != 1 && r_vec->cols != 1) ||
601 r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) &&
602 ((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1)))
603 CV_Error( CV_StsBadArg, "Rotation must be represented by 1x3 or 3x1 "
604 "floating-point rotation vector, or 3x3 rotation matrix" );
605
606 if( r_vec->rows == 3 && r_vec->cols == 3 )
607 {
608 _r = cvMat( 3, 1, CV_64FC1, r );
609 cvRodrigues2( r_vec, &_r );
610 cvRodrigues2( &_r, &matR, &_dRdr );
611 cvCopy( r_vec, &matR );
612 }
613 else
614 {
615 _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
616 cvConvert( r_vec, &_r );
617 cvRodrigues2( &_r, &matR, &_dRdr );
618 }
619
620 if( (CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) ||
621 (t_vec->rows != 1 && t_vec->cols != 1) ||
622 t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
623 CV_Error( CV_StsBadArg,
624 "Translation vector must be 1x3 or 3x1 floating-point vector" );
625
626 _t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t );
627 cvConvert( t_vec, &_t );
628
629 if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) ||
630 A->rows != 3 || A->cols != 3 )
631 CV_Error( CV_StsBadArg, "Intrinsic parameters must be a 3x3 floating-point matrix" );
632
633 cvConvert( A, &_a );
634 fx = a[0]; fy = a[4];
635 cx = a[2]; cy = a[5];
636
637 if( fixedAspectRatio )
638 fx = fy*aspectRatio;
639
640 if( distCoeffs )
641 {
642 if( !CV_IS_MAT(distCoeffs) ||
643 (CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&
644 CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
645 (distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
646 (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
647 distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
648 distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8 &&
649 distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 12) )
650 CV_Error( CV_StsBadArg, cvDistCoeffErr );
651
652 _k = cvMat( distCoeffs->rows, distCoeffs->cols,
653 CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
654 cvConvert( distCoeffs, &_k );
655 }
656
657 if( dpdr )
658 {
659 if( !CV_IS_MAT(dpdr) ||
660 (CV_MAT_TYPE(dpdr->type) != CV_32FC1 &&
661 CV_MAT_TYPE(dpdr->type) != CV_64FC1) ||
662 dpdr->rows != count*2 || dpdr->cols != 3 )
663 CV_Error( CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" );
664
665 if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )
666 {
667 _dpdr.reset(cvCloneMat(dpdr));
668 }
669 else
670 _dpdr.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
671 dpdr_p = _dpdr->data.db;
672 dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);
673 }
674
675 if( dpdt )
676 {
677 if( !CV_IS_MAT(dpdt) ||
678 (CV_MAT_TYPE(dpdt->type) != CV_32FC1 &&
679 CV_MAT_TYPE(dpdt->type) != CV_64FC1) ||
680 dpdt->rows != count*2 || dpdt->cols != 3 )
681 CV_Error( CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" );
682
683 if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )
684 {
685 _dpdt.reset(cvCloneMat(dpdt));
686 }
687 else
688 _dpdt.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
689 dpdt_p = _dpdt->data.db;
690 dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);
691 }
692
693 if( dpdf )
694 {
695 if( !CV_IS_MAT(dpdf) ||
696 (CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) ||
697 dpdf->rows != count*2 || dpdf->cols != 2 )
698 CV_Error( CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix" );
699
700 if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )
701 {
702 _dpdf.reset(cvCloneMat(dpdf));
703 }
704 else
705 _dpdf.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
706 dpdf_p = _dpdf->data.db;
707 dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);
708 }
709
710 if( dpdc )
711 {
712 if( !CV_IS_MAT(dpdc) ||
713 (CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) ||
714 dpdc->rows != count*2 || dpdc->cols != 2 )
715 CV_Error( CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" );
716
717 if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )
718 {
719 _dpdc.reset(cvCloneMat(dpdc));
720 }
721 else
722 _dpdc.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
723 dpdc_p = _dpdc->data.db;
724 dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);
725 }
726
727 if( dpdk )
728 {
729 if( !CV_IS_MAT(dpdk) ||
730 (CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
731 dpdk->rows != count*2 || (dpdk->cols != 12 && dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
732 CV_Error( CV_StsBadArg, "dp/dk must be 2Nx12, 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
733
734 if( !distCoeffs )
735 CV_Error( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
736
737 if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )
738 {
739 _dpdk.reset(cvCloneMat(dpdk));
740 }
741 else
742 _dpdk.reset(cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));
743 dpdk_p = _dpdk->data.db;
744 dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);
745 }
746
747 calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk;
748
749 for( i = 0; i < count; i++ )
750 {
751 double X = M[i].x, Y = M[i].y, Z = M[i].z;
752 double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
753 double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
754 double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
755 double r2, r4, r6, a1, a2, a3, cdist, icdist2;
756 double xd, yd;
757
758 z = z ? 1./z : 1;
759 x *= z; y *= z;
760
761 r2 = x*x + y*y;
762 r4 = r2*r2;
763 r6 = r4*r2;
764 a1 = 2*x*y;
765 a2 = r2 + 2*x*x;
766 a3 = r2 + 2*y*y;
767 cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
768 icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
769 xd = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4;
770 yd = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4;
771
772 m[i].x = xd*fx + cx;
773 m[i].y = yd*fy + cy;
774
775 if( calc_derivatives )
776 {
777 if( dpdc_p )
778 {
779 dpdc_p[0] = 1; dpdc_p[1] = 0;
780 dpdc_p[dpdc_step] = 0;
781 dpdc_p[dpdc_step+1] = 1;
782 dpdc_p += dpdc_step*2;
783 }
784
785 if( dpdf_p )
786 {
787 if( fixedAspectRatio )
788 {
789 dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio;
790 dpdf_p[dpdf_step] = 0;
791 dpdf_p[dpdf_step+1] = yd;
792 }
793 else
794 {
795 dpdf_p[0] = xd; dpdf_p[1] = 0;
796 dpdf_p[dpdf_step] = 0;
797 dpdf_p[dpdf_step+1] = yd;
798 }
799 dpdf_p += dpdf_step*2;
800 }
801
802 if( dpdk_p )
803 {
804 dpdk_p[0] = fx*x*icdist2*r2;
805 dpdk_p[1] = fx*x*icdist2*r4;
806 dpdk_p[dpdk_step] = fy*y*icdist2*r2;
807 dpdk_p[dpdk_step+1] = fy*y*icdist2*r4;
808 if( _dpdk->cols > 2 )
809 {
810 dpdk_p[2] = fx*a1;
811 dpdk_p[3] = fx*a2;
812 dpdk_p[dpdk_step+2] = fy*a3;
813 dpdk_p[dpdk_step+3] = fy*a1;
814 if( _dpdk->cols > 4 )
815 {
816 dpdk_p[4] = fx*x*icdist2*r6;
817 dpdk_p[dpdk_step+4] = fy*y*icdist2*r6;
818
819 if( _dpdk->cols > 5 )
820 {
821 dpdk_p[5] = fx*x*cdist*(-icdist2)*icdist2*r2;
822 dpdk_p[dpdk_step+5] = fy*y*cdist*(-icdist2)*icdist2*r2;
823 dpdk_p[6] = fx*x*cdist*(-icdist2)*icdist2*r4;
824 dpdk_p[dpdk_step+6] = fy*y*cdist*(-icdist2)*icdist2*r4;
825 dpdk_p[7] = fx*x*cdist*(-icdist2)*icdist2*r6;
826 dpdk_p[dpdk_step+7] = fy*y*cdist*(-icdist2)*icdist2*r6;
827 if( _dpdk->cols > 8 )
828 {
829 dpdk_p[8] = fx*r2; //s1
830 dpdk_p[9] = fx*r4; //s2
831 dpdk_p[10] = 0;//s3
832 dpdk_p[11] = 0;//s4
833 dpdk_p[dpdk_step+8] = 0; //s1
834 dpdk_p[dpdk_step+9] = 0; //s2
835 dpdk_p[dpdk_step+10] = fy*r2; //s3
836 dpdk_p[dpdk_step+11] = fy*r4; //s4
837 }
838 }
839 }
840 }
841 dpdk_p += dpdk_step*2;
842 }
843
844 if( dpdt_p )
845 {
846 double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z };
847 for( j = 0; j < 3; j++ )
848 {
849 double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];
850 double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;
851 double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt);
852 double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
853 double dmxdt = fx*(dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt +
854 k[2]*da1dt + k[3]*(dr2dt + 2*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt);
855 double dmydt = fy*(dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt +
856 k[2]*(dr2dt + 2*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt);
857 dpdt_p[j] = dmxdt;
858 dpdt_p[dpdt_step+j] = dmydt;
859 }
860 dpdt_p += dpdt_step*2;
861 }
862
863 if( dpdr_p )
864 {
865 double dx0dr[] =
866 {
867 X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2],
868 X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11],
869 X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20]
870 };
871 double dy0dr[] =
872 {
873 X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5],
874 X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14],
875 X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23]
876 };
877 double dz0dr[] =
878 {
879 X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8],
880 X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17],
881 X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26]
882 };
883 for( j = 0; j < 3; j++ )
884 {
885 double dxdr = z*(dx0dr[j] - x*dz0dr[j]);
886 double dydr = z*(dy0dr[j] - y*dz0dr[j]);
887 double dr2dr = 2*x*dxdr + 2*y*dydr;
888 double dcdist_dr = k[0]*dr2dr + 2*k[1]*r2*dr2dr + 3*k[4]*r4*dr2dr;
889 double dicdist2_dr = -icdist2*icdist2*(k[5]*dr2dr + 2*k[6]*r2*dr2dr + 3*k[7]*r4*dr2dr);
890 double da1dr = 2*(x*dydr + y*dxdr);
891 double dmxdr = fx*(dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr +
892 k[2]*da1dr + k[3]*(dr2dr + 2*x*dxdr) + k[8]*dr2dr + 2*r2*k[9]*dr2dr);
893 double dmydr = fy*(dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr +
894 k[2]*(dr2dr + 2*y*dydr) + k[3]*da1dr + k[10]*dr2dr + 2*r2*k[11]*dr2dr);
895 dpdr_p[j] = dmxdr;
896 dpdr_p[dpdr_step+j] = dmydr;
897 }
898 dpdr_p += dpdr_step*2;
899 }
900 }
901 }
902
903 if( _m != imagePoints )
904 cvConvert( _m, imagePoints );
905
906 if( _dpdr != dpdr )
907 cvConvert( _dpdr, dpdr );
908
909 if( _dpdt != dpdt )
910 cvConvert( _dpdt, dpdt );
911
912 if( _dpdf != dpdf )
913 cvConvert( _dpdf, dpdf );
914
915 if( _dpdc != dpdc )
916 cvConvert( _dpdc, dpdc );
917
918 if( _dpdk != dpdk )
919 cvConvert( _dpdk, dpdk );
920 }
921
922
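// Estimates the camera pose (rvec, tvec) from 3D-2D point correspondences.
// Unless useExtrinsicGuess is non-zero, an initial pose is computed first: if the
// object points are (nearly) coplanar, or there are fewer than 4 of them, a
// homography between the dominant plane of the points and the normalized image
// points is decomposed into R and t; otherwise the DLT method is used. The pose
// is then refined with a Levenberg-Marquardt loop that minimizes the reprojection
// error.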
923 CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
924 const CvMat* imagePoints, const CvMat* A,
925 const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec,
926 int useExtrinsicGuess )
927 {
928 const int max_iter = 20;
929 Ptr<CvMat> matM, _Mxy, _m, _mn, matL;
930
931 int i, count;
932 double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
933 double MM[9], U[9], V[9], W[3];
934 CvScalar Mc;
935 double param[6];
936 CvMat matA = cvMat( 3, 3, CV_64F, a );
937 CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
938 CvMat matR = cvMat( 3, 3, CV_64F, R );
939 CvMat _r = cvMat( 3, 1, CV_64F, param );
940 CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
941 CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );
942 CvMat _MM = cvMat( 3, 3, CV_64F, MM );
943 CvMat matU = cvMat( 3, 3, CV_64F, U );
944 CvMat matV = cvMat( 3, 3, CV_64F, V );
945 CvMat matW = cvMat( 3, 1, CV_64F, W );
946 CvMat _param = cvMat( 6, 1, CV_64F, param );
947 CvMat _dpdr, _dpdt;
948
949 CV_Assert( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&
950 CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );
951
952 count = MAX(objectPoints->cols, objectPoints->rows);
953 matM.reset(cvCreateMat( 1, count, CV_64FC3 ));
954 _m.reset(cvCreateMat( 1, count, CV_64FC2 ));
955
956 cvConvertPointsHomogeneous( objectPoints, matM );
957 cvConvertPointsHomogeneous( imagePoints, _m );
958 cvConvert( A, &matA );
959
960 CV_Assert( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&
961 (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );
962
963 CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
964 (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );
965
966 _mn.reset(cvCreateMat( 1, count, CV_64FC2 ));
967 _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 ));
968
969 // normalize image points
970 // (unapply the intrinsic matrix transformation and distortion)
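// Note: _Ar is the identity matrix, so cvUndistortPoints leaves the result in
// normalized camera coordinates instead of re-projecting with a new camera matrix.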
971 cvUndistortPoints( _m, _mn, &matA, distCoeffs, 0, &_Ar );
972
973 if( useExtrinsicGuess )
974 {
975 CvMat _r_temp = cvMat(rvec->rows, rvec->cols,
976 CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
977 CvMat _t_temp = cvMat(tvec->rows, tvec->cols,
978 CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);
979 cvConvert( rvec, &_r_temp );
980 cvConvert( tvec, &_t_temp );
981 }
982 else
983 {
984 Mc = cvAvg(matM);
985 cvReshape( matM, matM, 1, count );
986 cvMulTransposed( matM, &_MM, 1, &_Mc );
987 cvSVD( &_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T );
988
989 // initialize extrinsic parameters
990 if( W[2]/W[1] < 1e-3 || count < 4 )
991 {
992 // a planar structure case (all M's lie in the same plane)
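// The object points are first expressed in a plane-aligned frame (R_transform,
// T_transform) so that their third coordinate can be dropped. A homography H
// between these 2D coordinates and the normalized image points is then estimated;
// after normalization its first two columns give the first two columns of the
// rotation, their cross product gives the third, and the third column of H gives
// the translation up to the same scale. The result is re-orthogonalized via
// cvRodrigues2 and composed with the plane-alignment transform.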
993 double tt[3], h[9], h1_norm, h2_norm;
994 CvMat* R_transform = &matV;
995 CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
996 CvMat matH = cvMat( 3, 3, CV_64F, h );
997 CvMat _h1, _h2, _h3;
998
999 if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
1000 cvSetIdentity( R_transform );
1001
1002 if( cvDet(R_transform) < 0 )
1003 cvScale( R_transform, R_transform, -1 );
1004
1005 cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );
1006
1007 for( i = 0; i < count; i++ )
1008 {
1009 const double* Rp = R_transform->data.db;
1010 const double* Tp = T_transform.data.db;
1011 const double* src = matM->data.db + i*3;
1012 double* dst = _Mxy->data.db + i*2;
1013
1014 dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
1015 dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
1016 }
1017
1018 cvFindHomography( _Mxy, _mn, &matH );
1019
1020 if( cvCheckArr(&matH, CV_CHECK_QUIET) )
1021 {
1022 cvGetCol( &matH, &_h1, 0 );
1023 _h2 = _h1; _h2.data.db++;
1024 _h3 = _h2; _h3.data.db++;
1025 h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
1026 h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);
1027
1028 cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) );
1029 cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) );
1030 cvScale( &_h3, &_t, 2./MAX(h1_norm + h2_norm, DBL_EPSILON));
1031 cvCrossProduct( &_h1, &_h2, &_h3 );
1032
1033 cvRodrigues2( &matH, &_r );
1034 cvRodrigues2( &_r, &matH );
1035 cvMatMulAdd( &matH, &T_transform, &_t, &_t );
1036 cvMatMul( &matH, R_transform, &matR );
1037 }
1038 else
1039 {
1040 cvSetIdentity( &matR );
1041 cvZero( &_t );
1042 }
1043
1044 cvRodrigues2( &matR, &_r );
1045 }
1046 else
1047 {
1048 // non-planar structure. Use DLT method
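// Each correspondence contributes two rows to the 2N x 12 matrix L built below,
// expressing the projection equations x_i*(P3.M_i) = P1.M_i and
// y_i*(P3.M_i) = P2.M_i in linear form for the 3x4 matrix P = [R|t] in normalized
// coordinates. The solution is the right singular vector of L with the smallest
// singular value (obtained from the SVD of L^T L); the rotation part is then
// projected onto the nearest rotation matrix with another SVD and the translation
// is rescaled accordingly.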
1049 double* L;
1050 double LL[12*12], LW[12], LV[12*12], sc;
1051 CvMat _LL = cvMat( 12, 12, CV_64F, LL );
1052 CvMat _LW = cvMat( 12, 1, CV_64F, LW );
1053 CvMat _LV = cvMat( 12, 12, CV_64F, LV );
1054 CvMat _RRt, _RR, _tt;
1055 CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db;
1056 CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;
1057
1058 matL.reset(cvCreateMat( 2*count, 12, CV_64F ));
1059 L = matL->data.db;
1060
1061 for( i = 0; i < count; i++, L += 24 )
1062 {
1063 double x = -mn[i].x, y = -mn[i].y;
1064 L[0] = L[16] = M[i].x;
1065 L[1] = L[17] = M[i].y;
1066 L[2] = L[18] = M[i].z;
1067 L[3] = L[19] = 1.;
1068 L[4] = L[5] = L[6] = L[7] = 0.;
1069 L[12] = L[13] = L[14] = L[15] = 0.;
1070 L[8] = x*M[i].x;
1071 L[9] = x*M[i].y;
1072 L[10] = x*M[i].z;
1073 L[11] = x;
1074 L[20] = y*M[i].x;
1075 L[21] = y*M[i].y;
1076 L[22] = y*M[i].z;
1077 L[23] = y;
1078 }
1079
1080 cvMulTransposed( matL, &_LL, 1 );
1081 cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
1082 _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
1083 cvGetCols( &_RRt, &_RR, 0, 3 );
1084 cvGetCol( &_RRt, &_tt, 3 );
1085 if( cvDet(&_RR) < 0 )
1086 cvScale( &_RRt, &_RRt, -1 );
1087 sc = cvNorm(&_RR);
1088 cvSVD( &_RR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
1089 cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T );
1090 cvScale( &_tt, &_t, cvNorm(&matR)/sc );
1091 cvRodrigues2( &matR, &_r );
1092 }
1093 }
1094
1095 cvReshape( matM, matM, 3, 1 );
1096 cvReshape( _mn, _mn, 2, 1 );
1097
1098 // refine extrinsic parameters using iterative algorithm
1099 CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
1100 cvCopy( &_param, solver.param );
1101
1102 for(;;)
1103 {
1104 CvMat *matJ = 0, *_err = 0;
1105 const CvMat *__param = 0;
1106 bool proceed = solver.update( __param, matJ, _err );
1107 cvCopy( __param, &_param );
1108 if( !proceed || !_err )
1109 break;
1110 cvReshape( _err, _err, 2, 1 );
1111 if( matJ )
1112 {
1113 cvGetCols( matJ, &_dpdr, 0, 3 );
1114 cvGetCols( matJ, &_dpdt, 3, 6 );
1115 cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
1116 _err, &_dpdr, &_dpdt, 0, 0, 0 );
1117 }
1118 else
1119 {
1120 cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
1121 _err, 0, 0, 0, 0, 0 );
1122 }
1123 cvSub(_err, _m, _err);
1124 cvReshape( _err, _err, 1, 2*count );
1125 }
1126 cvCopy( solver.param, &_param );
1127
1128 _r = cvMat( rvec->rows, rvec->cols,
1129 CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
1130 _t = cvMat( tvec->rows, tvec->cols,
1131 CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );
1132
1133 cvConvert( &_r, rvec );
1134 cvConvert( &_t, tvec );
1135 }
1136
1137
1138 CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
1139 const CvMat* imagePoints, const CvMat* npoints,
1140 CvSize imageSize, CvMat* cameraMatrix,
1141 double aspectRatio )
1142 {
1143 Ptr<CvMat> matA, _b, _allH;
1144
1145 int i, j, pos, nimages, ni = 0;
1146 double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
1147 double H[9], f[2];
1148 CvMat _a = cvMat( 3, 3, CV_64F, a );
1149 CvMat matH = cvMat( 3, 3, CV_64F, H );
1150 CvMat _f = cvMat( 2, 1, CV_64F, f );
1151
1152 assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
1153 CV_IS_MAT_CONT(npoints->type) );
1154 nimages = npoints->rows + npoints->cols - 1;
1155
1156 if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
1157 CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
1158 (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
1159 CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
1160 CV_Error( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );
1161
1162 if( objectPoints->rows != 1 || imagePoints->rows != 1 )
1163 CV_Error( CV_StsBadSize, "object points and image points must be single-row matrices" );
1164
1165 matA.reset(cvCreateMat( 2*nimages, 2, CV_64F ));
1166 _b.reset(cvCreateMat( 2*nimages, 1, CV_64F ));
1167 a[2] = (!imageSize.width) ? 0.5 : (imageSize.width - 1)*0.5;
1168 a[5] = (!imageSize.height) ? 0.5 : (imageSize.height - 1)*0.5;
1169 _allH.reset(cvCreateMat( nimages, 9, CV_64F ));
1170
1171 // extract vanishing points in order to obtain an initial value for the focal length
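// For each view a homography H between the planar pattern and the image is
// estimated. With the principal point fixed at the image centre, the two
// constraints on the first two columns of the rotation (orthogonality and equal
// norm) become linear equations in (1/fx^2, 1/fy^2); two such equations per view
// are accumulated in matA/_b below and the focal lengths are recovered with a
// least-squares solve.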
1172 for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1173 {
1174 double* Ap = matA->data.db + i*4;
1175 double* bp = _b->data.db + i*2;
1176 ni = npoints->data.i[i];
1177 double h[3], v[3], d1[3], d2[3];
1178 double n[4] = {0,0,0,0};
1179 CvMat _m, matM;
1180 cvGetCols( objectPoints, &matM, pos, pos + ni );
1181 cvGetCols( imagePoints, &_m, pos, pos + ni );
1182
1183 cvFindHomography( &matM, &_m, &matH );
1184 memcpy( _allH->data.db + i*9, H, sizeof(H) );
1185
1186 H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
1187 H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];
1188
1189 for( j = 0; j < 3; j++ )
1190 {
1191 double t0 = H[j*3], t1 = H[j*3+1];
1192 h[j] = t0; v[j] = t1;
1193 d1[j] = (t0 + t1)*0.5;
1194 d2[j] = (t0 - t1)*0.5;
1195 n[0] += t0*t0; n[1] += t1*t1;
1196 n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
1197 }
1198
1199 for( j = 0; j < 4; j++ )
1200 n[j] = 1./std::sqrt(n[j]);
1201
1202 for( j = 0; j < 3; j++ )
1203 {
1204 h[j] *= n[0]; v[j] *= n[1];
1205 d1[j] *= n[2]; d2[j] *= n[3];
1206 }
1207
1208 Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
1209 Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
1210 bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
1211 }
1212
1213 cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
1214 a[0] = std::sqrt(fabs(1./f[0]));
1215 a[4] = std::sqrt(fabs(1./f[1]));
1216 if( aspectRatio != 0 )
1217 {
1218 double tf = (a[0] + a[4])/(aspectRatio + 1.);
1219 a[0] = aspectRatio*tf;
1220 a[4] = tf;
1221 }
1222
1223 cvConvert( &_a, cameraMatrix );
1224 }
1225
1226
1227 /* finds intrinsic and extrinsic camera parameters
1228 from a few views of known calibration pattern */
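// The optimized parameter vector is laid out as
//   [fx, fy, cx, cy, k1, k2, p1, p2, k3, k4, k5, k6, s1, s2, s3, s4,  (NINTRINSIC = 16)
//    rvec_0 (3), tvec_0 (3), ..., rvec_{n-1} (3), tvec_{n-1} (3)],
// i.e. 16 intrinsic parameters followed by 6 extrinsic parameters per view.
// Parameters that the flags declare fixed are excluded through the solver mask.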
1229 CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
1230 const CvMat* imagePoints, const CvMat* npoints,
1231 CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
1232 CvMat* rvecs, CvMat* tvecs, int flags, CvTermCriteria termCrit )
1233 {
1234 const int NINTRINSIC = 16;
1235 Ptr<CvMat> matM, _m, _Ji, _Je, _err;
1236 CvLevMarq solver;
1237 double reprojErr = 0;
1238
1239 double A[9], k[12] = {0,0,0,0,0,0,0,0,0,0,0,0};
1240 CvMat matA = cvMat(3, 3, CV_64F, A), _k;
1241 int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
1242 double aspectRatio = 0.;
1243
1244 // 0. check the parameters & allocate buffers
1245 if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
1246 !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
1247 CV_Error( CV_StsBadArg, "One of the required vector arguments is not a valid matrix" );
1248
1249 if( imageSize.width <= 0 || imageSize.height <= 0 )
1250 CV_Error( CV_StsOutOfRange, "image width and height must be positive" );
1251
1252 if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
1253 (npoints->rows != 1 && npoints->cols != 1) )
1254 CV_Error( CV_StsUnsupportedFormat,
1255 "the array of point counters must be 1-dimensional integer vector" );
1256 //when the thin prism model is used the distortion coefficients matrix must have 12 parameters
1257 if((flags & CV_CALIB_THIN_PRISM_MODEL) && (distCoeffs->cols*distCoeffs->rows != 12))
1258 CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
1259
1260 nimages = npoints->rows*npoints->cols;
1261 npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
1262
1263 if( rvecs )
1264 {
1265 cn = CV_MAT_CN(rvecs->type);
1266 if( !CV_IS_MAT(rvecs) ||
1267 (CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
1268 ((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
1269 (rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
1270 CV_Error( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
1271 "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
1272 }
1273
1274 if( tvecs )
1275 {
1276 cn = CV_MAT_CN(tvecs->type);
1277 if( !CV_IS_MAT(tvecs) ||
1278 (CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
1279 ((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
1280 (tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
1281 CV_Error( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
1282 "1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
1283 }
1284
1285 if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
1286 CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
1287 cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
1288 CV_Error( CV_StsBadArg,
1289 "Intrinsic parameters must be 3x3 floating-point matrix" );
1290
1291 if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
1292 CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
1293 (distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
1294 (distCoeffs->cols*distCoeffs->rows != 4 &&
1295 distCoeffs->cols*distCoeffs->rows != 5 &&
1296 distCoeffs->cols*distCoeffs->rows != 8 &&
1297 distCoeffs->cols*distCoeffs->rows != 12) )
1298 CV_Error( CV_StsBadArg, cvDistCoeffErr );
1299
1300 for( i = 0; i < nimages; i++ )
1301 {
1302 ni = npoints->data.i[i*npstep];
1303 if( ni < 4 )
1304 {
1305 char buf[100];
1306 sprintf( buf, "The number of points in the view #%d is < 4", i );
1307 CV_Error( CV_StsOutOfRange, buf );
1308 }
1309 maxPoints = MAX( maxPoints, ni );
1310 total += ni;
1311 }
1312
1313 matM.reset(cvCreateMat( 1, total, CV_64FC3 ));
1314 _m.reset(cvCreateMat( 1, total, CV_64FC2 ));
1315
1316 cvConvertPointsHomogeneous( objectPoints, matM );
1317 cvConvertPointsHomogeneous( imagePoints, _m );
1318
1319 nparams = NINTRINSIC + nimages*6;
1320 _Ji.reset(cvCreateMat( maxPoints*2, NINTRINSIC, CV_64FC1 ));
1321 _Je.reset(cvCreateMat( maxPoints*2, 6, CV_64FC1 ));
1322 _err.reset(cvCreateMat( maxPoints*2, 1, CV_64FC1 ));
1323 cvZero( _Ji );
1324
1325 _k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
1326 if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
1327 {
1328 if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
1329 flags |= CV_CALIB_FIX_K3;
1330 flags |= CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;
1331 }
1332 const double minValidAspectRatio = 0.01;
1333 const double maxValidAspectRatio = 100.0;
1334
1335 // 1. initialize intrinsic parameters & LM solver
1336 if( flags & CV_CALIB_USE_INTRINSIC_GUESS )
1337 {
1338 cvConvert( cameraMatrix, &matA );
1339 if( A[0] <= 0 || A[4] <= 0 )
1340 CV_Error( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
1341 if( A[2] < 0 || A[2] >= imageSize.width ||
1342 A[5] < 0 || A[5] >= imageSize.height )
1343 CV_Error( CV_StsOutOfRange, "Principal point must be within the image" );
1344 if( fabs(A[1]) > 1e-5 )
1345 CV_Error( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
1346 if( fabs(A[3]) > 1e-5 || fabs(A[6]) > 1e-5 ||
1347 fabs(A[7]) > 1e-5 || fabs(A[8]-1) > 1e-5 )
1348 CV_Error( CV_StsOutOfRange,
1349 "The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
1350 A[1] = A[3] = A[6] = A[7] = 0.;
1351 A[8] = 1.;
1352
1353 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1354 {
1355 aspectRatio = A[0]/A[4];
1356
1357 if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
1358 CV_Error( CV_StsOutOfRange,
1359 "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
1360 }
1361 cvConvert( distCoeffs, &_k );
1362 }
1363 else
1364 {
1365 CvScalar mean, sdv;
1366 cvAvgSdv( matM, &mean, &sdv );
1367 if( fabs(mean.val[2]) > 1e-5 || fabs(sdv.val[2]) > 1e-5 )
1368 CV_Error( CV_StsBadArg,
1369 "For non-planar calibration rigs the initial intrinsic matrix must be specified" );
1370 for( i = 0; i < total; i++ )
1371 ((CvPoint3D64f*)matM->data.db)[i].z = 0.;
1372
1373 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1374 {
1375 aspectRatio = cvmGet(cameraMatrix,0,0);
1376 aspectRatio /= cvmGet(cameraMatrix,1,1);
1377 if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
1378 CV_Error( CV_StsOutOfRange,
1379 "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
1380 }
1381 cvInitIntrinsicParams2D( matM, _m, npoints, imageSize, &matA, aspectRatio );
1382 }
1383
1384 solver.init( nparams, 0, termCrit );
1385
1386 {
1387 double* param = solver.param->data.db;
1388 uchar* mask = solver.mask->data.ptr;
1389
1390 param[0] = A[0]; param[1] = A[4]; param[2] = A[2]; param[3] = A[5];
1391 param[4] = k[0]; param[5] = k[1]; param[6] = k[2]; param[7] = k[3];
1392 param[8] = k[4]; param[9] = k[5]; param[10] = k[6]; param[11] = k[7];
1393 param[12] = k[8]; param[13] = k[9]; param[14] = k[10]; param[15] = k[11];
1394
1395 if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
1396 mask[0] = mask[1] = 0;
1397 if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
1398 mask[2] = mask[3] = 0;
1399 if( flags & CV_CALIB_ZERO_TANGENT_DIST )
1400 {
1401 param[6] = param[7] = 0;
1402 mask[6] = mask[7] = 0;
1403 }
1404 if( !(flags & CV_CALIB_RATIONAL_MODEL) )
1405 flags |= CV_CALIB_FIX_K4 + CV_CALIB_FIX_K5 + CV_CALIB_FIX_K6;
1406 if( !(flags & CV_CALIB_THIN_PRISM_MODEL))
1407 flags |= CALIB_FIX_S1_S2_S3_S4;
1408 if( flags & CV_CALIB_FIX_K1 )
1409 mask[4] = 0;
1410 if( flags & CV_CALIB_FIX_K2 )
1411 mask[5] = 0;
1412 if( flags & CV_CALIB_FIX_K3 )
1413 mask[8] = 0;
1414 if( flags & CV_CALIB_FIX_K4 )
1415 mask[9] = 0;
1416 if( flags & CV_CALIB_FIX_K5 )
1417 mask[10] = 0;
1418 if( flags & CV_CALIB_FIX_K6 )
1419 mask[11] = 0;
1420
1421 if(flags & CALIB_FIX_S1_S2_S3_S4)
1422 {
1423 mask[12] = 0;
1424 mask[13] = 0;
1425 mask[14] = 0;
1426 mask[15] = 0;
1427 }
1428 }
1429
1430 // 2. initialize extrinsic parameters
1431 for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1432 {
1433 CvMat _Mi, _mi, _ri, _ti;
1434 ni = npoints->data.i[i*npstep];
1435
1436 cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
1437 cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
1438
1439 cvGetCols( matM, &_Mi, pos, pos + ni );
1440 cvGetCols( _m, &_mi, pos, pos + ni );
1441
1442 cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
1443 }
1444
1445 // 3. run the optimization
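// Because each view's pose only affects its own residuals, J^T J has an
// arrow-head structure: a dense NINTRINSIC x NINTRINSIC block for the intrinsics,
// one 6x6 diagonal block per view, plus the mixed NINTRINSIC x 6 blocks. The loop
// below accumulates exactly these blocks (and the matching parts of J^T err)
// view by view instead of forming the full Jacobian.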
1446 for(;;)
1447 {
1448 const CvMat* _param = 0;
1449 CvMat *_JtJ = 0, *_JtErr = 0;
1450 double* _errNorm = 0;
1451 bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
1452 double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;
1453
1454 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1455 {
1456 param[0] = param[1]*aspectRatio;
1457 pparam[0] = pparam[1]*aspectRatio;
1458 }
1459
1460 A[0] = param[0]; A[4] = param[1]; A[2] = param[2]; A[5] = param[3];
1461 k[0] = param[4]; k[1] = param[5]; k[2] = param[6]; k[3] = param[7];
1462 k[4] = param[8]; k[5] = param[9]; k[6] = param[10]; k[7] = param[11];
1463 k[8] = param[12];k[9] = param[13];k[10] = param[14];k[11] = param[15];
1464
1465 if( !proceed )
1466 break;
1467
1468 reprojErr = 0;
1469
1470 for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1471 {
1472 CvMat _Mi, _mi, _ri, _ti, _dpdr, _dpdt, _dpdf, _dpdc, _dpdk, _mp, _part;
1473 ni = npoints->data.i[i*npstep];
1474
1475 cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
1476 cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
1477
1478 cvGetCols( matM, &_Mi, pos, pos + ni );
1479 cvGetCols( _m, &_mi, pos, pos + ni );
1480
1481 _Je->rows = _Ji->rows = _err->rows = ni*2;
1482 cvGetCols( _Je, &_dpdr, 0, 3 );
1483 cvGetCols( _Je, &_dpdt, 3, 6 );
1484 cvGetCols( _Ji, &_dpdf, 0, 2 );
1485 cvGetCols( _Ji, &_dpdc, 2, 4 );
1486 cvGetCols( _Ji, &_dpdk, 4, NINTRINSIC );
1487 cvReshape( _err, &_mp, 2, 1 );
1488
1489 if( _JtJ || _JtErr )
1490 {
1491 cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
1492 (flags & CV_CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
1493 (flags & CV_CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
1494 (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
1495 }
1496 else
1497 cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp );
1498
1499 cvSub( &_mp, &_mi, &_mp );
1500
1501 if( _JtJ || _JtErr )
1502 {
1503 cvGetSubRect( _JtJ, &_part, cvRect(0,0,NINTRINSIC,NINTRINSIC) );
1504 cvGEMM( _Ji, _Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
1505
1506 cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,NINTRINSIC+i*6,6,6) );
1507 cvGEMM( _Je, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );
1508
1509 cvGetSubRect( _JtJ, &_part, cvRect(NINTRINSIC+i*6,0,6,NINTRINSIC) );
1510 cvGEMM( _Ji, _Je, 1, 0, 0, &_part, CV_GEMM_A_T );
1511
1512 cvGetRows( _JtErr, &_part, 0, NINTRINSIC );
1513 cvGEMM( _Ji, _err, 1, &_part, 1, &_part, CV_GEMM_A_T );
1514
1515 cvGetRows( _JtErr, &_part, NINTRINSIC + i*6, NINTRINSIC + (i+1)*6 );
1516 cvGEMM( _Je, _err, 1, 0, 0, &_part, CV_GEMM_A_T );
1517 }
1518
1519 double errNorm = cvNorm( &_mp, 0, CV_L2 );
1520 reprojErr += errNorm*errNorm;
1521 }
1522 if( _errNorm )
1523 *_errNorm = reprojErr;
1524 }
1525
1526 // 4. store the results
1527 cvConvert( &matA, cameraMatrix );
1528 cvConvert( &_k, distCoeffs );
1529
1530 for( i = 0; i < nimages; i++ )
1531 {
1532 CvMat src, dst;
1533 if( rvecs )
1534 {
1535 src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
1536 if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
1537 {
1538 dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
1539 rvecs->data.ptr + rvecs->step*i );
1540 cvRodrigues2( &src, &matA );
1541 cvConvert( &matA, &dst );
1542 }
1543 else
1544 {
1545 dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
1546 rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
1547 rvecs->data.ptr + rvecs->step*i );
1548 cvConvert( &src, &dst );
1549 }
1550 }
1551 if( tvecs )
1552 {
1553 src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
1554 dst = cvMat( 3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 ?
1555 tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
1556 tvecs->data.ptr + tvecs->step*i );
1557 cvConvert( &src, &dst );
1558 }
1559 }
1560
1561 return std::sqrt(reprojErr/total);
1562 }
1563
1564
1565 void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
1566 double apertureWidth, double apertureHeight, double *fovx, double *fovy,
1567 double *focalLength, CvPoint2D64f *principalPoint, double *pasp )
1568 {
1569 double alphax, alphay, mx, my;
1570 int imgWidth = imgSize.width, imgHeight = imgSize.height;
1571
1572 /* Validate parameters. */
1573
1574 if(calibMatr == 0)
1575 CV_Error(CV_StsNullPtr, "Some of the parameters are NULL pointers!");
1576
1577 if(!CV_IS_MAT(calibMatr))
1578 CV_Error(CV_StsUnsupportedFormat, "Input parameters must be matrices!");
1579
1580 if(calibMatr->cols != 3 || calibMatr->rows != 3)
1581 CV_Error(CV_StsUnmatchedSizes, "Size of the matrix must be 3x3!");
1582
1583 alphax = cvmGet(calibMatr, 0, 0);
1584 alphay = cvmGet(calibMatr, 1, 1);
1585 assert(imgWidth != 0 && imgHeight != 0 && alphax != 0.0 && alphay != 0.0);
1586
1587 /* Calculate pixel aspect ratio. */
1588 if(pasp)
1589 *pasp = alphay / alphax;
1590
1591 /* Calculate the number of pixels per real-world unit. */
1592
1593 if(apertureWidth != 0.0 && apertureHeight != 0.0) {
1594 mx = imgWidth / apertureWidth;
1595 my = imgHeight / apertureHeight;
1596 } else {
1597 mx = 1.0;
1598 if(pasp)
1599 my = *pasp;
1600 else
1601 my = 1.0;
1602 }
1603
1604 /* Calculate fovx and fovy. */
1605
1606 if(fovx)
1607 *fovx = 2 * atan(imgWidth / (2 * alphax)) * 180.0 / CV_PI;
1608
1609 if(fovy)
1610 *fovy = 2 * atan(imgHeight / (2 * alphay)) * 180.0 / CV_PI;
1611
1612 /* Calculate focal length. */
1613
1614 if(focalLength)
1615 *focalLength = alphax / mx;
1616
1617 /* Calculate the principal point. */
1618
1619 if(principalPoint)
1620 *principalPoint = cvPoint2D64f(cvmGet(calibMatr, 0, 2) / mx, cvmGet(calibMatr, 1, 2) / my);
1621 }
1622
1623
1624 //////////////////////////////// Stereo Calibration ///////////////////////////////////
1625
1626 static int dbCmp( const void* _a, const void* _b )
1627 {
1628 double a = *(const double*)_a;
1629 double b = *(const double*)_b;
1630
1631 return (a > b) - (a < b);
1632 }
1633
1634
1635 double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1,
1636 const CvMat* _imagePoints2, const CvMat* _npoints,
1637 CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
1638 CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
1639 CvSize imageSize, CvMat* matR, CvMat* matT,
1640 CvMat* matE, CvMat* matF,
1641 int flags,
1642 CvTermCriteria termCrit )
1643 {
1644 const int NINTRINSIC = 16;
1645 Ptr<CvMat> npoints, err, J_LR, Je, Ji, imagePoints[2], objectPoints, RT0;
1646 CvLevMarq solver;
1647 double reprojErr = 0;
1648
1649 double A[2][9], dk[2][12]={{0,0,0,0,0,0,0,0,0,0,0,0},{0,0,0,0,0,0,0,0,0,0,0,0}}, rlr[9];
1650 CvMat K[2], Dist[2], om_LR, T_LR;
1651 CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
1652 int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
1653 int nparams;
1654 bool recomputeIntrinsics = false;
1655 double aspectRatio[2] = {0,0};
1656
1657 CV_Assert( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&
1658 CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&
1659 CV_IS_MAT(matR) && CV_IS_MAT(matT) );
1660
1661 CV_Assert( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&
1662 CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );
1663
1664 CV_Assert( (_npoints->cols == 1 || _npoints->rows == 1) &&
1665 CV_MAT_TYPE(_npoints->type) == CV_32SC1 );
1666
1667 nimages = _npoints->cols + _npoints->rows - 1;
1668 npoints.reset(cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type ));
1669 cvCopy( _npoints, npoints );
1670
1671 for( i = 0, pointsTotal = 0; i < nimages; i++ )
1672 {
1673 maxPoints = MAX(maxPoints, npoints->data.i[i]);
1674 pointsTotal += npoints->data.i[i];
1675 }
1676
1677 objectPoints.reset(cvCreateMat( _objectPoints->rows, _objectPoints->cols,
1678 CV_64FC(CV_MAT_CN(_objectPoints->type))));
1679 cvConvert( _objectPoints, objectPoints );
1680 cvReshape( objectPoints, objectPoints, 3, 1 );
1681
1682 for( k = 0; k < 2; k++ )
1683 {
1684 const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;
1685 const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
1686 const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
1687
1688 int cn = CV_MAT_CN(_imagePoints1->type);
1689 CV_Assert( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||
1690 CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&
1691 ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
1692 (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
1693
1694 K[k] = cvMat(3,3,CV_64F,A[k]);
1695 Dist[k] = cvMat(1,12,CV_64F,dk[k]);
1696
1697 imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
1698 cvConvert( points, imagePoints[k] );
1699 cvReshape( imagePoints[k], imagePoints[k], 2, 1 );
1700
1701 if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|
1702 CV_CALIB_FIX_ASPECT_RATIO|CV_CALIB_FIX_FOCAL_LENGTH) )
1703 cvConvert( cameraMatrix, &K[k] );
1704
1705 if( flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS|
1706 CV_CALIB_FIX_K1|CV_CALIB_FIX_K2|CV_CALIB_FIX_K3|CV_CALIB_FIX_K4|CV_CALIB_FIX_K5|CV_CALIB_FIX_K6) )
1707 {
1708 CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
1709 CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
1710 cvConvert( distCoeffs, &tdist );
1711 }
1712
1713 if( !(flags & (CV_CALIB_FIX_INTRINSIC|CV_CALIB_USE_INTRINSIC_GUESS)))
1714 {
1715 cvCalibrateCamera2( objectPoints, imagePoints[k],
1716 npoints, imageSize, &K[k], &Dist[k], 0, 0, flags );
1717 }
1718 }
1719
1720 if( flags & CV_CALIB_SAME_FOCAL_LENGTH )
1721 {
1722 static const int avg_idx[] = { 0, 4, 2, 5, -1 };
1723 for( k = 0; avg_idx[k] >= 0; k++ )
1724 A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;
1725 }
1726
1727 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1728 {
1729 for( k = 0; k < 2; k++ )
1730 aspectRatio[k] = A[k][0]/A[k][4];
1731 }
1732
1733 recomputeIntrinsics = (flags & CV_CALIB_FIX_INTRINSIC) == 0;
1734
1735 err.reset(cvCreateMat( maxPoints*2, 1, CV_64F ));
1736 Je.reset(cvCreateMat( maxPoints*2, 6, CV_64F ));
1737 J_LR.reset(cvCreateMat( maxPoints*2, 6, CV_64F ));
1738 Ji.reset(cvCreateMat( maxPoints*2, NINTRINSIC, CV_64F ));
1739 cvZero( Ji );
1740
1741 // we optimize for the inter-camera R(3),t(3), then, optionally,
1742     // for the intrinsic parameters of each camera (fx, fy, cx, cy plus the distortion coefficients; NINTRINSIC = 16 per camera).
1743 nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);
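    // Illustrative example: with nimages = 10 views and recomputeIntrinsics == true,
    // nparams = 6*(10+1) + 2*16 = 98: 6 for the inter-camera pose, 6 per-view left-camera
    // pose, and 16 intrinsic parameters for each of the two cameras.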
1744
1745 // storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
1746 RT0.reset(cvCreateMat( 6, nimages, CV_64F ));
1747
1748 solver.init( nparams, 0, termCrit );
1749 if( recomputeIntrinsics )
1750 {
1751 uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
1752 if( !(flags & CV_CALIB_RATIONAL_MODEL) )
1753 flags |= CV_CALIB_FIX_K4 | CV_CALIB_FIX_K5 | CV_CALIB_FIX_K6;
1754 if( !(flags & CV_CALIB_THIN_PRISM_MODEL) )
1755 flags |= CV_CALIB_FIX_S1_S2_S3_S4;
1756 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1757 imask[0] = imask[NINTRINSIC] = 0;
1758 if( flags & CV_CALIB_FIX_FOCAL_LENGTH )
1759 imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;
1760 if( flags & CV_CALIB_FIX_PRINCIPAL_POINT )
1761 imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;
1762 if( flags & CV_CALIB_ZERO_TANGENT_DIST )
1763 imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;
1764 if( flags & CV_CALIB_FIX_K1 )
1765 imask[4] = imask[NINTRINSIC+4] = 0;
1766 if( flags & CV_CALIB_FIX_K2 )
1767 imask[5] = imask[NINTRINSIC+5] = 0;
1768 if( flags & CV_CALIB_FIX_K3 )
1769 imask[8] = imask[NINTRINSIC+8] = 0;
1770 if( flags & CV_CALIB_FIX_K4 )
1771 imask[9] = imask[NINTRINSIC+9] = 0;
1772 if( flags & CV_CALIB_FIX_K5 )
1773 imask[10] = imask[NINTRINSIC+10] = 0;
1774 if( flags & CV_CALIB_FIX_K6 )
1775 imask[11] = imask[NINTRINSIC+11] = 0;
1776 if( flags & CV_CALIB_FIX_S1_S2_S3_S4 )
1777 {
1778 imask[12] = imask[NINTRINSIC+12] = 0;
1779 imask[13] = imask[NINTRINSIC+13] = 0;
1780 imask[14] = imask[NINTRINSIC+14] = 0;
1781 imask[15] = imask[NINTRINSIC+15] = 0;
1782 }
1783 }
1784
1785 /*
1786 Compute initial estimate of pose
1787
1788 For each image, compute:
1789 R(om) is the rotation matrix of om
1790 om(R) is the rotation vector of R
1791 R_ref = R(om_right) * R(om_left)'
1792 T_ref_list = [T_ref_list; T_right - R_ref * T_left]
1793     om_ref_list = [om_ref_list; om(R_ref)]
1794
1795 om = median(om_ref_list)
1796 T = median(T_ref_list)
1797 */
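    /*
       Explanatory note: the relative pose formulas above follow from composing the
       per-view extrinsics. If X_l = R_l*X + T_l and X_r = R_r*X + T_r, then
           X_r = R_r*R_l'*(X_l - T_l) + T_r = (R_r*R_l')*X_l + (T_r - R_r*R_l'*T_l),
       i.e. R_ref = R_r*R_l' and T_ref = T_r - R_ref*T_l. Taking the per-component
       median over all views gives a robust initial guess for the inter-camera [om|T].
    */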
1798 for( i = ofs = 0; i < nimages; ofs += ni, i++ )
1799 {
1800 ni = npoints->data.i[i];
1801 CvMat objpt_i;
1802 double _om[2][3], r[2][9], t[2][3];
1803 CvMat om[2], R[2], T[2], imgpt_i[2];
1804
1805 objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
1806 for( k = 0; k < 2; k++ )
1807 {
1808 imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
1809 om[k] = cvMat(3, 1, CV_64F, _om[k]);
1810 R[k] = cvMat(3, 3, CV_64F, r[k]);
1811 T[k] = cvMat(3, 1, CV_64F, t[k]);
1812
1813 // FIXME: here we ignore activePoints[k] because of
1814             // the limited API of cvFindExtrinsicCameraParams2
1815 cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );
1816 cvRodrigues2( &om[k], &R[k] );
1817 if( k == 0 )
1818 {
1819 // save initial om_left and T_left
1820 solver.param->data.db[(i+1)*6] = _om[0][0];
1821 solver.param->data.db[(i+1)*6 + 1] = _om[0][1];
1822 solver.param->data.db[(i+1)*6 + 2] = _om[0][2];
1823 solver.param->data.db[(i+1)*6 + 3] = t[0][0];
1824 solver.param->data.db[(i+1)*6 + 4] = t[0][1];
1825 solver.param->data.db[(i+1)*6 + 5] = t[0][2];
1826 }
1827 }
1828 cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );
1829 cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );
1830 cvRodrigues2( &R[0], &T[0] );
1831 RT0->data.db[i] = t[0][0];
1832 RT0->data.db[i + nimages] = t[0][1];
1833 RT0->data.db[i + nimages*2] = t[0][2];
1834 RT0->data.db[i + nimages*3] = t[1][0];
1835 RT0->data.db[i + nimages*4] = t[1][1];
1836 RT0->data.db[i + nimages*5] = t[1][2];
1837 }
1838
1839 // find the medians and save the first 6 parameters
1840 for( i = 0; i < 6; i++ )
1841 {
1842 qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );
1843 solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :
1844 (RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;
1845 }
1846
1847 if( recomputeIntrinsics )
1848 for( k = 0; k < 2; k++ )
1849 {
1850 double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;
1851 if( flags & CV_CALIB_ZERO_TANGENT_DIST )
1852 dk[k][2] = dk[k][3] = 0;
1853 iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
1854 iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
1855 iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
1856 iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
1857 iparam[12] = dk[k][8];
1858 iparam[13] = dk[k][9];
1859 iparam[14] = dk[k][10];
1860 iparam[15] = dk[k][11];
1861 }
1862
1863 om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
1864 T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);
1865
1866 for(;;)
1867 {
1868 const CvMat* param = 0;
1869 CvMat tmpimagePoints;
1870 CvMat *JtJ = 0, *JtErr = 0;
1871 double *_errNorm = 0;
1872 double _omR[3], _tR[3];
1873 double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];
1874 CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);
1875 CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);
1876 //CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);
1877 CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);
1878 CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);
1879 CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);
1880 CvMat om[2], T[2], imgpt_i[2];
1881 CvMat dpdrot_hdr, dpdt_hdr, dpdf_hdr, dpdc_hdr, dpdk_hdr;
1882 CvMat *dpdrot = &dpdrot_hdr, *dpdt = &dpdt_hdr, *dpdf = 0, *dpdc = 0, *dpdk = 0;
1883
1884 if( !solver.updateAlt( param, JtJ, JtErr, _errNorm ))
1885 break;
1886 reprojErr = 0;
1887
1888 cvRodrigues2( &om_LR, &R_LR );
1889 om[1] = cvMat(3,1,CV_64F,_omR);
1890 T[1] = cvMat(3,1,CV_64F,_tR);
1891
1892 if( recomputeIntrinsics )
1893 {
1894 double* iparam = solver.param->data.db + (nimages+1)*6;
1895 double* ipparam = solver.prevParam->data.db + (nimages+1)*6;
1896 dpdf = &dpdf_hdr;
1897 dpdc = &dpdc_hdr;
1898 dpdk = &dpdk_hdr;
1899 if( flags & CV_CALIB_SAME_FOCAL_LENGTH )
1900 {
1901 iparam[NINTRINSIC] = iparam[0];
1902 iparam[NINTRINSIC+1] = iparam[1];
1903 ipparam[NINTRINSIC] = ipparam[0];
1904 ipparam[NINTRINSIC+1] = ipparam[1];
1905 }
1906 if( flags & CV_CALIB_FIX_ASPECT_RATIO )
1907 {
1908 iparam[0] = iparam[1]*aspectRatio[0];
1909 iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];
1910 ipparam[0] = ipparam[1]*aspectRatio[0];
1911 ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];
1912 }
1913 for( k = 0; k < 2; k++ )
1914 {
1915 A[k][0] = iparam[k*NINTRINSIC+0];
1916 A[k][4] = iparam[k*NINTRINSIC+1];
1917 A[k][2] = iparam[k*NINTRINSIC+2];
1918 A[k][5] = iparam[k*NINTRINSIC+3];
1919 dk[k][0] = iparam[k*NINTRINSIC+4];
1920 dk[k][1] = iparam[k*NINTRINSIC+5];
1921 dk[k][2] = iparam[k*NINTRINSIC+6];
1922 dk[k][3] = iparam[k*NINTRINSIC+7];
1923 dk[k][4] = iparam[k*NINTRINSIC+8];
1924 dk[k][5] = iparam[k*NINTRINSIC+9];
1925 dk[k][6] = iparam[k*NINTRINSIC+10];
1926 dk[k][7] = iparam[k*NINTRINSIC+11];
1927 dk[k][8] = iparam[k*NINTRINSIC+12];
1928 dk[k][9] = iparam[k*NINTRINSIC+13];
1929 dk[k][10] = iparam[k*NINTRINSIC+14];
1930 dk[k][11] = iparam[k*NINTRINSIC+15];
1931 }
1932 }
1933
1934 for( i = ofs = 0; i < nimages; ofs += ni, i++ )
1935 {
1936 ni = npoints->data.i[i];
1937 CvMat objpt_i, _part;
1938
1939 om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
1940 T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);
1941
1942 if( JtJ || JtErr )
1943 cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,
1944 &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );
1945 else
1946 cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );
1947
1948 objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
1949 err->rows = Je->rows = J_LR->rows = Ji->rows = ni*2;
1950 cvReshape( err, &tmpimagePoints, 2, 1 );
1951
1952 cvGetCols( Ji, &dpdf_hdr, 0, 2 );
1953 cvGetCols( Ji, &dpdc_hdr, 2, 4 );
1954 cvGetCols( Ji, &dpdk_hdr, 4, NINTRINSIC );
1955 cvGetCols( Je, &dpdrot_hdr, 0, 3 );
1956 cvGetCols( Je, &dpdt_hdr, 3, 6 );
1957
1958 for( k = 0; k < 2; k++ )
1959 {
1960 double l2err;
1961 imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
1962
1963 if( JtJ || JtErr )
1964 cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],
1965 &tmpimagePoints, dpdrot, dpdt, dpdf, dpdc, dpdk,
1966 (flags & CV_CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);
1967 else
1968 cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );
1969 cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );
1970
1971 l2err = cvNorm( &tmpimagePoints, 0, CV_L2 );
1972
1973 if( JtJ || JtErr )
1974 {
1975 int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
1976 assert( JtJ && JtErr );
1977
1978 if( k == 1 )
1979 {
1980 // d(err_{x|y}R) ~ de3
1981 // convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}
1982 for( p = 0; p < ni*2; p++ )
1983 {
1984 CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je->data.ptr + Je->step*p );
1985 CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );
1986 CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR->data.ptr + J_LR->step*p );
1987 CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );
1988 double _de3dr1[3], _de3dt1[3];
1989 CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );
1990 CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );
1991
1992 cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );
1993 cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );
1994
1995 cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );
1996 cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );
1997
1998 cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );
1999
2000 cvCopy( &de3dr1, &de3dr3 );
2001 cvCopy( &de3dt1, &de3dt3 );
2002 }
2003
2004 cvGetSubRect( JtJ, &_part, cvRect(0, 0, 6, 6) );
2005 cvGEMM( J_LR, J_LR, 1, &_part, 1, &_part, CV_GEMM_A_T );
2006
2007 cvGetSubRect( JtJ, &_part, cvRect(eofs, 0, 6, 6) );
2008 cvGEMM( J_LR, Je, 1, 0, 0, &_part, CV_GEMM_A_T );
2009
2010 cvGetRows( JtErr, &_part, 0, 6 );
2011 cvGEMM( J_LR, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
2012 }
2013
2014 cvGetSubRect( JtJ, &_part, cvRect(eofs, eofs, 6, 6) );
2015 cvGEMM( Je, Je, 1, &_part, 1, &_part, CV_GEMM_A_T );
2016
2017 cvGetRows( JtErr, &_part, eofs, eofs + 6 );
2018 cvGEMM( Je, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
2019
2020 if( recomputeIntrinsics )
2021 {
2022 cvGetSubRect( JtJ, &_part, cvRect(iofs, iofs, NINTRINSIC, NINTRINSIC) );
2023 cvGEMM( Ji, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
2024 cvGetSubRect( JtJ, &_part, cvRect(iofs, eofs, NINTRINSIC, 6) );
2025 cvGEMM( Je, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
2026 if( k == 1 )
2027 {
2028 cvGetSubRect( JtJ, &_part, cvRect(iofs, 0, NINTRINSIC, 6) );
2029 cvGEMM( J_LR, Ji, 1, &_part, 1, &_part, CV_GEMM_A_T );
2030 }
2031 cvGetRows( JtErr, &_part, iofs, iofs + NINTRINSIC );
2032 cvGEMM( Ji, err, 1, &_part, 1, &_part, CV_GEMM_A_T );
2033 }
2034 }
2035
2036 reprojErr += l2err*l2err;
2037 }
2038 }
2039 if(_errNorm)
2040 *_errNorm = reprojErr;
2041 }
2042
2043 cvRodrigues2( &om_LR, &R_LR );
2044 if( matR->rows == 1 || matR->cols == 1 )
2045 cvConvert( &om_LR, matR );
2046 else
2047 cvConvert( &R_LR, matR );
2048 cvConvert( &T_LR, matT );
2049
2050 if( recomputeIntrinsics )
2051 {
2052 cvConvert( &K[0], _cameraMatrix1 );
2053 cvConvert( &K[1], _cameraMatrix2 );
2054
2055 for( k = 0; k < 2; k++ )
2056 {
2057 CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
2058 CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
2059 CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
2060 cvConvert( &tdist, distCoeffs );
2061 }
2062 }
2063
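    // Explanatory note: the essential and fundamental matrices below are formed as
    //     E = [T]_x * R,      F ~ inv(K2)' * E * inv(K1),
    // where [T]_x is the skew-symmetric cross-product matrix of the translation.
    // F is then scaled so that F(3,3) = 1 whenever that entry is non-zero.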
2064 if( matE || matF )
2065 {
2066 double* t = T_LR.data.db;
2067 double tx[] =
2068 {
2069 0, -t[2], t[1],
2070 t[2], 0, -t[0],
2071 -t[1], t[0], 0
2072 };
2073 CvMat Tx = cvMat(3, 3, CV_64F, tx);
2074 double e[9], f[9];
2075 CvMat E = cvMat(3, 3, CV_64F, e);
2076 CvMat F = cvMat(3, 3, CV_64F, f);
2077 cvMatMul( &Tx, &R_LR, &E );
2078 if( matE )
2079 cvConvert( &E, matE );
2080 if( matF )
2081 {
2082 double ik[9];
2083 CvMat iK = cvMat(3, 3, CV_64F, ik);
2084 cvInvert(&K[1], &iK);
2085 cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );
2086 cvInvert(&K[0], &iK);
2087 cvMatMul(&E, &iK, &F);
2088 cvConvertScale( &F, matF, fabs(f[8]) > 0 ? 1./f[8] : 1 );
2089 }
2090 }
2091
2092 return std::sqrt(reprojErr/(pointsTotal*2));
2093 }
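/*
    Illustrative usage sketch (not part of the library): calling the C++ wrapper
    cv::stereoCalibrate defined later in this file. Variable names are hypothetical.

        std::vector<std::vector<cv::Point3f> > objPts;            // per-view 3D pattern points
        std::vector<std::vector<cv::Point2f> > imgPts1, imgPts2;  // detected corners per view
        cv::Size imageSize(640, 480);
        cv::Mat K1 = cv::Mat::eye(3,3,CV_64F), K2 = cv::Mat::eye(3,3,CV_64F), D1, D2, R, T, E, F;
        double rms = cv::stereoCalibrate(objPts, imgPts1, imgPts2, K1, D1, K2, D2,
                                         imageSize, R, T, E, F, cv::CALIB_FIX_INTRINSIC,
                                         cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, 1e-5));
        // rms is the RMS reprojection error returned by this routine.
*/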
2094
2095
2096 static void
2097 icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
2098 const CvMat* R, const CvMat* newCameraMatrix, CvSize imgSize,
2099 cv::Rect_<float>& inner, cv::Rect_<float>& outer )
2100 {
2101 const int N = 9;
2102 int x, y, k;
2103 cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_32FC2));
2104 CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
2105
2106 for( y = k = 0; y < N; y++ )
2107 for( x = 0; x < N; x++ )
2108 pts[k++] = cvPoint2D32f((float)x*imgSize.width/(N-1),
2109 (float)y*imgSize.height/(N-1));
2110
2111 cvUndistortPoints(_pts, _pts, cameraMatrix, distCoeffs, R, newCameraMatrix);
2112
2113 float iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
2114 float oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
2115 // find the inscribed rectangle.
2116     // the code will likely not work with extreme rotation matrices (R) (> 45 degrees)
2117 for( y = k = 0; y < N; y++ )
2118 for( x = 0; x < N; x++ )
2119 {
2120 CvPoint2D32f p = pts[k++];
2121 oX0 = MIN(oX0, p.x);
2122 oX1 = MAX(oX1, p.x);
2123 oY0 = MIN(oY0, p.y);
2124 oY1 = MAX(oY1, p.y);
2125
2126 if( x == 0 )
2127 iX0 = MAX(iX0, p.x);
2128 if( x == N-1 )
2129 iX1 = MIN(iX1, p.x);
2130 if( y == 0 )
2131 iY0 = MAX(iY0, p.y);
2132 if( y == N-1 )
2133 iY1 = MIN(iY1, p.y);
2134 }
2135 inner = cv::Rect_<float>(iX0, iY0, iX1-iX0, iY1-iY0);
2136 outer = cv::Rect_<float>(oX0, oY0, oX1-oX0, oY1-oY0);
2137 }
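/*
    Explanatory note: a 9x9 grid of image points is pushed through cvUndistortPoints;
    "outer" is the bounding box of all mapped points, while "inner" is the largest
    axis-aligned rectangle lying inside the mapped image border (max of the left/top
    border points, min of the right/bottom ones). cvStereoRectify and
    cvGetOptimalNewCameraMatrix interpolate between the two rectangles with the free
    scaling parameter alpha (0 = crop to inner, 1 = keep outer).
*/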
2138
2139
2140 void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
2141 const CvMat* _distCoeffs1, const CvMat* _distCoeffs2,
2142 CvSize imageSize, const CvMat* matR, const CvMat* matT,
2143 CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,
2144 CvMat* matQ, int flags, double alpha, CvSize newImgSize,
2145 CvRect* roi1, CvRect* roi2 )
2146 {
2147 double _om[3], _t[3], _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
2148 double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];
2149 cv::Rect_<float> inner1, inner2, outer1, outer2;
2150
2151 CvMat om = cvMat(3, 1, CV_64F, _om);
2152 CvMat t = cvMat(3, 1, CV_64F, _t);
2153 CvMat uu = cvMat(3, 1, CV_64F, _uu);
2154 CvMat r_r = cvMat(3, 3, CV_64F, _r_r);
2155 CvMat pp = cvMat(3, 4, CV_64F, _pp);
2156 CvMat ww = cvMat(3, 1, CV_64F, _ww); // temps
2157 CvMat wR = cvMat(3, 3, CV_64F, _wr);
2158 CvMat Z = cvMat(3, 1, CV_64F, _z);
2159 CvMat Ri = cvMat(3, 3, CV_64F, _ri);
2160 double nx = imageSize.width, ny = imageSize.height;
2161 int i, k;
2162
2163 if( matR->rows == 3 && matR->cols == 3 )
2164 cvRodrigues2(matR, &om); // get vector rotation
2165 else
2166 cvConvert(matR, &om); // it's already a rotation vector
2167 cvConvertScale(&om, &om, -0.5); // get average rotation
2168 cvRodrigues2(&om, &r_r); // rotate cameras to same orientation by averaging
2169 cvMatMul(&r_r, matT, &t);
2170
2171 int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;
2172 double c = _t[idx], nt = cvNorm(&t, 0, CV_L2);
2173 _uu[idx] = c > 0 ? 1 : -1;
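    // Explanatory note: idx selects horizontal (idx == 0) or vertical (idx == 1) stereo
    // depending on which component of the baseline dominates after the averaging rotation;
    // uu is set to +/- e_idx so that the rotation built below aligns t with that axis.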
2174
2175 // calculate global Z rotation
2176 cvCrossProduct(&t,&uu,&ww);
2177 double nw = cvNorm(&ww, 0, CV_L2);
2178 if (nw > 0.0)
2179 cvConvertScale(&ww, &ww, acos(fabs(c)/nt)/nw);
2180 cvRodrigues2(&ww, &wR);
2181
2182 // apply to both views
2183 cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);
2184 cvConvert( &Ri, _R1 );
2185 cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
2186 cvConvert( &Ri, _R2 );
2187 cvMatMul(&Ri, matT, &t);
2188
2189 // calculate projection/camera matrices
2190 // these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
2191 double fc_new = DBL_MAX;
2192 CvPoint2D64f cc_new[2] = {{0,0}, {0,0}};
2193
2194 for( k = 0; k < 2; k++ ) {
2195 const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
2196 const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
2197 double dk1 = Dk ? cvmGet(Dk, 0, 0) : 0;
2198 double fc = cvmGet(A,idx^1,idx^1);
2199 if( dk1 < 0 ) {
2200 fc *= 1 + dk1*(nx*nx + ny*ny)/(4*fc*fc);
2201 }
2202 fc_new = MIN(fc_new, fc);
2203 }
2204
2205 for( k = 0; k < 2; k++ )
2206 {
2207 const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
2208 const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
2209 CvPoint2D32f _pts[4];
2210 CvPoint3D32f _pts_3[4];
2211 CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
2212 CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);
2213
2214 for( i = 0; i < 4; i++ )
2215 {
2216 int j = (i<2) ? 0 : 1;
2217 _pts[i].x = (float)((i % 2)*(nx-1));
2218 _pts[i].y = (float)(j*(ny-1));
2219 }
2220 cvUndistortPoints( &pts, &pts, A, Dk, 0, 0 );
2221 cvConvertPointsHomogeneous( &pts, &pts_3 );
2222
2223 //Change camera matrix to have cc=[0,0] and fc = fc_new
2224 double _a_tmp[3][3];
2225 CvMat A_tmp = cvMat(3, 3, CV_64F, _a_tmp);
2226 _a_tmp[0][0]=fc_new;
2227 _a_tmp[1][1]=fc_new;
2228 _a_tmp[0][2]=0.0;
2229 _a_tmp[1][2]=0.0;
2230 cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, &A_tmp, 0, &pts );
2231 CvScalar avg = cvAvg(&pts);
2232 cc_new[k].x = (nx-1)/2 - avg.val[0];
2233 cc_new[k].y = (ny-1)/2 - avg.val[1];
2234 }
2235
2236 // vertical focal length must be the same for both images to keep the epipolar constraint
2237 // (for horizontal epipolar lines -- TBD: check for vertical epipolar lines)
2238 // use fy for fx also, for simplicity
2239
2240 // For simplicity, set the principal points for both cameras to be the average
2241     // of the two principal points (one or both of the x- and y-coordinates, depending on the mode below)
2242 if( flags & CV_CALIB_ZERO_DISPARITY )
2243 {
2244 cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
2245 cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
2246 }
2247 else if( idx == 0 ) // horizontal stereo
2248 cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
2249 else // vertical stereo
2250 cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
2251
2252 cvZero( &pp );
2253 _pp[0][0] = _pp[1][1] = fc_new;
2254 _pp[0][2] = cc_new[0].x;
2255 _pp[1][2] = cc_new[0].y;
2256 _pp[2][2] = 1;
2257 cvConvert(&pp, _P1);
2258
2259 _pp[0][2] = cc_new[1].x;
2260 _pp[1][2] = cc_new[1].y;
2261 _pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
2262 cvConvert(&pp, _P2);
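    // Explanatory note: after this block the rectified projection matrices have the form
    //     P1 = [ f 0 cx1 0     ; 0 f cy1 0 ; 0 0 1 0 ]
    //     P2 = [ f 0 cx2 Tx*f  ; 0 f cy2 0 ; 0 0 1 0 ]   (horizontal stereo, idx == 0),
    // i.e. both cameras share the focal length f = fc_new and the baseline enters only
    // through the fourth column of P2 (row idx for vertical stereo).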
2263
2264 alpha = MIN(alpha, 1.);
2265
2266 icvGetRectangles( _cameraMatrix1, _distCoeffs1, _R1, _P1, imageSize, inner1, outer1 );
2267 icvGetRectangles( _cameraMatrix2, _distCoeffs2, _R2, _P2, imageSize, inner2, outer2 );
2268
2269 {
2270 newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imageSize;
2271 double cx1_0 = cc_new[0].x;
2272 double cy1_0 = cc_new[0].y;
2273 double cx2_0 = cc_new[1].x;
2274 double cy2_0 = cc_new[1].y;
2275 double cx1 = newImgSize.width*cx1_0/imageSize.width;
2276 double cy1 = newImgSize.height*cy1_0/imageSize.height;
2277 double cx2 = newImgSize.width*cx2_0/imageSize.width;
2278 double cy2 = newImgSize.height*cy2_0/imageSize.height;
2279 double s = 1.;
2280
2281 if( alpha >= 0 )
2282 {
2283 double s0 = std::max(std::max(std::max((double)cx1/(cx1_0 - inner1.x), (double)cy1/(cy1_0 - inner1.y)),
2284 (double)(newImgSize.width - cx1)/(inner1.x + inner1.width - cx1_0)),
2285 (double)(newImgSize.height - cy1)/(inner1.y + inner1.height - cy1_0));
2286 s0 = std::max(std::max(std::max(std::max((double)cx2/(cx2_0 - inner2.x), (double)cy2/(cy2_0 - inner2.y)),
2287 (double)(newImgSize.width - cx2)/(inner2.x + inner2.width - cx2_0)),
2288 (double)(newImgSize.height - cy2)/(inner2.y + inner2.height - cy2_0)),
2289 s0);
2290
2291 double s1 = std::min(std::min(std::min((double)cx1/(cx1_0 - outer1.x), (double)cy1/(cy1_0 - outer1.y)),
2292 (double)(newImgSize.width - cx1)/(outer1.x + outer1.width - cx1_0)),
2293 (double)(newImgSize.height - cy1)/(outer1.y + outer1.height - cy1_0));
2294 s1 = std::min(std::min(std::min(std::min((double)cx2/(cx2_0 - outer2.x), (double)cy2/(cy2_0 - outer2.y)),
2295 (double)(newImgSize.width - cx2)/(outer2.x + outer2.width - cx2_0)),
2296 (double)(newImgSize.height - cy2)/(outer2.y + outer2.height - cy2_0)),
2297 s1);
2298
2299 s = s0*(1 - alpha) + s1*alpha;
2300 }
2301
2302 fc_new *= s;
2303 cc_new[0] = cvPoint2D64f(cx1, cy1);
2304 cc_new[1] = cvPoint2D64f(cx2, cy2);
2305
2306 cvmSet(_P1, 0, 0, fc_new);
2307 cvmSet(_P1, 1, 1, fc_new);
2308 cvmSet(_P1, 0, 2, cx1);
2309 cvmSet(_P1, 1, 2, cy1);
2310
2311 cvmSet(_P2, 0, 0, fc_new);
2312 cvmSet(_P2, 1, 1, fc_new);
2313 cvmSet(_P2, 0, 2, cx2);
2314 cvmSet(_P2, 1, 2, cy2);
2315 cvmSet(_P2, idx, 3, s*cvmGet(_P2, idx, 3));
2316
2317 if(roi1)
2318 {
2319 *roi1 = cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
2320 cvCeil((inner1.y - cy1_0)*s + cy1),
2321 cvFloor(inner1.width*s), cvFloor(inner1.height*s))
2322 & cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2323 }
2324
2325 if(roi2)
2326 {
2327 *roi2 = cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
2328 cvCeil((inner2.y - cy2_0)*s + cy2),
2329 cvFloor(inner2.width*s), cvFloor(inner2.height*s))
2330 & cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2331 }
2332 }
2333
2334 if( matQ )
2335 {
2336 double q[] =
2337 {
2338 1, 0, 0, -cc_new[0].x,
2339 0, 1, 0, -cc_new[0].y,
2340 0, 0, 0, fc_new,
2341 0, 0, -1./_t[idx],
2342 (idx == 0 ? cc_new[0].x - cc_new[1].x : cc_new[0].y - cc_new[1].y)/_t[idx]
2343 };
2344 CvMat Q = cvMat(4, 4, CV_64F, q);
2345 cvConvert( &Q, matQ );
2346 }
2347 }
2348
2349
2350 void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs,
2351 CvSize imgSize, double alpha,
2352 CvMat* newCameraMatrix, CvSize newImgSize,
2353 CvRect* validPixROI, int centerPrincipalPoint )
2354 {
2355 cv::Rect_<float> inner, outer;
2356 newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize;
2357
2358 double M[3][3];
2359 CvMat matM = cvMat(3, 3, CV_64F, M);
2360 cvConvert(cameraMatrix, &matM);
2361
2362 if( centerPrincipalPoint )
2363 {
2364 double cx0 = M[0][2];
2365 double cy0 = M[1][2];
2366 double cx = (newImgSize.width-1)*0.5;
2367 double cy = (newImgSize.height-1)*0.5;
2368
2369 icvGetRectangles( cameraMatrix, distCoeffs, 0, cameraMatrix, imgSize, inner, outer );
2370 double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)),
2371 (double)cx/(inner.x + inner.width - cx0)),
2372 (double)cy/(inner.y + inner.height - cy0));
2373 double s1 = std::min(std::min(std::min((double)cx/(cx0 - outer.x), (double)cy/(cy0 - outer.y)),
2374 (double)cx/(outer.x + outer.width - cx0)),
2375 (double)cy/(outer.y + outer.height - cy0));
2376 double s = s0*(1 - alpha) + s1*alpha;
2377
2378 M[0][0] *= s;
2379 M[1][1] *= s;
2380 M[0][2] = cx;
2381 M[1][2] = cy;
2382
2383 if( validPixROI )
2384 {
2385 inner = cv::Rect_<float>((float)((inner.x - cx0)*s + cx),
2386 (float)((inner.y - cy0)*s + cy),
2387 (float)(inner.width*s),
2388 (float)(inner.height*s));
2389 cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height));
2390 r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2391 *validPixROI = r;
2392 }
2393 }
2394 else
2395 {
2396 // Get inscribed and circumscribed rectangles in normalized
2397 // (independent of camera matrix) coordinates
2398 icvGetRectangles( cameraMatrix, distCoeffs, 0, 0, imgSize, inner, outer );
2399
2400 // Projection mapping inner rectangle to viewport
2401 double fx0 = (newImgSize.width - 1) / inner.width;
2402 double fy0 = (newImgSize.height - 1) / inner.height;
2403 double cx0 = -fx0 * inner.x;
2404 double cy0 = -fy0 * inner.y;
2405
2406 // Projection mapping outer rectangle to viewport
2407 double fx1 = (newImgSize.width - 1) / outer.width;
2408 double fy1 = (newImgSize.height - 1) / outer.height;
2409 double cx1 = -fx1 * outer.x;
2410 double cy1 = -fy1 * outer.y;
2411
2412 // Interpolate between the two optimal projections
2413 M[0][0] = fx0*(1 - alpha) + fx1*alpha;
2414 M[1][1] = fy0*(1 - alpha) + fy1*alpha;
2415 M[0][2] = cx0*(1 - alpha) + cx1*alpha;
2416 M[1][2] = cy0*(1 - alpha) + cy1*alpha;
2417
2418 if( validPixROI )
2419 {
2420 icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
2421 cv::Rect r = inner;
2422 r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2423 *validPixROI = r;
2424 }
2425 }
2426
2427 cvConvert(&matM, newCameraMatrix);
2428 }
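/*
    Illustrative usage sketch (not part of the library): the free scaling parameter alpha
    interpolates between the inscribed ("only valid pixels") and circumscribed ("all source
    pixels") rectangles computed by icvGetRectangles. Variable names (cameraMatrix,
    distCoeffs, imgSize) are hypothetical.

        CvMat* newK = cvCreateMat(3, 3, CV_64F);
        CvRect validROI;
        // alpha = 0: crop so that only valid (non-black) pixels remain;
        // alpha = 1: keep the whole original image content.
        cvGetOptimalNewCameraMatrix(&cameraMatrix, &distCoeffs, imgSize, 0.0,
                                    newK, imgSize, &validROI, 0);
*/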
2429
2430
2431 CV_IMPL int cvStereoRectifyUncalibrated(
2432 const CvMat* _points1, const CvMat* _points2,
2433 const CvMat* F0, CvSize imgSize,
2434 CvMat* _H1, CvMat* _H2, double threshold )
2435 {
2436 Ptr<CvMat> _m1, _m2, _lines1, _lines2;
2437
2438 int i, j, npoints;
2439 double cx, cy;
2440 double u[9], v[9], w[9], f[9], h1[9], h2[9], h0[9], e2[3];
2441 CvMat E2 = cvMat( 3, 1, CV_64F, e2 );
2442 CvMat U = cvMat( 3, 3, CV_64F, u );
2443 CvMat V = cvMat( 3, 3, CV_64F, v );
2444 CvMat W = cvMat( 3, 3, CV_64F, w );
2445 CvMat F = cvMat( 3, 3, CV_64F, f );
2446 CvMat H1 = cvMat( 3, 3, CV_64F, h1 );
2447 CvMat H2 = cvMat( 3, 3, CV_64F, h2 );
2448 CvMat H0 = cvMat( 3, 3, CV_64F, h0 );
2449
2450 CvPoint2D64f* m1;
2451 CvPoint2D64f* m2;
2452 CvPoint3D64f* lines1;
2453 CvPoint3D64f* lines2;
2454
2455 CV_Assert( CV_IS_MAT(_points1) && CV_IS_MAT(_points2) &&
2456 (_points1->rows == 1 || _points1->cols == 1) &&
2457 (_points2->rows == 1 || _points2->cols == 1) &&
2458 CV_ARE_SIZES_EQ(_points1, _points2) );
2459
2460 npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;
2461
2462 _m1.reset(cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) ));
2463 _m2.reset(cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) ));
2464 _lines1.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
2465 _lines2.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
2466
2467 cvConvert( F0, &F );
2468
2469 cvSVD( (CvMat*)&F, &W, &U, &V, CV_SVD_U_T + CV_SVD_V_T );
2470 W.data.db[8] = 0.;
2471 cvGEMM( &U, &W, 1, 0, 0, &W, CV_GEMM_A_T );
2472 cvMatMul( &W, &V, &F );
2473
2474 cx = cvRound( (imgSize.width-1)*0.5 );
2475 cy = cvRound( (imgSize.height-1)*0.5 );
2476
2477 cvZero( _H1 );
2478 cvZero( _H2 );
2479
2480 cvConvert( _points1, _m1 );
2481 cvConvert( _points2, _m2 );
2482 cvReshape( _m1, _m1, 2, 1 );
2483 cvReshape( _m2, _m2, 2, 1 );
2484
2485 m1 = (CvPoint2D64f*)_m1->data.ptr;
2486 m2 = (CvPoint2D64f*)_m2->data.ptr;
2487 lines1 = (CvPoint3D64f*)_lines1->data.ptr;
2488 lines2 = (CvPoint3D64f*)_lines2->data.ptr;
2489
2490 if( threshold > 0 )
2491 {
2492 cvComputeCorrespondEpilines( _m1, 1, &F, _lines1 );
2493 cvComputeCorrespondEpilines( _m2, 2, &F, _lines2 );
2494
2495 // measure distance from points to the corresponding epilines, mark outliers
2496 for( i = j = 0; i < npoints; i++ )
2497 {
2498 if( fabs(m1[i].x*lines2[i].x +
2499 m1[i].y*lines2[i].y +
2500 lines2[i].z) <= threshold &&
2501 fabs(m2[i].x*lines1[i].x +
2502 m2[i].y*lines1[i].y +
2503 lines1[i].z) <= threshold )
2504 {
2505 if( j < i )
2506 {
2507 m1[j] = m1[i];
2508 m2[j] = m2[i];
2509 }
2510 j++;
2511 }
2512 }
2513
2514 npoints = j;
2515 if( npoints == 0 )
2516 return 0;
2517 }
2518
2519 _m1->cols = _m2->cols = npoints;
2520 memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
2521 cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
2522
2523 double t[] =
2524 {
2525 1, 0, -cx,
2526 0, 1, -cy,
2527 0, 0, 1
2528 };
2529 CvMat T = cvMat(3, 3, CV_64F, t);
2530 cvMatMul( &T, &E2, &E2 );
2531
2532 int mirror = e2[0] < 0;
2533 double d = MAX(std::sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
2534 double alpha = e2[0]/d;
2535 double beta = e2[1]/d;
2536 double r[] =
2537 {
2538 alpha, beta, 0,
2539 -beta, alpha, 0,
2540 0, 0, 1
2541 };
2542 CvMat R = cvMat(3, 3, CV_64F, r);
2543 cvMatMul( &R, &T, &T );
2544 cvMatMul( &R, &E2, &E2 );
2545 double invf = fabs(e2[2]) < 1e-6*fabs(e2[0]) ? 0 : -e2[2]/e2[0];
2546 double k[] =
2547 {
2548 1, 0, 0,
2549 0, 1, 0,
2550 invf, 0, 1
2551 };
2552 CvMat K = cvMat(3, 3, CV_64F, k);
2553 cvMatMul( &K, &T, &H2 );
2554 cvMatMul( &K, &E2, &E2 );
2555
2556 double it[] =
2557 {
2558 1, 0, cx,
2559 0, 1, cy,
2560 0, 0, 1
2561 };
2562 CvMat iT = cvMat( 3, 3, CV_64F, it );
2563 cvMatMul( &iT, &H2, &H2 );
2564
2565 memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
2566 cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
2567
2568 double e2_x[] =
2569 {
2570 0, -e2[2], e2[1],
2571 e2[2], 0, -e2[0],
2572 -e2[1], e2[0], 0
2573 };
2574 double e2_111[] =
2575 {
2576 e2[0], e2[0], e2[0],
2577 e2[1], e2[1], e2[1],
2578 e2[2], e2[2], e2[2],
2579 };
2580 CvMat E2_x = cvMat(3, 3, CV_64F, e2_x);
2581 CvMat E2_111 = cvMat(3, 3, CV_64F, e2_111);
2582 cvMatMulAdd(&E2_x, &F, &E2_111, &H0 );
2583 cvMatMul(&H2, &H0, &H0);
2584 CvMat E1=cvMat(3, 1, CV_64F, V.data.db+6);
2585 cvMatMul(&H0, &E1, &E1);
2586
2587 cvPerspectiveTransform( _m1, _m1, &H0 );
2588 cvPerspectiveTransform( _m2, _m2, &H2 );
2589 CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;
2590 double x[3];
2591 CvMat X = cvMat( 3, 1, CV_64F, x );
2592 cvConvertPointsHomogeneous( _m1, &A );
2593 cvReshape( &A, &A, 1, npoints );
2594 cvReshape( _m2, &BxBy, 1, npoints );
2595 cvGetCol( &BxBy, &B, 0 );
2596 cvSolve( &A, &B, &X, CV_SVD );
2597
2598 double ha[] =
2599 {
2600 x[0], x[1], x[2],
2601 0, 1, 0,
2602 0, 0, 1
2603 };
2604 CvMat Ha = cvMat(3, 3, CV_64F, ha);
2605 cvMatMul( &Ha, &H0, &H1 );
2606 cvPerspectiveTransform( _m1, _m1, &Ha );
2607
2608 if( mirror )
2609 {
2610 double mm[] = { -1, 0, cx*2, 0, -1, cy*2, 0, 0, 1 };
2611 CvMat MM = cvMat(3, 3, CV_64F, mm);
2612 cvMatMul( &MM, &H1, &H1 );
2613 cvMatMul( &MM, &H2, &H2 );
2614 }
2615
2616 cvConvert( &H1, _H1 );
2617 cvConvert( &H2, _H2 );
2618
2619 return 1;
2620 }
2621
2622
2623 void cv::reprojectImageTo3D( InputArray _disparity,
2624 OutputArray __3dImage, InputArray _Qmat,
2625 bool handleMissingValues, int dtype )
2626 {
2627 Mat disparity = _disparity.getMat(), Q = _Qmat.getMat();
2628 int stype = disparity.type();
2629
2630 CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
2631 stype == CV_32SC1 || stype == CV_32FC1 );
2632 CV_Assert( Q.size() == Size(4,4) );
2633
2634 if( dtype < 0 )
2635 dtype = CV_32FC3;
2636 else
2637 {
2638 dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), 3);
2639 CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
2640 }
2641
2642 __3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
2643 Mat _3dImage = __3dImage.getMat();
2644
2645 const double bigZ = 10000.;
2646 double q[4][4];
2647 Mat _Q(4, 4, CV_64F, q);
2648 Q.convertTo(_Q, CV_64F);
2649
2650 int x, cols = disparity.cols;
2651 CV_Assert( cols >= 0 );
2652
2653 std::vector<float> _sbuf(cols+1), _dbuf(cols*3+1);
2654 float* sbuf = &_sbuf[0], *dbuf = &_dbuf[0];
2655 double minDisparity = FLT_MAX;
2656
2657     // NOTE: here we quietly assume that at least one pixel in the disparity map is not defined,
2658     // and we set the corresponding Z's to some fixed big value.
2659 if( handleMissingValues )
2660 cv::minMaxIdx( disparity, &minDisparity, 0, 0, 0 );
2661
2662 for( int y = 0; y < disparity.rows; y++ )
2663 {
2664 float *sptr = sbuf, *dptr = dbuf;
2665 double qx = q[0][1]*y + q[0][3], qy = q[1][1]*y + q[1][3];
2666 double qz = q[2][1]*y + q[2][3], qw = q[3][1]*y + q[3][3];
2667
2668 if( stype == CV_8UC1 )
2669 {
2670 const uchar* sptr0 = disparity.ptr<uchar>(y);
2671 for( x = 0; x < cols; x++ )
2672 sptr[x] = (float)sptr0[x];
2673 }
2674 else if( stype == CV_16SC1 )
2675 {
2676 const short* sptr0 = disparity.ptr<short>(y);
2677 for( x = 0; x < cols; x++ )
2678 sptr[x] = (float)sptr0[x];
2679 }
2680 else if( stype == CV_32SC1 )
2681 {
2682 const int* sptr0 = disparity.ptr<int>(y);
2683 for( x = 0; x < cols; x++ )
2684 sptr[x] = (float)sptr0[x];
2685 }
2686 else
2687 sptr = (float*)disparity.ptr<float>(y);
2688
2689 if( dtype == CV_32FC3 )
2690 dptr = _3dImage.ptr<float>(y);
2691
2692 for( x = 0; x < cols; x++, qx += q[0][0], qy += q[1][0], qz += q[2][0], qw += q[3][0] )
2693 {
2694 double d = sptr[x];
2695 double iW = 1./(qw + q[3][2]*d);
2696 double X = (qx + q[0][2]*d)*iW;
2697 double Y = (qy + q[1][2]*d)*iW;
2698 double Z = (qz + q[2][2]*d)*iW;
2699 if( fabs(d-minDisparity) <= FLT_EPSILON )
2700 Z = bigZ;
2701
2702 dptr[x*3] = (float)X;
2703 dptr[x*3+1] = (float)Y;
2704 dptr[x*3+2] = (float)Z;
2705 }
2706
2707 if( dtype == CV_16SC3 )
2708 {
2709 short* dptr0 = _3dImage.ptr<short>(y);
2710 for( x = 0; x < cols*3; x++ )
2711 {
2712 int ival = cvRound(dptr[x]);
2713 dptr0[x] = cv::saturate_cast<short>(ival);
2714 }
2715 }
2716 else if( dtype == CV_32SC3 )
2717 {
2718 int* dptr0 = _3dImage.ptr<int>(y);
2719 for( x = 0; x < cols*3; x++ )
2720 {
2721 int ival = cvRound(dptr[x]);
2722 dptr0[x] = ival;
2723 }
2724 }
2725 }
2726 }
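/*
    Illustrative usage sketch (not part of the library): turning a disparity map into a
    3D point image with the Q matrix produced by cv::stereoRectify. Variable names are
    hypothetical.

        cv::Mat disp;                       // e.g. CV_16SC1 fixed-point disparity (scaled by 16) from StereoBM
        cv::Mat disp32f, xyz;
        disp.convertTo(disp32f, CV_32F, 1.0/16);
        cv::reprojectImageTo3D(disp32f, xyz, Q, true);   // xyz is CV_32FC3
        // With handleMissingValues == true, pixels whose disparity equals the minimum
        // value are mapped to the large fixed Z (bigZ = 10000) used above.
*/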
2727
2728
2729 void cvReprojectImageTo3D( const CvArr* disparityImage,
2730 CvArr* _3dImage, const CvMat* matQ,
2731 int handleMissingValues )
2732 {
2733 cv::Mat disp = cv::cvarrToMat(disparityImage);
2734 cv::Mat _3dimg = cv::cvarrToMat(_3dImage);
2735 cv::Mat mq = cv::cvarrToMat(matQ);
2736 CV_Assert( disp.size() == _3dimg.size() );
2737 int dtype = _3dimg.type();
2738 CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
2739
2740 cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
2741 }
2742
2743
2744 CV_IMPL void
2745 cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
2746 CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz,
2747 CvPoint3D64f *eulerAngles)
2748 {
2749 double matM[3][3], matR[3][3], matQ[3][3];
2750 CvMat M = cvMat(3, 3, CV_64F, matM);
2751 CvMat R = cvMat(3, 3, CV_64F, matR);
2752 CvMat Q = cvMat(3, 3, CV_64F, matQ);
2753 double z, c, s;
2754
2755 /* Validate parameters. */
2756 CV_Assert( CV_IS_MAT(matrixM) && CV_IS_MAT(matrixR) && CV_IS_MAT(matrixQ) &&
2757 matrixM->cols == 3 && matrixM->rows == 3 &&
2758 CV_ARE_SIZES_EQ(matrixM, matrixR) && CV_ARE_SIZES_EQ(matrixM, matrixQ));
2759
2760 cvConvert(matrixM, &M);
2761
2762 /* Find Givens rotation Q_x for x axis (left multiplication). */
2763 /*
2764 ( 1 0 0 )
2765 Qx = ( 0 c s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2)
2766 ( 0 -s c )
2767 */
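    /*
       Explanatory note: with this choice of c and s, the (3,2) entry of M*Qx vanishes:
           (M*Qx)[3][2] = m32*c - m33*s = (m32*m33 - m33*m32)/sqrt(m32^2 + m33^2) = 0,
       which is what the assert a few lines below verifies (up to floating-point round-off).
    */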
2768 s = matM[2][1];
2769 c = matM[2][2];
2770 z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2771 c *= z;
2772 s *= z;
2773
2774 double _Qx[3][3] = { {1, 0, 0}, {0, c, s}, {0, -s, c} };
2775 CvMat Qx = cvMat(3, 3, CV_64F, _Qx);
2776
2777 cvMatMul(&M, &Qx, &R);
2778 assert(fabs(matR[2][1]) < FLT_EPSILON);
2779 matR[2][1] = 0;
2780
2781 /* Find Givens rotation for y axis. */
2782 /*
2783 ( c 0 -s )
2784 Qy = ( 0 1 0 ), c = m33/sqrt(m31^2 + m33^2), s = -m31/sqrt(m31^2 + m33^2)
2785 ( s 0 c )
2786 */
2787 s = -matR[2][0];
2788 c = matR[2][2];
2789 z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2790 c *= z;
2791 s *= z;
2792
2793 double _Qy[3][3] = { {c, 0, -s}, {0, 1, 0}, {s, 0, c} };
2794 CvMat Qy = cvMat(3, 3, CV_64F, _Qy);
2795 cvMatMul(&R, &Qy, &M);
2796
2797 assert(fabs(matM[2][0]) < FLT_EPSILON);
2798 matM[2][0] = 0;
2799
2800 /* Find Givens rotation for z axis. */
2801 /*
2802 ( c s 0 )
2803 Qz = (-s c 0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2)
2804 ( 0 0 1 )
2805 */
2806
2807 s = matM[1][0];
2808 c = matM[1][1];
2809 z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2810 c *= z;
2811 s *= z;
2812
2813 double _Qz[3][3] = { {c, s, 0}, {-s, c, 0}, {0, 0, 1} };
2814 CvMat Qz = cvMat(3, 3, CV_64F, _Qz);
2815
2816 cvMatMul(&M, &Qz, &R);
2817 assert(fabs(matR[1][0]) < FLT_EPSILON);
2818 matR[1][0] = 0;
2819
2820 // Solve the decomposition ambiguity.
2821 // Diagonal entries of R, except the last one, shall be positive.
2822 // Further rotate R by 180 degree if necessary
2823 if( matR[0][0] < 0 )
2824 {
2825 if( matR[1][1] < 0 )
2826 {
2827 // rotate around z for 180 degree, i.e. a rotation matrix of
2828 // [-1, 0, 0],
2829 // [ 0, -1, 0],
2830 // [ 0, 0, 1]
2831 matR[0][0] *= -1;
2832 matR[0][1] *= -1;
2833 matR[1][1] *= -1;
2834
2835 _Qz[0][0] *= -1;
2836 _Qz[0][1] *= -1;
2837 _Qz[1][0] *= -1;
2838 _Qz[1][1] *= -1;
2839 }
2840 else
2841 {
2842 // rotate around y for 180 degree, i.e. a rotation matrix of
2843 // [-1, 0, 0],
2844 // [ 0, 1, 0],
2845 // [ 0, 0, -1]
2846 matR[0][0] *= -1;
2847 matR[0][2] *= -1;
2848 matR[1][2] *= -1;
2849 matR[2][2] *= -1;
2850
2851 cvTranspose( &Qz, &Qz );
2852
2853 _Qy[0][0] *= -1;
2854 _Qy[0][2] *= -1;
2855 _Qy[2][0] *= -1;
2856 _Qy[2][2] *= -1;
2857 }
2858 }
2859 else if( matR[1][1] < 0 )
2860 {
2861 // ??? for some reason, we never get here ???
2862
2863 // rotate around x for 180 degree, i.e. a rotation matrix of
2864 // [ 1, 0, 0],
2865 // [ 0, -1, 0],
2866 // [ 0, 0, -1]
2867 matR[0][1] *= -1;
2868 matR[0][2] *= -1;
2869 matR[1][1] *= -1;
2870 matR[1][2] *= -1;
2871 matR[2][2] *= -1;
2872
2873 cvTranspose( &Qz, &Qz );
2874 cvTranspose( &Qy, &Qy );
2875
2876 _Qx[1][1] *= -1;
2877 _Qx[1][2] *= -1;
2878 _Qx[2][1] *= -1;
2879 _Qx[2][2] *= -1;
2880 }
2881
2882     // calculate the Euler angles
2883 if( eulerAngles )
2884 {
2885 eulerAngles->x = acos(_Qx[1][1]) * (_Qx[1][2] >= 0 ? 1 : -1) * (180.0 / CV_PI);
2886 eulerAngles->y = acos(_Qy[0][0]) * (_Qy[2][0] >= 0 ? 1 : -1) * (180.0 / CV_PI);
2887 eulerAngles->z = acos(_Qz[0][0]) * (_Qz[0][1] >= 0 ? 1 : -1) * (180.0 / CV_PI);
2888 }
2889
2890     /* Calculate the orthogonal matrix. */
2891 /*
2892 Q = QzT * QyT * QxT
2893 */
2894 cvGEMM( &Qz, &Qy, 1, 0, 0, &M, CV_GEMM_A_T + CV_GEMM_B_T );
2895 cvGEMM( &M, &Qx, 1, 0, 0, &Q, CV_GEMM_B_T );
2896
2897 /* Save R and Q matrices. */
2898 cvConvert( &R, matrixR );
2899 cvConvert( &Q, matrixQ );
2900
2901 if( matrixQx )
2902 cvConvert(&Qx, matrixQx);
2903 if( matrixQy )
2904 cvConvert(&Qy, matrixQy);
2905 if( matrixQz )
2906 cvConvert(&Qz, matrixQz);
2907 }
2908
2909
2910 CV_IMPL void
2911 cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
2912 CvMat *rotMatr, CvMat *posVect,
2913 CvMat *rotMatrX, CvMat *rotMatrY,
2914 CvMat *rotMatrZ, CvPoint3D64f *eulerAngles)
2915 {
2916 double tmpProjMatrData[16], tmpMatrixDData[16], tmpMatrixVData[16];
2917 CvMat tmpProjMatr = cvMat(4, 4, CV_64F, tmpProjMatrData);
2918 CvMat tmpMatrixD = cvMat(4, 4, CV_64F, tmpMatrixDData);
2919 CvMat tmpMatrixV = cvMat(4, 4, CV_64F, tmpMatrixVData);
2920 CvMat tmpMatrixM;
2921
2922 /* Validate parameters. */
2923 if(projMatr == 0 || calibMatr == 0 || rotMatr == 0 || posVect == 0)
2924         CV_Error(CV_StsNullPtr, "Some of the parameters are NULL pointers!");
2925
2926 if(!CV_IS_MAT(projMatr) || !CV_IS_MAT(calibMatr) || !CV_IS_MAT(rotMatr) || !CV_IS_MAT(posVect))
2927         CV_Error(CV_StsUnsupportedFormat, "Input parameters must be matrices!");
2928
2929 if(projMatr->cols != 4 || projMatr->rows != 3)
2930 CV_Error(CV_StsUnmatchedSizes, "Size of projection matrix must be 3x4!");
2931
2932 if(calibMatr->cols != 3 || calibMatr->rows != 3 || rotMatr->cols != 3 || rotMatr->rows != 3)
2933 CV_Error(CV_StsUnmatchedSizes, "Size of calibration and rotation matrices must be 3x3!");
2934
2935 if(posVect->cols != 1 || posVect->rows != 4)
2936 CV_Error(CV_StsUnmatchedSizes, "Size of position vector must be 4x1!");
2937
2938 /* Compute position vector. */
2939 cvSetZero(&tmpProjMatr); // Add zero row to make matrix square.
2940 int i, k;
2941 for(i = 0; i < 3; i++)
2942 for(k = 0; k < 4; k++)
2943 cvmSet(&tmpProjMatr, i, k, cvmGet(projMatr, i, k));
2944
2945 cvSVD(&tmpProjMatr, &tmpMatrixD, NULL, &tmpMatrixV, CV_SVD_MODIFY_A + CV_SVD_V_T);
2946
2947 /* Save position vector. */
2948 for(i = 0; i < 4; i++)
2949 cvmSet(posVect, i, 0, cvmGet(&tmpMatrixV, 3, i)); // Solution is last row of V.
2950
2951 /* Compute calibration and rotation matrices via RQ decomposition. */
2952     cvGetCols(projMatr, &tmpMatrixM, 0, 3); // M is the leftmost 3x3 submatrix of P.
2953
2954     CV_Assert(cvDet(&tmpMatrixM) != 0.0); // Only finite cameras can be decomposed, so M has to be nonsingular [det(M) != 0].
2955
2956 cvRQDecomp3x3(&tmpMatrixM, calibMatr, rotMatr, rotMatrX, rotMatrY, rotMatrZ, eulerAngles);
2957 }
2958
2959
2960
2961 namespace cv
2962 {
2963
2964 static void collectCalibrationData( InputArrayOfArrays objectPoints,
2965 InputArrayOfArrays imagePoints1,
2966 InputArrayOfArrays imagePoints2,
2967 Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
2968 Mat& npoints )
2969 {
2970 int nimages = (int)objectPoints.total();
2971 int i, j = 0, ni = 0, total = 0;
2972 CV_Assert(nimages > 0 && nimages == (int)imagePoints1.total() &&
2973 (!imgPtMat2 || nimages == (int)imagePoints2.total()));
2974
2975 for( i = 0; i < nimages; i++ )
2976 {
2977 ni = objectPoints.getMat(i).checkVector(3, CV_32F);
2978 if( ni <= 0 )
2979 CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
2980 int ni1 = imagePoints1.getMat(i).checkVector(2, CV_32F);
2981 if( ni1 <= 0 )
2982 CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
2983 CV_Assert( ni == ni1 );
2984
2985 total += ni;
2986 }
2987
2988 npoints.create(1, (int)nimages, CV_32S);
2989 objPtMat.create(1, (int)total, CV_32FC3);
2990 imgPtMat1.create(1, (int)total, CV_32FC2);
2991 Point2f* imgPtData2 = 0;
2992
2993 if( imgPtMat2 )
2994 {
2995 imgPtMat2->create(1, (int)total, CV_32FC2);
2996 imgPtData2 = imgPtMat2->ptr<Point2f>();
2997 }
2998
2999 Point3f* objPtData = objPtMat.ptr<Point3f>();
3000 Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
3001
3002 for( i = 0; i < nimages; i++, j += ni )
3003 {
3004 Mat objpt = objectPoints.getMat(i);
3005 Mat imgpt1 = imagePoints1.getMat(i);
3006 ni = objpt.checkVector(3, CV_32F);
3007 npoints.at<int>(i) = ni;
3008 memcpy( objPtData + j, objpt.ptr(), ni*sizeof(objPtData[0]) );
3009 memcpy( imgPtData1 + j, imgpt1.ptr(), ni*sizeof(imgPtData1[0]) );
3010
3011 if( imgPtData2 )
3012 {
3013 Mat imgpt2 = imagePoints2.getMat(i);
3014 int ni2 = imgpt2.checkVector(2, CV_32F);
3015 CV_Assert( ni == ni2 );
3016 memcpy( imgPtData2 + j, imgpt2.ptr(), ni*sizeof(imgPtData2[0]) );
3017 }
3018 }
3019 }
3020
3021
3022 static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
3023 {
3024 Mat cameraMatrix = Mat::eye(3, 3, rtype);
3025 if( cameraMatrix0.size() == cameraMatrix.size() )
3026 cameraMatrix0.convertTo(cameraMatrix, rtype);
3027 return cameraMatrix;
3028 }
3029
3030 static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype)
3031 {
3032 Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, 12) : Size(12, 1), rtype);
3033 if( distCoeffs0.size() == Size(1, 4) ||
3034 distCoeffs0.size() == Size(1, 5) ||
3035 distCoeffs0.size() == Size(1, 8) ||
3036 distCoeffs0.size() == Size(1, 12) ||
3037 distCoeffs0.size() == Size(4, 1) ||
3038 distCoeffs0.size() == Size(5, 1) ||
3039 distCoeffs0.size() == Size(8, 1) ||
3040 distCoeffs0.size() == Size(12, 1) )
3041 {
3042 Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
3043 distCoeffs0.convertTo(dstCoeffs, rtype);
3044 }
3045 return distCoeffs;
3046 }
3047
3048 } // namespace cv
3049
3050
3051 void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
3052 {
3053 Mat src = _src.getMat();
3054 bool v2m = src.cols == 1 || src.rows == 1;
3055 _dst.create(3, v2m ? 3 : 1, src.depth());
3056 Mat dst = _dst.getMat();
3057 CvMat _csrc = src, _cdst = dst, _cjacobian;
3058 if( _jacobian.needed() )
3059 {
3060 _jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth());
3061 _cjacobian = _jacobian.getMat();
3062 }
3063 bool ok = cvRodrigues2(&_csrc, &_cdst, _jacobian.needed() ? &_cjacobian : 0) > 0;
3064 if( !ok )
3065 dst = Scalar(0);
3066 }
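/*
    Illustrative usage sketch (not part of the library): converting between a rotation
    vector and a rotation matrix with the wrapper above. The values are hypothetical.

        cv::Mat rvec = (cv::Mat_<double>(3,1) << 0, 0, CV_PI/2);   // 90 degrees about Z
        cv::Mat R;
        cv::Rodrigues(rvec, R);     // R is the corresponding 3x3 rotation matrix
        cv::Mat rvec2;
        cv::Rodrigues(R, rvec2);    // back to the 3x1 rotation vector
*/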
3067
3068 void cv::matMulDeriv( InputArray _Amat, InputArray _Bmat,
3069 OutputArray _dABdA, OutputArray _dABdB )
3070 {
3071 Mat A = _Amat.getMat(), B = _Bmat.getMat();
3072 _dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());
3073 _dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());
3074 CvMat matA = A, matB = B, c_dABdA = _dABdA.getMat(), c_dABdB = _dABdB.getMat();
3075 cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB);
3076 }
3077
3078
3079 void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
3080 InputArray _rvec2, InputArray _tvec2,
3081 OutputArray _rvec3, OutputArray _tvec3,
3082 OutputArray _dr3dr1, OutputArray _dr3dt1,
3083 OutputArray _dr3dr2, OutputArray _dr3dt2,
3084 OutputArray _dt3dr1, OutputArray _dt3dt1,
3085 OutputArray _dt3dr2, OutputArray _dt3dt2 )
3086 {
3087 Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat();
3088 Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat();
3089 int rtype = rvec1.type();
3090 _rvec3.create(rvec1.size(), rtype);
3091 _tvec3.create(tvec1.size(), rtype);
3092 Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();
3093
3094 CvMat c_rvec1 = rvec1, c_tvec1 = tvec1, c_rvec2 = rvec2,
3095 c_tvec2 = tvec2, c_rvec3 = rvec3, c_tvec3 = tvec3;
3096 CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
3097 CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
3098
3099 if( _dr3dr1.needed() )
3100 {
3101 _dr3dr1.create(3, 3, rtype);
3102 p_dr3dr1 = &(c_dr3dr1 = _dr3dr1.getMat());
3103 }
3104
3105 if( _dr3dt1.needed() )
3106 {
3107 _dr3dt1.create(3, 3, rtype);
3108 p_dr3dt1 = &(c_dr3dt1 = _dr3dt1.getMat());
3109 }
3110
3111 if( _dr3dr2.needed() )
3112 {
3113 _dr3dr2.create(3, 3, rtype);
3114 p_dr3dr2 = &(c_dr3dr2 = _dr3dr2.getMat());
3115 }
3116
3117 if( _dr3dt2.needed() )
3118 {
3119 _dr3dt2.create(3, 3, rtype);
3120 p_dr3dt2 = &(c_dr3dt2 = _dr3dt2.getMat());
3121 }
3122
3123 if( _dt3dr1.needed() )
3124 {
3125 _dt3dr1.create(3, 3, rtype);
3126 p_dt3dr1 = &(c_dt3dr1 = _dt3dr1.getMat());
3127 }
3128
3129 if( _dt3dt1.needed() )
3130 {
3131 _dt3dt1.create(3, 3, rtype);
3132 p_dt3dt1 = &(c_dt3dt1 = _dt3dt1.getMat());
3133 }
3134
3135 if( _dt3dr2.needed() )
3136 {
3137 _dt3dr2.create(3, 3, rtype);
3138 p_dt3dr2 = &(c_dt3dr2 = _dt3dr2.getMat());
3139 }
3140
3141 if( _dt3dt2.needed() )
3142 {
3143 _dt3dt2.create(3, 3, rtype);
3144 p_dt3dt2 = &(c_dt3dt2 = _dt3dt2.getMat());
3145 }
3146
3147 cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3,
3148 p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2,
3149 p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2);
3150 }
3151
3152
3153 void cv::projectPoints( InputArray _opoints,
3154 InputArray _rvec,
3155 InputArray _tvec,
3156 InputArray _cameraMatrix,
3157 InputArray _distCoeffs,
3158 OutputArray _ipoints,
3159 OutputArray _jacobian,
3160 double aspectRatio )
3161 {
3162 Mat opoints = _opoints.getMat();
3163 int npoints = opoints.checkVector(3), depth = opoints.depth();
3164 CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
3165
3166 CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
3167 CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
3168
3169 _ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
3170 CvMat c_imagePoints = _ipoints.getMat();
3171 CvMat c_objectPoints = opoints;
3172 Mat cameraMatrix = _cameraMatrix.getMat();
3173
3174 Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
3175 CvMat c_cameraMatrix = cameraMatrix;
3176 CvMat c_rvec = rvec, c_tvec = tvec;
3177
3178 double dc0buf[5]={0};
3179 Mat dc0(5,1,CV_64F,dc0buf);
3180 Mat distCoeffs = _distCoeffs.getMat();
3181 if( distCoeffs.empty() )
3182 distCoeffs = dc0;
3183 CvMat c_distCoeffs = distCoeffs;
3184 int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
3185
3186 if( _jacobian.needed() )
3187 {
3188 _jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
3189 Mat jacobian = _jacobian.getMat();
3190 pdpdrot = &(dpdrot = jacobian.colRange(0, 3));
3191 pdpdt = &(dpdt = jacobian.colRange(3, 6));
3192 pdpdf = &(dpdf = jacobian.colRange(6, 8));
3193 pdpdc = &(dpdc = jacobian.colRange(8, 10));
3194 pdpddist = &(dpddist = jacobian.colRange(10, 10+ndistCoeffs));
3195 }
3196
3197 cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
3198 &c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
3199 }
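/*
    Illustrative usage sketch (not part of the library): projecting 3D points with the
    wrapper above. Variable names (rvec, tvec, K, distCoeffs) are hypothetical.

        std::vector<cv::Point3f> objectPts;   // points in the model coordinate frame
        std::vector<cv::Point2f> imagePts;
        cv::Mat jac;                          // optional 2N x (10 + #distCoeffs) Jacobian
        cv::projectPoints(objectPts, rvec, tvec, K, distCoeffs, imagePts, jac);
*/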
3200
3201 cv::Mat cv::initCameraMatrix2D( InputArrayOfArrays objectPoints,
3202 InputArrayOfArrays imagePoints,
3203 Size imageSize, double aspectRatio )
3204 {
3205 Mat objPt, imgPt, npoints, cameraMatrix(3, 3, CV_64F);
3206 collectCalibrationData( objectPoints, imagePoints, noArray(),
3207 objPt, imgPt, 0, npoints );
3208 CvMat _objPt = objPt, _imgPt = imgPt, _npoints = npoints, _cameraMatrix = cameraMatrix;
3209 cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,
3210 imageSize, &_cameraMatrix, aspectRatio );
3211 return cameraMatrix;
3212 }
3213
3214
3215 double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
3216 InputArrayOfArrays _imagePoints,
3217 Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3218 OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
3219 {
3220 int rtype = CV_64F;
3221 Mat cameraMatrix = _cameraMatrix.getMat();
3222 cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
3223 Mat distCoeffs = _distCoeffs.getMat();
3224 distCoeffs = prepareDistCoeffs(distCoeffs, rtype);
3225     if( !(flags & CALIB_RATIONAL_MODEL) && !(flags & CALIB_THIN_PRISM_MODEL) )
3226 distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
3227
3228 int i;
3229 size_t nimages = _objectPoints.total();
3230 CV_Assert( nimages > 0 );
3231 Mat objPt, imgPt, npoints, rvecM((int)nimages, 3, CV_64FC1), tvecM((int)nimages, 3, CV_64FC1);
3232 collectCalibrationData( _objectPoints, _imagePoints, noArray(),
3233 objPt, imgPt, 0, npoints );
3234 CvMat c_objPt = objPt, c_imgPt = imgPt, c_npoints = npoints;
3235 CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;
3236 CvMat c_rvecM = rvecM, c_tvecM = tvecM;
3237
3238 double reprojErr = cvCalibrateCamera2(&c_objPt, &c_imgPt, &c_npoints, imageSize,
3239 &c_cameraMatrix, &c_distCoeffs, &c_rvecM,
3240 &c_tvecM, flags, criteria );
3241
3242 bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed();
3243
3244 if( rvecs_needed )
3245 _rvecs.create((int)nimages, 1, CV_64FC3);
3246 if( tvecs_needed )
3247 _tvecs.create((int)nimages, 1, CV_64FC3);
3248
3249 for( i = 0; i < (int)nimages; i++ )
3250 {
3251 if( rvecs_needed )
3252 {
3253 _rvecs.create(3, 1, CV_64F, i, true);
3254 Mat rv = _rvecs.getMat(i);
3255 memcpy(rv.ptr(), rvecM.ptr<double>(i), 3*sizeof(double));
3256 }
3257 if( tvecs_needed )
3258 {
3259 _tvecs.create(3, 1, CV_64F, i, true);
3260 Mat tv = _tvecs.getMat(i);
3261 memcpy(tv.ptr(), tvecM.ptr<double>(i), 3*sizeof(double));
3262 }
3263 }
3264 cameraMatrix.copyTo(_cameraMatrix);
3265 distCoeffs.copyTo(_distCoeffs);
3266
3267 return reprojErr;
3268 }
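/*
    Illustrative usage sketch (not part of the library): a minimal single-camera
    calibration call using the wrapper above. Variable contents are hypothetical.

        std::vector<std::vector<cv::Point3f> > objectPoints;   // one vector of pattern points per view
        std::vector<std::vector<cv::Point2f> > imagePoints;    // detected corners per view
        cv::Mat K, distCoeffs;
        std::vector<cv::Mat> rvecs, tvecs;
        double rms = cv::calibrateCamera(objectPoints, imagePoints, cv::Size(640, 480),
                                         K, distCoeffs, rvecs, tvecs);
        // rms is the overall RMS reprojection error reported by cvCalibrateCamera2.
*/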
3269
3270
3271 void cv::calibrationMatrixValues( InputArray _cameraMatrix, Size imageSize,
3272 double apertureWidth, double apertureHeight,
3273 double& fovx, double& fovy, double& focalLength,
3274 Point2d& principalPoint, double& aspectRatio )
3275 {
3276 Mat cameraMatrix = _cameraMatrix.getMat();
3277 CvMat c_cameraMatrix = cameraMatrix;
3278 cvCalibrationMatrixValues( &c_cameraMatrix, imageSize, apertureWidth, apertureHeight,
3279 &fovx, &fovy, &focalLength, (CvPoint2D64f*)&principalPoint, &aspectRatio );
3280 }
3281
3282 double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
3283 InputArrayOfArrays _imagePoints1,
3284 InputArrayOfArrays _imagePoints2,
3285 InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
3286 InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
3287 Size imageSize, OutputArray _Rmat, OutputArray _Tmat,
3288                           OutputArray _Emat, OutputArray _Fmat, int flags,
3289 TermCriteria criteria)
3290 {
3291 int rtype = CV_64F;
3292 Mat cameraMatrix1 = _cameraMatrix1.getMat();
3293 Mat cameraMatrix2 = _cameraMatrix2.getMat();
3294 Mat distCoeffs1 = _distCoeffs1.getMat();
3295 Mat distCoeffs2 = _distCoeffs2.getMat();
3296 cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype);
3297 cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype);
3298 distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
3299 distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);
3300
3301 if( !(flags & CALIB_RATIONAL_MODEL) &&(!(flags & CALIB_THIN_PRISM_MODEL)))
3302 {
3303 distCoeffs1 = distCoeffs1.rows == 1 ? distCoeffs1.colRange(0, 5) : distCoeffs1.rowRange(0, 5);
3304 distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
3305 }
3306
3307 _Rmat.create(3, 3, rtype);
3308 _Tmat.create(3, 1, rtype);
3309
3310 Mat objPt, imgPt, imgPt2, npoints;
3311
3312 collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
3313 objPt, imgPt, &imgPt2, npoints );
3314 CvMat c_objPt = objPt, c_imgPt = imgPt, c_imgPt2 = imgPt2, c_npoints = npoints;
3315 CvMat c_cameraMatrix1 = cameraMatrix1, c_distCoeffs1 = distCoeffs1;
3316 CvMat c_cameraMatrix2 = cameraMatrix2, c_distCoeffs2 = distCoeffs2;
3317 CvMat c_matR = _Rmat.getMat(), c_matT = _Tmat.getMat(), c_matE, c_matF, *p_matE = 0, *p_matF = 0;
3318
3319 if( _Emat.needed() )
3320 {
3321 _Emat.create(3, 3, rtype);
3322 p_matE = &(c_matE = _Emat.getMat());
3323 }
3324 if( _Fmat.needed() )
3325 {
3326 _Fmat.create(3, 3, rtype);
3327 p_matF = &(c_matF = _Fmat.getMat());
3328 }
3329
3330 double err = cvStereoCalibrate(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
3331 &c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, imageSize,
3332 &c_matR, &c_matT, p_matE, p_matF, flags, criteria );
3333
3334 cameraMatrix1.copyTo(_cameraMatrix1);
3335 cameraMatrix2.copyTo(_cameraMatrix2);
3336 distCoeffs1.copyTo(_distCoeffs1);
3337 distCoeffs2.copyTo(_distCoeffs2);
3338
3339 return err;
3340 }
3341
3342
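/*
    Usage sketch (hedged): a typical call to the cv::stereoCalibrate wrapper
    above with the per-camera intrinsics held fixed. objectPoints,
    imagePoints1/imagePoints2, K1, D1, K2, D2 and imageSize are assumed to come
    from prior single-camera calibration and corner detection.

        cv::Mat R, T, E, F;
        double err = cv::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                         K1, D1, K2, D2, imageSize,
                                         R, T, E, F, cv::CALIB_FIX_INTRINSIC);
        // R, T relate the first camera's coordinate frame to the second one's;
        // err is the combined reprojection error
*/
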
void cv::stereoRectify( InputArray _cameraMatrix1, InputArray _distCoeffs1,
                        InputArray _cameraMatrix2, InputArray _distCoeffs2,
                        Size imageSize, InputArray _Rmat, InputArray _Tmat,
                        OutputArray _Rmat1, OutputArray _Rmat2,
                        OutputArray _Pmat1, OutputArray _Pmat2,
                        OutputArray _Qmat, int flags,
                        double alpha, Size newImageSize,
                        Rect* validPixROI1, Rect* validPixROI2 )
{
    Mat cameraMatrix1 = _cameraMatrix1.getMat(), cameraMatrix2 = _cameraMatrix2.getMat();
    Mat distCoeffs1 = _distCoeffs1.getMat(), distCoeffs2 = _distCoeffs2.getMat();
    Mat Rmat = _Rmat.getMat(), Tmat = _Tmat.getMat();
    CvMat c_cameraMatrix1 = cameraMatrix1;
    CvMat c_cameraMatrix2 = cameraMatrix2;
    CvMat c_distCoeffs1 = distCoeffs1;
    CvMat c_distCoeffs2 = distCoeffs2;
    CvMat c_R = Rmat, c_T = Tmat;

    int rtype = CV_64F;
    _Rmat1.create(3, 3, rtype);
    _Rmat2.create(3, 3, rtype);
    _Pmat1.create(3, 4, rtype);
    _Pmat2.create(3, 4, rtype);
    CvMat c_R1 = _Rmat1.getMat(), c_R2 = _Rmat2.getMat(), c_P1 = _Pmat1.getMat(), c_P2 = _Pmat2.getMat();
    CvMat c_Q, *p_Q = 0;

    if( _Qmat.needed() )
    {
        _Qmat.create(4, 4, rtype);
        p_Q = &(c_Q = _Qmat.getMat());
    }

    cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, &c_distCoeffs1, &c_distCoeffs2,
        imageSize, &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
        newImageSize, (CvRect*)validPixROI1, (CvRect*)validPixROI2);
}

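/*
    Usage sketch (hedged): computing rectification transforms from the R, T
    estimated by stereoCalibrate; all inputs are assumed from that step.

        cv::Mat R1, R2, P1, P2, Q;
        cv::Rect roi1, roi2;
        cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T,
                          R1, R2, P1, P2, Q,
                          cv::CALIB_ZERO_DISPARITY, -1, imageSize, &roi1, &roi2);
        // R1/R2 rotate each camera into its rectified frame, P1/P2 are the new
        // projection matrices, Q is the 4x4 disparity-to-depth reprojection matrix
*/
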
bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2,
                                    InputArray _Fmat, Size imgSize,
                                    OutputArray _Hmat1, OutputArray _Hmat2, double threshold )
{
    int rtype = CV_64F;
    _Hmat1.create(3, 3, rtype);
    _Hmat2.create(3, 3, rtype);
    Mat F = _Fmat.getMat();
    Mat points1 = _points1.getMat(), points2 = _points2.getMat();
    CvMat c_pt1 = points1, c_pt2 = points2;
    CvMat c_F, *p_F = 0, c_H1 = _Hmat1.getMat(), c_H2 = _Hmat2.getMat();
    if( F.size() == Size(3, 3) )
        p_F = &(c_F = F);
    return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, imgSize, &c_H1, &c_H2, threshold) > 0;
}

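/*
    Usage sketch (hedged): rectification without intrinsics, from point matches
    and a fundamental matrix. points1/points2 are assumed pixel correspondences
    (e.g. std::vector<cv::Point2f>) and F an estimate from cv::findFundamentalMat.

        cv::Mat H1, H2;
        bool ok = cv::stereoRectifyUncalibrated(points1, points2, F,
                                                imageSize, H1, H2, 5.0);
        // on success, H1 and H2 are homographies that warp the two images into
        // a rectified pair
*/
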
cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix,
                                       InputArray _distCoeffs,
                                       Size imgSize, double alpha, Size newImgSize,
                                       Rect* validPixROI, bool centerPrincipalPoint )
{
    Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
    CvMat c_cameraMatrix = cameraMatrix, c_distCoeffs = distCoeffs;

    Mat newCameraMatrix(3, 3, CV_MAT_TYPE(c_cameraMatrix.type));
    CvMat c_newCameraMatrix = newCameraMatrix;

    cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, imgSize,
                                alpha, &c_newCameraMatrix,
                                newImgSize, (CvRect*)validPixROI, (int)centerPrincipalPoint);
    return newCameraMatrix;
}

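/*
    Usage sketch (hedged): choosing a new camera matrix for undistortion and
    then building remap tables with it. alpha = 1 keeps all source pixels (with
    black borders); alpha = 0 crops to valid pixels only. K and dist are assumed
    to come from a previous calibration.

        cv::Rect validRoi;
        cv::Mat newK = cv::getOptimalNewCameraMatrix(K, dist, imageSize, 1.0,
                                                     imageSize, &validRoi);
        cv::Mat map1, map2;
        cv::initUndistortRectifyMap(K, dist, cv::Mat(), newK, imageSize,
                                    CV_16SC2, map1, map2);
*/
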
cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat,
                           OutputArray _Rmat,
                           OutputArray _Qmat,
                           OutputArray _Qx,
                           OutputArray _Qy,
                           OutputArray _Qz )
{
    Mat M = _Mmat.getMat();
    _Rmat.create(3, 3, M.type());
    _Qmat.create(3, 3, M.type());
    Vec3d eulerAngles;

    CvMat matM = M, matR = _Rmat.getMat(), matQ = _Qmat.getMat(), Qx, Qy, Qz, *pQx = 0, *pQy = 0, *pQz = 0;
    if( _Qx.needed() )
    {
        _Qx.create(3, 3, M.type());
        pQx = &(Qx = _Qx.getMat());
    }
    if( _Qy.needed() )
    {
        _Qy.create(3, 3, M.type());
        pQy = &(Qy = _Qy.getMat());
    }
    if( _Qz.needed() )
    {
        _Qz.create(3, 3, M.type());
        pQz = &(Qz = _Qz.getMat());
    }
    cvRQDecomp3x3(&matM, &matR, &matQ, pQx, pQy, pQz, (CvPoint3D64f*)&eulerAngles[0]);
    return eulerAngles;
}

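/*
    Usage sketch (hedged): splitting a 3x3 matrix (for instance the left block
    of a projection matrix) into an upper-triangular calibration part and an
    orthonormal rotation. M3x3 is an assumed CV_64F input.

        cv::Mat Rupper, Qrot;
        cv::Vec3d angles = cv::RQDecomp3x3(M3x3, Rupper, Qrot);
        // M3x3 = Rupper * Qrot; 'angles' holds the Euler angles (in degrees)
        // recovered from Qrot
*/
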
void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraMatrix,
                                    OutputArray _rotMatrix, OutputArray _transVect,
                                    OutputArray _rotMatrixX, OutputArray _rotMatrixY,
                                    OutputArray _rotMatrixZ, OutputArray _eulerAngles )
{
    Mat projMatrix = _projMatrix.getMat();
    int type = projMatrix.type();
    _cameraMatrix.create(3, 3, type);
    _rotMatrix.create(3, 3, type);
    _transVect.create(4, 1, type);
    CvMat c_projMatrix = projMatrix, c_cameraMatrix = _cameraMatrix.getMat();
    CvMat c_rotMatrix = _rotMatrix.getMat(), c_transVect = _transVect.getMat();
    CvMat c_rotMatrixX, *p_rotMatrixX = 0;
    CvMat c_rotMatrixY, *p_rotMatrixY = 0;
    CvMat c_rotMatrixZ, *p_rotMatrixZ = 0;
    CvPoint3D64f *p_eulerAngles = 0;

    if( _rotMatrixX.needed() )
    {
        _rotMatrixX.create(3, 3, type);
        p_rotMatrixX = &(c_rotMatrixX = _rotMatrixX.getMat());
    }
    if( _rotMatrixY.needed() )
    {
        _rotMatrixY.create(3, 3, type);
        p_rotMatrixY = &(c_rotMatrixY = _rotMatrixY.getMat());
    }
    if( _rotMatrixZ.needed() )
    {
        _rotMatrixZ.create(3, 3, type);
        p_rotMatrixZ = &(c_rotMatrixZ = _rotMatrixZ.getMat());
    }
    if( _eulerAngles.needed() )
    {
        _eulerAngles.create(3, 1, CV_64F, -1, true);
        p_eulerAngles = _eulerAngles.getMat().ptr<CvPoint3D64f>();
    }

    cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
                                &c_transVect, p_rotMatrixX, p_rotMatrixY,
                                p_rotMatrixZ, p_eulerAngles);
}

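/*
    Usage sketch (hedged): recovering K and R from a 3x4 projection matrix P
    (assumed CV_64F). The fourth output is a 4x1 homogeneous vector; a common
    post-processing step, shown here as an assumption rather than part of this
    API, is to dehomogenize it to obtain the camera position.

        cv::Mat K, R, t4;
        cv::decomposeProjectionMatrix(P, K, R, t4);
        cv::Mat C = t4.rowRange(0, 3) / t4.at<double>(3); // hypothetical dehomogenization
*/
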
namespace cv
{

// Given per-view image points seen by cameras 1 and 3, adjust the third
// camera's rectified projection matrix P3 so that rows of the rectified third
// image line up with rows of the rectified first image. The rectified
// y-coordinates are related by a 1-D least-squares fit, and the resulting
// scale and offset are folded into P3.
static void adjust3rdMatrix(InputArrayOfArrays _imgpt1_0,
                            InputArrayOfArrays _imgpt3_0,
                            const Mat& cameraMatrix1, const Mat& distCoeffs1,
                            const Mat& cameraMatrix3, const Mat& distCoeffs3,
                            const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
{
    size_t n1 = _imgpt1_0.total(), n3 = _imgpt3_0.total();
    std::vector<Point2f> imgpt1, imgpt3;

    for( int i = 0; i < (int)std::min(n1, n3); i++ )
    {
        Mat pt1 = _imgpt1_0.getMat(i), pt3 = _imgpt3_0.getMat(i);
        int ni1 = pt1.checkVector(2, CV_32F), ni3 = pt3.checkVector(2, CV_32F);
        CV_Assert( ni1 > 0 && ni1 == ni3 );
        const Point2f* pt1data = pt1.ptr<Point2f>();
        const Point2f* pt3data = pt3.ptr<Point2f>();
        std::copy(pt1data, pt1data + ni1, std::back_inserter(imgpt1));
        std::copy(pt3data, pt3data + ni3, std::back_inserter(imgpt3));
    }

    undistortPoints(imgpt1, imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
    undistortPoints(imgpt3, imgpt3, cameraMatrix3, distCoeffs3, R3, P3);

    double y1_ = 0, y2_ = 0, y1y1_ = 0, y1y2_ = 0;
    size_t n = imgpt1.size();

    // accumulate means and second moments of the rectified y-coordinates
    // (y1 = camera 3, y2 = camera 1)
    for( size_t i = 0; i < n; i++ )
    {
        double y1 = imgpt3[i].y, y2 = imgpt1[i].y;

        y1_ += y1; y2_ += y2;
        y1y1_ += y1*y1; y1y2_ += y1*y2;
    }

    y1_ /= n;
    y2_ /= n;
    y1y1_ /= n;
    y1y2_ /= n;

    // least-squares line fit of camera-1 rows against camera-3 rows:
    // slope a = cov(y1,y2)/var(y1), intercept b = mean(y2) - a*mean(y1)
    double a = (y1y2_ - y1_*y2_)/(y1y1_ - y1_*y1_);
    double b = y2_ - a*y1_;

    // scale the focal lengths, principal point and translation of P3 by a,
    // and shift its vertical principal point by b
    P3.at<double>(0,0) *= a;
    P3.at<double>(1,1) *= a;
    P3.at<double>(0,2) = P3.at<double>(0,2)*a;
    P3.at<double>(1,2) = P3.at<double>(1,2)*a + b;
    P3.at<double>(0,3) *= a;
    P3.at<double>(1,3) *= a;
}

}

float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
                             InputArray _cameraMatrix2, InputArray _distCoeffs2,
                             InputArray _cameraMatrix3, InputArray _distCoeffs3,
                             InputArrayOfArrays _imgpt1,
                             InputArrayOfArrays _imgpt3,
                             Size imageSize, InputArray _Rmat12, InputArray _Tmat12,
                             InputArray _Rmat13, InputArray _Tmat13,
                             OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3,
                             OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3,
                             OutputArray _Qmat,
                             double alpha, Size newImgSize,
                             Rect* roi1, Rect* roi2, int flags )
{
    // first, rectify the 1-2 stereo pair
    stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2,
                   imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat,
                   flags, alpha, newImgSize, roi1, roi2 );

    Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat();

    _Rmat3.create(3, 3, CV_64F);
    _Pmat3.create(3, 4, CV_64F);

    Mat P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat();
    Mat R3 = _Rmat3.getMat(), P3 = _Pmat3.getMat();

    // recompute rectification transforms for cameras 1 & 2.
    Mat om, r_r, r_r13;

    if( R13.size() != Size(3,3) )
        Rodrigues(R13, r_r13);
    else
        R13.copyTo(r_r13);

    if( R12.size() == Size(3,3) )
        Rodrigues(R12, om);
    else
        R12.copyTo(om);

    om *= -0.5;
    Rodrigues(om, r_r); // rotate cameras to same orientation by averaging
    Mat_<double> t12 = r_r * T12;

    int idx = fabs(t12(0,0)) > fabs(t12(1,0)) ? 0 : 1;
    double c = t12(idx,0), nt = norm(t12, CV_L2);
    Mat_<double> uu = Mat_<double>::zeros(3,1);
    uu(idx, 0) = c > 0 ? 1 : -1;

    // calculate global Z rotation
    Mat_<double> ww = t12.cross(uu), wR;
    double nw = norm(ww, CV_L2);
    ww *= acos(fabs(c)/nt)/nw;
    Rodrigues(ww, wR);

    // now rotate camera 3 to make its optical axis parallel to cameras 1 and 2.
    R3 = wR*r_r.t()*r_r13.t();
    Mat_<double> t13 = R3 * T13;

    P2.copyTo(P3);
    Mat t = P3.col(3);
    t13.copyTo(t);
    P3.at<double>(0,3) *= P3.at<double>(0,0);
    P3.at<double>(1,3) *= P3.at<double>(1,1);

    // the vertical alignment of the third camera can only be refined when
    // image points for both camera 1 and camera 3 are provided
    if( !_imgpt1.empty() && !_imgpt3.empty() )
        adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
                        _cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);

    return (float)((P3.at<double>(idx,3)/P3.at<double>(idx,idx))/
                   (P2.at<double>(idx,3)/P2.at<double>(idx,idx)));
}

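/*
    Usage sketch (hedged): rectifying a collinear three-camera rig from the 1-2
    and 1-3 extrinsics. All inputs (K1..K3, D1..D3, R12, T12, R13, T13, imgpt1,
    imgpt3) are assumed to come from pairwise stereo calibrations and corner
    detection; the image point lists may be empty, in which case the vertical
    alignment of the third camera is not refined.

        cv::Mat R1, R2, R3, P1, P2, P3, Q;
        cv::Rect roi1, roi2;
        float ratio = cv::rectify3Collinear(K1, D1, K2, D2, K3, D3,
                                            imgpt1, imgpt3, imageSize,
                                            R12, T12, R13, T13,
                                            R1, R2, R3, P1, P2, P3, Q,
                                            -1, imageSize, &roi1, &roi2,
                                            cv::CALIB_ZERO_DISPARITY);
        // 'ratio' relates the rectified 1-3 baseline to the rectified 1-2 baseline
*/
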
/* End of file. */