1 /*M///////////////////////////////////////////////////////////////////////////////////////
2 //
3 //  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
4 //
5 //  By downloading, copying, installing or using the software you agree to this license.
6 //  If you do not agree to this license, do not download, install,
7 //  copy or use the software.
8 //
9 //
10 //                           License Agreement
11 //                For Open Source Computer Vision Library
12 //
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
16 //
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
19 //
20 //   * Redistribution's of source code must retain the above copyright notice,
21 //     this list of conditions and the following disclaimer.
22 //
23 //   * Redistribution's in binary form must reproduce the above copyright notice,
24 //     this list of conditions and the following disclaimer in the documentation
25 //     and/or other materials provided with the distribution.
26 //
27 //   * The name of the copyright holders may not be used to endorse or promote products
28 //     derived from this software without specific prior written permission.
29 //
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
40 //
41 //M*/
42 
43 #include "precomp.hpp"
44 #include "opencv2/imgproc/imgproc_c.h"
45 #include "opencv2/imgproc/detail/distortion_model.hpp"
46 #include "opencv2/calib3d/calib3d_c.h"
47 #include <stdio.h>
48 #include <iterator>
49 
50 /*
51     This is a straightforward port (v3) of the Matlab calibration engine by Jean-Yves Bouguet,
52     which is (to a large extent) based on the paper:
53     Z. Zhang. "A flexible new technique for camera calibration".
54     IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(11):1330-1334, 2000.
55     The initial port was done by Valery Mosyagin.
56 */
57 
58 using namespace cv;
59 
60 // reimplementation of dAB.m
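// Given C = A*B with A of size MxL and B of size LxN, this fills the Jacobians of the
// row-major vectorization vec(C) with respect to vec(A) and vec(B):
//   d vec(C) / d vec(A) = I_M (x) B^T   (an (M*N) x (M*L) matrix),
//   d vec(C) / d vec(B) = A (x) I_N     (an (M*N) x (L*N) matrix),
// where (x) denotes the Kronecker product. Row i = i1*N + i2 of each Jacobian corresponds
// to the entry C(i1, i2): in dABdA it carries the column B(:, i2) written into positions
// i1*L .. i1*L+L-1, and in dABdB it carries the row A(i1, :) scattered over columns j*N + i2.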
61 CV_IMPL void cvCalcMatMulDeriv( const CvMat* A, const CvMat* B, CvMat* dABdA, CvMat* dABdB )
62 {
63     int i, j, M, N, L;
64     int bstep;
65 
66     CV_Assert( CV_IS_MAT(A) && CV_IS_MAT(B) );
67     CV_Assert( CV_ARE_TYPES_EQ(A, B) &&
68         (CV_MAT_TYPE(A->type) == CV_32F || CV_MAT_TYPE(A->type) == CV_64F) );
69     CV_Assert( A->cols == B->rows );
70 
71     M = A->rows;
72     L = A->cols;
73     N = B->cols;
74     bstep = B->step/CV_ELEM_SIZE(B->type);
75 
76     if( dABdA )
77     {
78         CV_Assert( CV_ARE_TYPES_EQ(A, dABdA) &&
79             dABdA->rows == A->rows*B->cols && dABdA->cols == A->rows*A->cols );
80     }
81 
82     if( dABdB )
83     {
84         CV_Assert( CV_ARE_TYPES_EQ(A, dABdB) &&
85             dABdB->rows == A->rows*B->cols && dABdB->cols == B->rows*B->cols );
86     }
87 
88     if( CV_MAT_TYPE(A->type) == CV_32F )
89     {
90         for( i = 0; i < M*N; i++ )
91         {
92             int i1 = i / N,  i2 = i % N;
93 
94             if( dABdA )
95             {
96                 float* dcda = (float*)(dABdA->data.ptr + dABdA->step*i);
97                 const float* b = (const float*)B->data.ptr + i2;
98 
99                 for( j = 0; j < M*L; j++ )
100                     dcda[j] = 0;
101                 for( j = 0; j < L; j++ )
102                     dcda[i1*L + j] = b[j*bstep];
103             }
104 
105             if( dABdB )
106             {
107                 float* dcdb = (float*)(dABdB->data.ptr + dABdB->step*i);
108                 const float* a = (const float*)(A->data.ptr + A->step*i1);
109 
110                 for( j = 0; j < L*N; j++ )
111                     dcdb[j] = 0;
112                 for( j = 0; j < L; j++ )
113                     dcdb[j*N + i2] = a[j];
114             }
115         }
116     }
117     else
118     {
119         for( i = 0; i < M*N; i++ )
120         {
121             int i1 = i / N,  i2 = i % N;
122 
123             if( dABdA )
124             {
125                 double* dcda = (double*)(dABdA->data.ptr + dABdA->step*i);
126                 const double* b = (const double*)B->data.ptr + i2;
127 
128                 for( j = 0; j < M*L; j++ )
129                     dcda[j] = 0;
130                 for( j = 0; j < L; j++ )
131                     dcda[i1*L + j] = b[j*bstep];
132             }
133 
134             if( dABdB )
135             {
136                 double* dcdb = (double*)(dABdB->data.ptr + dABdB->step*i);
137                 const double* a = (const double*)(A->data.ptr + A->step*i1);
138 
139                 for( j = 0; j < L*N; j++ )
140                     dcdb[j] = 0;
141                 for( j = 0; j < L; j++ )
142                     dcdb[j*N + i2] = a[j];
143             }
144         }
145     }
146 }
147 
148 // reimplementation of compose_motion.m
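// Composes two rigid-body motions given as Rodrigues rotation vectors and translations.
// With R_i = rodrigues(r_i), the result is R3 = R2*R1 and t3 = R2*t1 + t2, returned as the
// rotation vector r3 and translation t3. The optional 3x3 outputs are the Jacobians of
// (r3, t3) with respect to (r1, t1, r2, t2), obtained by chaining the Jacobians of the
// Rodrigues conversions and of the matrix products; note that dr3/dt1 = dr3/dt2 = dt3/dr1 = 0
// and dt3/dt2 = I.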
149 CV_IMPL void cvComposeRT( const CvMat* _rvec1, const CvMat* _tvec1,
150              const CvMat* _rvec2, const CvMat* _tvec2,
151              CvMat* _rvec3, CvMat* _tvec3,
152              CvMat* dr3dr1, CvMat* dr3dt1,
153              CvMat* dr3dr2, CvMat* dr3dt2,
154              CvMat* dt3dr1, CvMat* dt3dt1,
155              CvMat* dt3dr2, CvMat* dt3dt2 )
156 {
157     double _r1[3], _r2[3];
158     double _R1[9], _d1[9*3], _R2[9], _d2[9*3];
159     CvMat r1 = cvMat(3,1,CV_64F,_r1), r2 = cvMat(3,1,CV_64F,_r2);
160     CvMat R1 = cvMat(3,3,CV_64F,_R1), R2 = cvMat(3,3,CV_64F,_R2);
161     CvMat dR1dr1 = cvMat(9,3,CV_64F,_d1), dR2dr2 = cvMat(9,3,CV_64F,_d2);
162 
163     CV_Assert( CV_IS_MAT(_rvec1) && CV_IS_MAT(_rvec2) );
164 
165     CV_Assert( CV_MAT_TYPE(_rvec1->type) == CV_32F ||
166                CV_MAT_TYPE(_rvec1->type) == CV_64F );
167 
168     CV_Assert( _rvec1->rows == 3 && _rvec1->cols == 1 && CV_ARE_SIZES_EQ(_rvec1, _rvec2) );
169 
170     cvConvert( _rvec1, &r1 );
171     cvConvert( _rvec2, &r2 );
172 
173     cvRodrigues2( &r1, &R1, &dR1dr1 );
174     cvRodrigues2( &r2, &R2, &dR2dr2 );
175 
176     if( _rvec3 || dr3dr1 || dr3dr2 )
177     {
178         double _r3[3], _R3[9], _dR3dR1[9*9], _dR3dR2[9*9], _dr3dR3[9*3];
179         double _W1[9*3], _W2[3*3];
180         CvMat r3 = cvMat(3,1,CV_64F,_r3), R3 = cvMat(3,3,CV_64F,_R3);
181         CvMat dR3dR1 = cvMat(9,9,CV_64F,_dR3dR1), dR3dR2 = cvMat(9,9,CV_64F,_dR3dR2);
182         CvMat dr3dR3 = cvMat(3,9,CV_64F,_dr3dR3);
183         CvMat W1 = cvMat(3,9,CV_64F,_W1), W2 = cvMat(3,3,CV_64F,_W2);
184 
185         cvMatMul( &R2, &R1, &R3 );
186         cvCalcMatMulDeriv( &R2, &R1, &dR3dR2, &dR3dR1 );
187 
188         cvRodrigues2( &R3, &r3, &dr3dR3 );
189 
190         if( _rvec3 )
191             cvConvert( &r3, _rvec3 );
192 
193         if( dr3dr1 )
194         {
195             cvMatMul( &dr3dR3, &dR3dR1, &W1 );
196             cvMatMul( &W1, &dR1dr1, &W2 );
197             cvConvert( &W2, dr3dr1 );
198         }
199 
200         if( dr3dr2 )
201         {
202             cvMatMul( &dr3dR3, &dR3dR2, &W1 );
203             cvMatMul( &W1, &dR2dr2, &W2 );
204             cvConvert( &W2, dr3dr2 );
205         }
206     }
207 
208     if( dr3dt1 )
209         cvZero( dr3dt1 );
210     if( dr3dt2 )
211         cvZero( dr3dt2 );
212 
213     if( _tvec3 || dt3dr2 || dt3dt1 )
214     {
215         double _t1[3], _t2[3], _t3[3], _dxdR2[3*9], _dxdt1[3*3], _W3[3*3];
216         CvMat t1 = cvMat(3,1,CV_64F,_t1), t2 = cvMat(3,1,CV_64F,_t2);
217         CvMat t3 = cvMat(3,1,CV_64F,_t3);
218         CvMat dxdR2 = cvMat(3, 9, CV_64F, _dxdR2);
219         CvMat dxdt1 = cvMat(3, 3, CV_64F, _dxdt1);
220         CvMat W3 = cvMat(3, 3, CV_64F, _W3);
221 
222         CV_Assert( CV_IS_MAT(_tvec1) && CV_IS_MAT(_tvec2) );
223         CV_Assert( CV_ARE_SIZES_EQ(_tvec1, _tvec2) && CV_ARE_SIZES_EQ(_tvec1, _rvec1) );
224 
225         cvConvert( _tvec1, &t1 );
226         cvConvert( _tvec2, &t2 );
227         cvMatMulAdd( &R2, &t1, &t2, &t3 );
228 
229         if( _tvec3 )
230             cvConvert( &t3, _tvec3 );
231 
232         if( dt3dr2 || dt3dt1 )
233         {
234             cvCalcMatMulDeriv( &R2, &t1, &dxdR2, &dxdt1 );
235             if( dt3dr2 )
236             {
237                 cvMatMul( &dxdR2, &dR2dr2, &W3 );
238                 cvConvert( &W3, dt3dr2 );
239             }
240             if( dt3dt1 )
241                 cvConvert( &dxdt1, dt3dt1 );
242         }
243     }
244 
245     if( dt3dt2 )
246         cvSetIdentity( dt3dt2 );
247     if( dt3dr1 )
248         cvZero( dt3dr1 );
249 }
250 
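// Converts a rotation vector (1x3 or 3x1) into the corresponding 3x3 rotation matrix via
//   R = cos(theta)*I + (1 - cos(theta))*r*r^T + sin(theta)*[r]_x,  theta = |rvec|, r = rvec/theta,
// or performs the inverse conversion when src is 3x3 (the input is first projected onto the
// closest rotation matrix through an SVD). If requested, the 3x9 or 9x3 Jacobian of the output
// with respect to the input is returned as well.
//
// A minimal usage sketch (illustrative only; the buffer names are hypothetical):
//     double rv[3] = { 0.1, 0.2, 0.3 }, Rm[9];
//     CvMat rvec = cvMat( 3, 1, CV_64F, rv );
//     CvMat Rmat = cvMat( 3, 3, CV_64F, Rm );
//     cvRodrigues2( &rvec, &Rmat, 0 );   // rotation vector -> rotation matrix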
251 CV_IMPL int cvRodrigues2( const CvMat* src, CvMat* dst, CvMat* jacobian )
252 {
253     double J[27] = {0};
254     CvMat matJ = cvMat( 3, 9, CV_64F, J );
255 
256     if( !CV_IS_MAT(src) )
257         CV_Error( !src ? CV_StsNullPtr : CV_StsBadArg, "Input argument is not a valid matrix" );
258 
259     if( !CV_IS_MAT(dst) )
260         CV_Error( !dst ? CV_StsNullPtr : CV_StsBadArg,
261         "The first output argument is not a valid matrix" );
262 
263     int depth = CV_MAT_DEPTH(src->type);
264     int elem_size = CV_ELEM_SIZE(depth);
265 
266     if( depth != CV_32F && depth != CV_64F )
267         CV_Error( CV_StsUnsupportedFormat, "The matrices must have 32f or 64f data type" );
268 
269     if( !CV_ARE_DEPTHS_EQ(src, dst) )
270         CV_Error( CV_StsUnmatchedFormats, "All the matrices must have the same data type" );
271 
272     if( jacobian )
273     {
274         if( !CV_IS_MAT(jacobian) )
275             CV_Error( CV_StsBadArg, "Jacobian is not a valid matrix" );
276 
277         if( !CV_ARE_DEPTHS_EQ(src, jacobian) || CV_MAT_CN(jacobian->type) != 1 )
278             CV_Error( CV_StsUnmatchedFormats, "Jacobian must have 32fC1 or 64fC1 datatype" );
279 
280         if( (jacobian->rows != 9 || jacobian->cols != 3) &&
281             (jacobian->rows != 3 || jacobian->cols != 9))
282             CV_Error( CV_StsBadSize, "Jacobian must be 3x9 or 9x3" );
283     }
284 
285     if( src->cols == 1 || src->rows == 1 )
286     {
287         int step = src->rows > 1 ? src->step / elem_size : 1;
288 
289         if( src->rows + src->cols*CV_MAT_CN(src->type) - 1 != 3 )
290             CV_Error( CV_StsBadSize, "Input matrix must be 1x3, 3x1 or 3x3" );
291 
292         if( dst->rows != 3 || dst->cols != 3 || CV_MAT_CN(dst->type) != 1 )
293             CV_Error( CV_StsBadSize, "Output matrix must be 3x3, single-channel floating point matrix" );
294 
295         Point3d r;
296         if( depth == CV_32F )
297         {
298             r.x = src->data.fl[0];
299             r.y = src->data.fl[step];
300             r.z = src->data.fl[step*2];
301         }
302         else
303         {
304             r.x = src->data.db[0];
305             r.y = src->data.db[step];
306             r.z = src->data.db[step*2];
307         }
308 
309         double theta = norm(r);
310 
311         if( theta < DBL_EPSILON )
312         {
313             cvSetIdentity( dst );
314 
315             if( jacobian )
316             {
317                 memset( J, 0, sizeof(J) );
318                 J[5] = J[15] = J[19] = -1;
319                 J[7] = J[11] = J[21] = 1;
320             }
321         }
322         else
323         {
324             double c = cos(theta);
325             double s = sin(theta);
326             double c1 = 1. - c;
327             double itheta = theta ? 1./theta : 0.;
328 
329             r *= itheta;
330 
331             Matx33d rrt( r.x*r.x, r.x*r.y, r.x*r.z, r.x*r.y, r.y*r.y, r.y*r.z, r.x*r.z, r.y*r.z, r.z*r.z );
332             Matx33d r_x(    0, -r.z,  r.y,
333                           r.z,    0, -r.x,
334                          -r.y,  r.x,    0 );
335 
336             // R = cos(theta)*I + (1 - cos(theta))*r*rT + sin(theta)*[r_x]
337             Matx33d R = c*Matx33d::eye() + c1*rrt + s*r_x;
338 
339             Mat(R).convertTo(cvarrToMat(dst), dst->type);
340 
341             if( jacobian )
342             {
343                 const double I[] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 };
344                 double drrt[] = { r.x+r.x, r.y, r.z, r.y, 0, 0, r.z, 0, 0,
345                                   0, r.x, 0, r.x, r.y+r.y, r.z, 0, r.z, 0,
346                                   0, 0, r.x, 0, 0, r.y, r.x, r.y, r.z+r.z };
347                 double d_r_x_[] = { 0, 0, 0, 0, 0, -1, 0, 1, 0,
348                                     0, 0, 1, 0, 0, 0, -1, 0, 0,
349                                     0, -1, 0, 1, 0, 0, 0, 0, 0 };
350                 for( int i = 0; i < 3; i++ )
351                 {
352                     double ri = i == 0 ? r.x : i == 1 ? r.y : r.z;
353                     double a0 = -s*ri, a1 = (s - 2*c1*itheta)*ri, a2 = c1*itheta;
354                     double a3 = (c - s*itheta)*ri, a4 = s*itheta;
355                     for( int k = 0; k < 9; k++ )
356                         J[i*9+k] = a0*I[k] + a1*rrt.val[k] + a2*drrt[i*9+k] +
357                                    a3*r_x.val[k] + a4*d_r_x_[i*9+k];
358                 }
359             }
360         }
361     }
362     else if( src->cols == 3 && src->rows == 3 )
363     {
364         Matx33d U, Vt;
365         Vec3d W;
366         double theta, s, c;
367         int step = dst->rows > 1 ? dst->step / elem_size : 1;
368 
369         if( (dst->rows != 1 || dst->cols*CV_MAT_CN(dst->type) != 3) &&
370             (dst->rows != 3 || dst->cols != 1 || CV_MAT_CN(dst->type) != 1))
371             CV_Error( CV_StsBadSize, "Output matrix must be 1x3 or 3x1" );
372 
373         Matx33d R = cvarrToMat(src);
374 
375         if( !checkRange(R, true, NULL, -100, 100) )
376         {
377             cvZero(dst);
378             if( jacobian )
379                 cvZero(jacobian);
380             return 0;
381         }
382 
383         SVD::compute(R, W, U, Vt);
384         R = U*Vt;
385 
386         Point3d r(R(2, 1) - R(1, 2), R(0, 2) - R(2, 0), R(1, 0) - R(0, 1));
387 
388         s = std::sqrt((r.x*r.x + r.y*r.y + r.z*r.z)*0.25);
389         c = (R(0, 0) + R(1, 1) + R(2, 2) - 1)*0.5;
390         c = c > 1. ? 1. : c < -1. ? -1. : c;
391         theta = acos(c);
392 
393         if( s < 1e-5 )
394         {
395             double t;
396 
397             if( c > 0 )
398                 r = Point3d(0, 0, 0);
399             else
400             {
401                 t = (R(0, 0) + 1)*0.5;
402                 r.x = std::sqrt(MAX(t,0.));
403                 t = (R(1, 1) + 1)*0.5;
404                 r.y = std::sqrt(MAX(t,0.))*(R(0, 1) < 0 ? -1. : 1.);
405                 t = (R(2, 2) + 1)*0.5;
406                 r.z = std::sqrt(MAX(t,0.))*(R(0, 2) < 0 ? -1. : 1.);
407                 if( fabs(r.x) < fabs(r.y) && fabs(r.x) < fabs(r.z) && (R(1, 2) > 0) != (r.y*r.z > 0) )
408                     r.z = -r.z;
409                 theta /= norm(r);
410                 r *= theta;
411             }
412 
413             if( jacobian )
414             {
415                 memset( J, 0, sizeof(J) );
416                 if( c > 0 )
417                 {
418                     J[5] = J[15] = J[19] = -0.5;
419                     J[7] = J[11] = J[21] = 0.5;
420                 }
421             }
422         }
423         else
424         {
425             double vth = 1/(2*s);
426 
427             if( jacobian )
428             {
429                 double t, dtheta_dtr = -1./s;
430                 // var1 = [vth;theta]
431                 // var = [om1;var1] = [om1;vth;theta]
432                 double dvth_dtheta = -vth*c/s;
433                 double d1 = 0.5*dvth_dtheta*dtheta_dtr;
434                 double d2 = 0.5*dtheta_dtr;
435                 // dvar1/dR = dvar1/dtheta*dtheta/dR = [dvth/dtheta; 1] * dtheta/dtr * dtr/dR
436                 double dvardR[5*9] =
437                 {
438                     0, 0, 0, 0, 0, 1, 0, -1, 0,
439                     0, 0, -1, 0, 0, 0, 1, 0, 0,
440                     0, 1, 0, -1, 0, 0, 0, 0, 0,
441                     d1, 0, 0, 0, d1, 0, 0, 0, d1,
442                     d2, 0, 0, 0, d2, 0, 0, 0, d2
443                 };
444                 // var2 = [om;theta]
445                 double dvar2dvar[] =
446                 {
447                     vth, 0, 0, r.x, 0,
448                     0, vth, 0, r.y, 0,
449                     0, 0, vth, r.z, 0,
450                     0, 0, 0, 0, 1
451                 };
452                 double domegadvar2[] =
453                 {
454                     theta, 0, 0, r.x*vth,
455                     0, theta, 0, r.y*vth,
456                     0, 0, theta, r.z*vth
457                 };
458 
459                 CvMat _dvardR = cvMat( 5, 9, CV_64FC1, dvardR );
460                 CvMat _dvar2dvar = cvMat( 4, 5, CV_64FC1, dvar2dvar );
461                 CvMat _domegadvar2 = cvMat( 3, 4, CV_64FC1, domegadvar2 );
462                 double t0[3*5];
463                 CvMat _t0 = cvMat( 3, 5, CV_64FC1, t0 );
464 
465                 cvMatMul( &_domegadvar2, &_dvar2dvar, &_t0 );
466                 cvMatMul( &_t0, &_dvardR, &matJ );
467 
468                 // transpose every row of matJ (treat the rows as 3x3 matrices)
469                 CV_SWAP(J[1], J[3], t); CV_SWAP(J[2], J[6], t); CV_SWAP(J[5], J[7], t);
470                 CV_SWAP(J[10], J[12], t); CV_SWAP(J[11], J[15], t); CV_SWAP(J[14], J[16], t);
471                 CV_SWAP(J[19], J[21], t); CV_SWAP(J[20], J[24], t); CV_SWAP(J[23], J[25], t);
472             }
473 
474             vth *= theta;
475             r *= vth;
476         }
477 
478         if( depth == CV_32F )
479         {
480             dst->data.fl[0] = (float)r.x;
481             dst->data.fl[step] = (float)r.y;
482             dst->data.fl[step*2] = (float)r.z;
483         }
484         else
485         {
486             dst->data.db[0] = r.x;
487             dst->data.db[step] = r.y;
488             dst->data.db[step*2] = r.z;
489         }
490     }
491     else
492     {
493         CV_Error(CV_StsBadSize, "Input matrix must be 1x3 or 3x1 for a rotation vector, or 3x3 for a rotation matrix");
494     }
495 
496     if( jacobian )
497     {
498         if( depth == CV_32F )
499         {
500             if( jacobian->rows == matJ.rows )
501                 cvConvert( &matJ, jacobian );
502             else
503             {
504                 float Jf[3*9];
505                 CvMat _Jf = cvMat( matJ.rows, matJ.cols, CV_32FC1, Jf );
506                 cvConvert( &matJ, &_Jf );
507                 cvTranspose( &_Jf, jacobian );
508             }
509         }
510         else if( jacobian->rows == matJ.rows )
511             cvCopy( &matJ, jacobian );
512         else
513             cvTranspose( &matJ, jacobian );
514     }
515 
516     return 1;
517 }
518 
519 
520 static const char* cvDistCoeffErr = "Distortion coefficients must be 1x4, 4x1, 1x5, 5x1, 1x8, 8x1, 1x12, 12x1, 1x14 or 14x1 floating-point vector";
521 
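// Projects 3D object points into the image plane given the extrinsic parameters (r_vec, t_vec)
// and the intrinsic parameters (A, distCoeffs); optionally computes the Jacobians of the image
// points with respect to the rotation, translation, focal lengths, principal point and distortion
// coefficients (these blocks are what the calibration LM solvers consume). The distortion vector
// is interpreted as (k1, k2, p1, p2[, k3[, k4, k5, k6[, s1, s2, s3, s4[, tau_x, tau_y]]]]);
// trailing coefficients that are not supplied are treated as zero.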
522 CV_IMPL void cvProjectPoints2( const CvMat* objectPoints,
523                   const CvMat* r_vec,
524                   const CvMat* t_vec,
525                   const CvMat* A,
526                   const CvMat* distCoeffs,
527                   CvMat* imagePoints, CvMat* dpdr,
528                   CvMat* dpdt, CvMat* dpdf,
529                   CvMat* dpdc, CvMat* dpdk,
530                   double aspectRatio )
531 {
532     Ptr<CvMat> matM, _m;
533     Ptr<CvMat> _dpdr, _dpdt, _dpdc, _dpdf, _dpdk;
534 
535     int i, j, count;
536     int calc_derivatives;
537     const CvPoint3D64f* M;
538     CvPoint2D64f* m;
539     double r[3], R[9], dRdr[27], t[3], a[9], k[14] = {0,0,0,0,0,0,0,0,0,0,0,0,0,0}, fx, fy, cx, cy;
540     Matx33d matTilt = Matx33d::eye();
541     Matx33d dMatTiltdTauX(0,0,0,0,0,0,0,-1,0);
542     Matx33d dMatTiltdTauY(0,0,0,0,0,0,1,0,0);
543     CvMat _r, _t, _a = cvMat( 3, 3, CV_64F, a ), _k;
544     CvMat matR = cvMat( 3, 3, CV_64F, R ), _dRdr = cvMat( 3, 9, CV_64F, dRdr );
545     double *dpdr_p = 0, *dpdt_p = 0, *dpdk_p = 0, *dpdf_p = 0, *dpdc_p = 0;
546     int dpdr_step = 0, dpdt_step = 0, dpdk_step = 0, dpdf_step = 0, dpdc_step = 0;
547     bool fixedAspectRatio = aspectRatio > FLT_EPSILON;
548 
549     if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(r_vec) ||
550         !CV_IS_MAT(t_vec) || !CV_IS_MAT(A) ||
551         /*!CV_IS_MAT(distCoeffs) ||*/ !CV_IS_MAT(imagePoints) )
552         CV_Error( CV_StsBadArg, "One of the required arguments is not a valid matrix" );
553 
554     int total = objectPoints->rows * objectPoints->cols * CV_MAT_CN(objectPoints->type);
555     if(total % 3 != 0)
556     {
557         // support for homogeneous coordinates has been dropped because it causes ambiguity in the interpretation of the input data
558         CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
559     }
560     count = total / 3;
561 
562     if( CV_IS_CONT_MAT(objectPoints->type) &&
563         (CV_MAT_DEPTH(objectPoints->type) == CV_32F || CV_MAT_DEPTH(objectPoints->type) == CV_64F)&&
564         ((objectPoints->rows == 1 && CV_MAT_CN(objectPoints->type) == 3) ||
565         (objectPoints->rows == count && CV_MAT_CN(objectPoints->type)*objectPoints->cols == 3) ||
566         (objectPoints->rows == 3 && CV_MAT_CN(objectPoints->type) == 1 && objectPoints->cols == count)))
567     {
568         matM.reset(cvCreateMat( objectPoints->rows, objectPoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(objectPoints->type)) ));
569         cvConvert(objectPoints, matM);
570     }
571     else
572     {
573 //        matM = cvCreateMat( 1, count, CV_64FC3 );
574 //        cvConvertPointsHomogeneous( objectPoints, matM );
575         CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
576     }
577 
578     if( CV_IS_CONT_MAT(imagePoints->type) &&
579         (CV_MAT_DEPTH(imagePoints->type) == CV_32F || CV_MAT_DEPTH(imagePoints->type) == CV_64F) &&
580         ((imagePoints->rows == 1 && CV_MAT_CN(imagePoints->type) == 2) ||
581         (imagePoints->rows == count && CV_MAT_CN(imagePoints->type)*imagePoints->cols == 2) ||
582         (imagePoints->rows == 2 && CV_MAT_CN(imagePoints->type) == 1 && imagePoints->cols == count)))
583     {
584         _m.reset(cvCreateMat( imagePoints->rows, imagePoints->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(imagePoints->type)) ));
585         cvConvert(imagePoints, _m);
586     }
587     else
588     {
589 //        _m = cvCreateMat( 1, count, CV_64FC2 );
590         CV_Error( CV_StsBadArg, "Homogeneous coordinates are not supported" );
591     }
592 
593     M = (CvPoint3D64f*)matM->data.db;
594     m = (CvPoint2D64f*)_m->data.db;
595 
596     if( (CV_MAT_DEPTH(r_vec->type) != CV_64F && CV_MAT_DEPTH(r_vec->type) != CV_32F) ||
597         (((r_vec->rows != 1 && r_vec->cols != 1) ||
598         r_vec->rows*r_vec->cols*CV_MAT_CN(r_vec->type) != 3) &&
599         ((r_vec->rows != 3 && r_vec->cols != 3) || CV_MAT_CN(r_vec->type) != 1)))
600         CV_Error( CV_StsBadArg, "Rotation must be represented by 1x3 or 3x1 "
601                   "floating-point rotation vector, or 3x3 rotation matrix" );
602 
603     if( r_vec->rows == 3 && r_vec->cols == 3 )
604     {
605         _r = cvMat( 3, 1, CV_64FC1, r );
606         cvRodrigues2( r_vec, &_r );
607         cvRodrigues2( &_r, &matR, &_dRdr );
608         cvCopy( r_vec, &matR );
609     }
610     else
611     {
612         _r = cvMat( r_vec->rows, r_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(r_vec->type)), r );
613         cvConvert( r_vec, &_r );
614         cvRodrigues2( &_r, &matR, &_dRdr );
615     }
616 
617     if( (CV_MAT_DEPTH(t_vec->type) != CV_64F && CV_MAT_DEPTH(t_vec->type) != CV_32F) ||
618         (t_vec->rows != 1 && t_vec->cols != 1) ||
619         t_vec->rows*t_vec->cols*CV_MAT_CN(t_vec->type) != 3 )
620         CV_Error( CV_StsBadArg,
621             "Translation vector must be 1x3 or 3x1 floating-point vector" );
622 
623     _t = cvMat( t_vec->rows, t_vec->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(t_vec->type)), t );
624     cvConvert( t_vec, &_t );
625 
626     if( (CV_MAT_TYPE(A->type) != CV_64FC1 && CV_MAT_TYPE(A->type) != CV_32FC1) ||
627         A->rows != 3 || A->cols != 3 )
628         CV_Error( CV_StsBadArg, "Intrinsic parameters must be 3x3 floating-point matrix" );
629 
630     cvConvert( A, &_a );
631     fx = a[0]; fy = a[4];
632     cx = a[2]; cy = a[5];
633 
634     if( fixedAspectRatio )
635         fx = fy*aspectRatio;
636 
637     if( distCoeffs )
638     {
639         if( !CV_IS_MAT(distCoeffs) ||
640             (CV_MAT_DEPTH(distCoeffs->type) != CV_64F &&
641             CV_MAT_DEPTH(distCoeffs->type) != CV_32F) ||
642             (distCoeffs->rows != 1 && distCoeffs->cols != 1) ||
643             (distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 4 &&
644             distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 5 &&
645             distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 8 &&
646             distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 12 &&
647             distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) != 14) )
648             CV_Error( CV_StsBadArg, cvDistCoeffErr );
649 
650         _k = cvMat( distCoeffs->rows, distCoeffs->cols,
651                     CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k );
652         cvConvert( distCoeffs, &_k );
653         if(k[12] != 0 || k[13] != 0)
654         {
655           detail::computeTiltProjectionMatrix(k[12], k[13],
656             &matTilt, &dMatTiltdTauX, &dMatTiltdTauY);
657         }
658     }
659 
660     if( dpdr )
661     {
662         if( !CV_IS_MAT(dpdr) ||
663             (CV_MAT_TYPE(dpdr->type) != CV_32FC1 &&
664             CV_MAT_TYPE(dpdr->type) != CV_64FC1) ||
665             dpdr->rows != count*2 || dpdr->cols != 3 )
666             CV_Error( CV_StsBadArg, "dp/drot must be 2Nx3 floating-point matrix" );
667 
668         if( CV_MAT_TYPE(dpdr->type) == CV_64FC1 )
669         {
670             _dpdr.reset(cvCloneMat(dpdr));
671         }
672         else
673             _dpdr.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
674         dpdr_p = _dpdr->data.db;
675         dpdr_step = _dpdr->step/sizeof(dpdr_p[0]);
676     }
677 
678     if( dpdt )
679     {
680         if( !CV_IS_MAT(dpdt) ||
681             (CV_MAT_TYPE(dpdt->type) != CV_32FC1 &&
682             CV_MAT_TYPE(dpdt->type) != CV_64FC1) ||
683             dpdt->rows != count*2 || dpdt->cols != 3 )
684             CV_Error( CV_StsBadArg, "dp/dT must be 2Nx3 floating-point matrix" );
685 
686         if( CV_MAT_TYPE(dpdt->type) == CV_64FC1 )
687         {
688             _dpdt.reset(cvCloneMat(dpdt));
689         }
690         else
691             _dpdt.reset(cvCreateMat( 2*count, 3, CV_64FC1 ));
692         dpdt_p = _dpdt->data.db;
693         dpdt_step = _dpdt->step/sizeof(dpdt_p[0]);
694     }
695 
696     if( dpdf )
697     {
698         if( !CV_IS_MAT(dpdf) ||
699             (CV_MAT_TYPE(dpdf->type) != CV_32FC1 && CV_MAT_TYPE(dpdf->type) != CV_64FC1) ||
700             dpdf->rows != count*2 || dpdf->cols != 2 )
701             CV_Error( CV_StsBadArg, "dp/df must be 2Nx2 floating-point matrix" );
702 
703         if( CV_MAT_TYPE(dpdf->type) == CV_64FC1 )
704         {
705             _dpdf.reset(cvCloneMat(dpdf));
706         }
707         else
708             _dpdf.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
709         dpdf_p = _dpdf->data.db;
710         dpdf_step = _dpdf->step/sizeof(dpdf_p[0]);
711     }
712 
713     if( dpdc )
714     {
715         if( !CV_IS_MAT(dpdc) ||
716             (CV_MAT_TYPE(dpdc->type) != CV_32FC1 && CV_MAT_TYPE(dpdc->type) != CV_64FC1) ||
717             dpdc->rows != count*2 || dpdc->cols != 2 )
718             CV_Error( CV_StsBadArg, "dp/dc must be 2Nx2 floating-point matrix" );
719 
720         if( CV_MAT_TYPE(dpdc->type) == CV_64FC1 )
721         {
722             _dpdc.reset(cvCloneMat(dpdc));
723         }
724         else
725             _dpdc.reset(cvCreateMat( 2*count, 2, CV_64FC1 ));
726         dpdc_p = _dpdc->data.db;
727         dpdc_step = _dpdc->step/sizeof(dpdc_p[0]);
728     }
729 
730     if( dpdk )
731     {
732         if( !CV_IS_MAT(dpdk) ||
733             (CV_MAT_TYPE(dpdk->type) != CV_32FC1 && CV_MAT_TYPE(dpdk->type) != CV_64FC1) ||
734             dpdk->rows != count*2 || (dpdk->cols != 14 && dpdk->cols != 12 && dpdk->cols != 8 && dpdk->cols != 5 && dpdk->cols != 4 && dpdk->cols != 2) )
735             CV_Error( CV_StsBadArg, "dp/df must be 2Nx14, 2Nx12, 2Nx8, 2Nx5, 2Nx4 or 2Nx2 floating-point matrix" );
736 
737         if( !distCoeffs )
738             CV_Error( CV_StsNullPtr, "distCoeffs is NULL while dpdk is not" );
739 
740         if( CV_MAT_TYPE(dpdk->type) == CV_64FC1 )
741         {
742             _dpdk.reset(cvCloneMat(dpdk));
743         }
744         else
745             _dpdk.reset(cvCreateMat( dpdk->rows, dpdk->cols, CV_64FC1 ));
746         dpdk_p = _dpdk->data.db;
747         dpdk_step = _dpdk->step/sizeof(dpdk_p[0]);
748     }
749 
750     calc_derivatives = dpdr || dpdt || dpdf || dpdc || dpdk;
751 
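    // The loop below applies the pinhole projection with the full distortion model:
    //   x' = Xc/Zc, y' = Yc/Zc,  r^2 = x'^2 + y'^2
    //   cdist   = 1 + k1*r^2 + k2*r^4 + k3*r^6
    //   icdist2 = 1/(1 + k4*r^2 + k5*r^4 + k6*r^6)
    //   x'' = x'*cdist*icdist2 + 2*p1*x'*y' + p2*(r^2 + 2*x'^2) + s1*r^2 + s2*r^4
    //   y'' = y'*cdist*icdist2 + p1*(r^2 + 2*y'^2) + 2*p2*x'*y' + s3*r^2 + s4*r^4
    //   (x''', y''') = tilt(x'', y'')           // perspective map defined by tau_x, tau_y
    //   u = fx*x''' + cx,  v = fy*y''' + cy
    // and, when requested, differentiates these expressions term by term to fill the Jacobians.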
752     for( i = 0; i < count; i++ )
753     {
754         double X = M[i].x, Y = M[i].y, Z = M[i].z;
755         double x = R[0]*X + R[1]*Y + R[2]*Z + t[0];
756         double y = R[3]*X + R[4]*Y + R[5]*Z + t[1];
757         double z = R[6]*X + R[7]*Y + R[8]*Z + t[2];
758         double r2, r4, r6, a1, a2, a3, cdist, icdist2;
759         double xd, yd, xd0, yd0, invProj;
760         Vec3d vecTilt;
761         Vec3d dVecTilt;
762         Matx22d dMatTilt;
763         Vec2d dXdYd;
764 
765         z = z ? 1./z : 1;
766         x *= z; y *= z;
767 
768         r2 = x*x + y*y;
769         r4 = r2*r2;
770         r6 = r4*r2;
771         a1 = 2*x*y;
772         a2 = r2 + 2*x*x;
773         a3 = r2 + 2*y*y;
774         cdist = 1 + k[0]*r2 + k[1]*r4 + k[4]*r6;
775         icdist2 = 1./(1 + k[5]*r2 + k[6]*r4 + k[7]*r6);
776         xd0 = x*cdist*icdist2 + k[2]*a1 + k[3]*a2 + k[8]*r2+k[9]*r4;
777         yd0 = y*cdist*icdist2 + k[2]*a3 + k[3]*a1 + k[10]*r2+k[11]*r4;
778 
779         // additional distortion by projecting onto a tilt plane
780         vecTilt = matTilt*Vec3d(xd0, yd0, 1);
781         invProj = vecTilt(2) ? 1./vecTilt(2) : 1;
782         xd = invProj * vecTilt(0);
783         yd = invProj * vecTilt(1);
784 
785         m[i].x = xd*fx + cx;
786         m[i].y = yd*fy + cy;
787 
788         if( calc_derivatives )
789         {
790             if( dpdc_p )
791             {
792                 dpdc_p[0] = 1; dpdc_p[1] = 0; // dp_xdc_x; dp_xdc_y
793                 dpdc_p[dpdc_step] = 0;
794                 dpdc_p[dpdc_step+1] = 1;
795                 dpdc_p += dpdc_step*2;
796             }
797 
798             if( dpdf_p )
799             {
800                 if( fixedAspectRatio )
801                 {
802                     dpdf_p[0] = 0; dpdf_p[1] = xd*aspectRatio; // dp_xdf_x; dp_xdf_y
803                     dpdf_p[dpdf_step] = 0;
804                     dpdf_p[dpdf_step+1] = yd;
805                 }
806                 else
807                 {
808                     dpdf_p[0] = xd; dpdf_p[1] = 0;
809                     dpdf_p[dpdf_step] = 0;
810                     dpdf_p[dpdf_step+1] = yd;
811                 }
812                 dpdf_p += dpdf_step*2;
813             }
814             for (int row = 0; row < 2; ++row)
815                 for (int col = 0; col < 2; ++col)
816                     dMatTilt(row,col) = matTilt(row,col)*vecTilt(2)
817                       - matTilt(2,col)*vecTilt(row);
818             double invProjSquare = (invProj*invProj);
819             dMatTilt *= invProjSquare;
820             if( dpdk_p )
821             {
822                 dXdYd = dMatTilt*Vec2d(x*icdist2*r2, y*icdist2*r2);
823                 dpdk_p[0] = fx*dXdYd(0);
824                 dpdk_p[dpdk_step] = fy*dXdYd(1);
825                 dXdYd = dMatTilt*Vec2d(x*icdist2*r4, y*icdist2*r4);
826                 dpdk_p[1] = fx*dXdYd(0);
827                 dpdk_p[dpdk_step+1] = fy*dXdYd(1);
828                 if( _dpdk->cols > 2 )
829                 {
830                     dXdYd = dMatTilt*Vec2d(a1, a3);
831                     dpdk_p[2] = fx*dXdYd(0);
832                     dpdk_p[dpdk_step+2] = fy*dXdYd(1);
833                     dXdYd = dMatTilt*Vec2d(a2, a1);
834                     dpdk_p[3] = fx*dXdYd(0);
835                     dpdk_p[dpdk_step+3] = fy*dXdYd(1);
836                     if( _dpdk->cols > 4 )
837                     {
838                         dXdYd = dMatTilt*Vec2d(x*icdist2*r6, y*icdist2*r6);
839                         dpdk_p[4] = fx*dXdYd(0);
840                         dpdk_p[dpdk_step+4] = fy*dXdYd(1);
841 
842                         if( _dpdk->cols > 5 )
843                         {
844                             dXdYd = dMatTilt*Vec2d(
845                               x*cdist*(-icdist2)*icdist2*r2, y*cdist*(-icdist2)*icdist2*r2);
846                             dpdk_p[5] = fx*dXdYd(0);
847                             dpdk_p[dpdk_step+5] = fy*dXdYd(1);
848                             dXdYd = dMatTilt*Vec2d(
849                               x*cdist*(-icdist2)*icdist2*r4, y*cdist*(-icdist2)*icdist2*r4);
850                             dpdk_p[6] = fx*dXdYd(0);
851                             dpdk_p[dpdk_step+6] = fy*dXdYd(1);
852                             dXdYd = dMatTilt*Vec2d(
853                               x*cdist*(-icdist2)*icdist2*r6, y*cdist*(-icdist2)*icdist2*r6);
854                             dpdk_p[7] = fx*dXdYd(0);
855                             dpdk_p[dpdk_step+7] = fy*dXdYd(1);
856                             if( _dpdk->cols > 8 )
857                             {
858                                 dXdYd = dMatTilt*Vec2d(r2, 0);
859                                 dpdk_p[8] = fx*dXdYd(0); //s1
860                                 dpdk_p[dpdk_step+8] = fy*dXdYd(1); //s1
861                                 dXdYd = dMatTilt*Vec2d(r4, 0);
862                                 dpdk_p[9] = fx*dXdYd(0); //s2
863                                 dpdk_p[dpdk_step+9] = fy*dXdYd(1); //s2
864                                 dXdYd = dMatTilt*Vec2d(0, r2);
865                                 dpdk_p[10] = fx*dXdYd(0);//s3
866                                 dpdk_p[dpdk_step+10] = fy*dXdYd(1); //s3
867                                 dXdYd = dMatTilt*Vec2d(0, r4);
868                                 dpdk_p[11] = fx*dXdYd(0);//s4
869                                 dpdk_p[dpdk_step+11] = fy*dXdYd(1); //s4
870                                 if( _dpdk->cols > 12 )
871                                 {
872                                     dVecTilt = dMatTiltdTauX * Vec3d(xd0, yd0, 1);
873                                     dpdk_p[12] = fx * invProjSquare * (
874                                       dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0));
875                                     dpdk_p[dpdk_step+12] = fy*invProjSquare * (
876                                       dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1));
877                                     dVecTilt = dMatTiltdTauY * Vec3d(xd0, yd0, 1);
878                                     dpdk_p[13] = fx * invProjSquare * (
879                                       dVecTilt(0) * vecTilt(2) - dVecTilt(2) * vecTilt(0));
880                                     dpdk_p[dpdk_step+13] = fy * invProjSquare * (
881                                       dVecTilt(1) * vecTilt(2) - dVecTilt(2) * vecTilt(1));
882                                 }
883                             }
884                         }
885                     }
886                 }
887                 dpdk_p += dpdk_step*2;
888             }
889 
890             if( dpdt_p )
891             {
892                 double dxdt[] = { z, 0, -x*z }, dydt[] = { 0, z, -y*z };
893                 for( j = 0; j < 3; j++ )
894                 {
895                     double dr2dt = 2*x*dxdt[j] + 2*y*dydt[j];
896                     double dcdist_dt = k[0]*dr2dt + 2*k[1]*r2*dr2dt + 3*k[4]*r4*dr2dt;
897                     double dicdist2_dt = -icdist2*icdist2*(k[5]*dr2dt + 2*k[6]*r2*dr2dt + 3*k[7]*r4*dr2dt);
898                     double da1dt = 2*(x*dydt[j] + y*dxdt[j]);
899                     double dmxdt = (dxdt[j]*cdist*icdist2 + x*dcdist_dt*icdist2 + x*cdist*dicdist2_dt +
900                                        k[2]*da1dt + k[3]*(dr2dt + 4*x*dxdt[j]) + k[8]*dr2dt + 2*r2*k[9]*dr2dt);
901                     double dmydt = (dydt[j]*cdist*icdist2 + y*dcdist_dt*icdist2 + y*cdist*dicdist2_dt +
902                                        k[2]*(dr2dt + 4*y*dydt[j]) + k[3]*da1dt + k[10]*dr2dt + 2*r2*k[11]*dr2dt);
903                     dXdYd = dMatTilt*Vec2d(dmxdt, dmydt);
904                     dpdt_p[j] = fx*dXdYd(0);
905                     dpdt_p[dpdt_step+j] = fy*dXdYd(1);
906                 }
907                 dpdt_p += dpdt_step*2;
908             }
909 
910             if( dpdr_p )
911             {
912                 double dx0dr[] =
913                 {
914                     X*dRdr[0] + Y*dRdr[1] + Z*dRdr[2],
915                     X*dRdr[9] + Y*dRdr[10] + Z*dRdr[11],
916                     X*dRdr[18] + Y*dRdr[19] + Z*dRdr[20]
917                 };
918                 double dy0dr[] =
919                 {
920                     X*dRdr[3] + Y*dRdr[4] + Z*dRdr[5],
921                     X*dRdr[12] + Y*dRdr[13] + Z*dRdr[14],
922                     X*dRdr[21] + Y*dRdr[22] + Z*dRdr[23]
923                 };
924                 double dz0dr[] =
925                 {
926                     X*dRdr[6] + Y*dRdr[7] + Z*dRdr[8],
927                     X*dRdr[15] + Y*dRdr[16] + Z*dRdr[17],
928                     X*dRdr[24] + Y*dRdr[25] + Z*dRdr[26]
929                 };
930                 for( j = 0; j < 3; j++ )
931                 {
932                     double dxdr = z*(dx0dr[j] - x*dz0dr[j]);
933                     double dydr = z*(dy0dr[j] - y*dz0dr[j]);
934                     double dr2dr = 2*x*dxdr + 2*y*dydr;
935                     double dcdist_dr = (k[0] + 2*k[1]*r2 + 3*k[4]*r4)*dr2dr;
936                     double dicdist2_dr = -icdist2*icdist2*(k[5] + 2*k[6]*r2 + 3*k[7]*r4)*dr2dr;
937                     double da1dr = 2*(x*dydr + y*dxdr);
938                     double dmxdr = (dxdr*cdist*icdist2 + x*dcdist_dr*icdist2 + x*cdist*dicdist2_dr +
939                                        k[2]*da1dr + k[3]*(dr2dr + 4*x*dxdr) + (k[8] + 2*r2*k[9])*dr2dr);
940                     double dmydr = (dydr*cdist*icdist2 + y*dcdist_dr*icdist2 + y*cdist*dicdist2_dr +
941                                        k[2]*(dr2dr + 4*y*dydr) + k[3]*da1dr + (k[10] + 2*r2*k[11])*dr2dr);
942                     dXdYd = dMatTilt*Vec2d(dmxdr, dmydr);
943                     dpdr_p[j] = fx*dXdYd(0);
944                     dpdr_p[dpdr_step+j] = fy*dXdYd(1);
945                 }
946                 dpdr_p += dpdr_step*2;
947             }
948         }
949     }
950 
951     if( _m != imagePoints )
952         cvConvert( _m, imagePoints );
953 
954     if( _dpdr != dpdr )
955         cvConvert( _dpdr, dpdr );
956 
957     if( _dpdt != dpdt )
958         cvConvert( _dpdt, dpdt );
959 
960     if( _dpdf != dpdf )
961         cvConvert( _dpdf, dpdf );
962 
963     if( _dpdc != dpdc )
964         cvConvert( _dpdc, dpdc );
965 
966     if( _dpdk != dpdk )
967         cvConvert( _dpdk, dpdk );
968 }
969 
970 
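// Estimates the object pose (rvec, tvec) from 3D-2D point correspondences. The image points are
// first undistorted and brought to normalized coordinates; an initial pose is then taken from the
// caller (useExtrinsicGuess != 0), from a homography decomposition when the points are coplanar,
// or from the DLT algorithm otherwise, and is finally refined by a Levenberg-Marquardt
// minimization of the reprojection error.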
971 CV_IMPL void cvFindExtrinsicCameraParams2( const CvMat* objectPoints,
972                   const CvMat* imagePoints, const CvMat* A,
973                   const CvMat* distCoeffs, CvMat* rvec, CvMat* tvec,
974                   int useExtrinsicGuess )
975 {
976     const int max_iter = 20;
977     Ptr<CvMat> matM, _Mxy, _m, _mn, matL;
978 
979     int i, count;
980     double a[9], ar[9]={1,0,0,0,1,0,0,0,1}, R[9];
981     double MM[9], U[9], V[9], W[3];
982     cv::Scalar Mc;
983     double param[6];
984     CvMat matA = cvMat( 3, 3, CV_64F, a );
985     CvMat _Ar = cvMat( 3, 3, CV_64F, ar );
986     CvMat matR = cvMat( 3, 3, CV_64F, R );
987     CvMat _r = cvMat( 3, 1, CV_64F, param );
988     CvMat _t = cvMat( 3, 1, CV_64F, param + 3 );
989     CvMat _Mc = cvMat( 1, 3, CV_64F, Mc.val );
990     CvMat _MM = cvMat( 3, 3, CV_64F, MM );
991     CvMat matU = cvMat( 3, 3, CV_64F, U );
992     CvMat matV = cvMat( 3, 3, CV_64F, V );
993     CvMat matW = cvMat( 3, 1, CV_64F, W );
994     CvMat _param = cvMat( 6, 1, CV_64F, param );
995     CvMat _dpdr, _dpdt;
996 
997     CV_Assert( CV_IS_MAT(objectPoints) && CV_IS_MAT(imagePoints) &&
998         CV_IS_MAT(A) && CV_IS_MAT(rvec) && CV_IS_MAT(tvec) );
999 
1000     count = MAX(objectPoints->cols, objectPoints->rows);
1001     matM.reset(cvCreateMat( 1, count, CV_64FC3 ));
1002     _m.reset(cvCreateMat( 1, count, CV_64FC2 ));
1003 
1004     cvConvertPointsHomogeneous( objectPoints, matM );
1005     cvConvertPointsHomogeneous( imagePoints, _m );
1006     cvConvert( A, &matA );
1007 
1008     CV_Assert( (CV_MAT_DEPTH(rvec->type) == CV_64F || CV_MAT_DEPTH(rvec->type) == CV_32F) &&
1009         (rvec->rows == 1 || rvec->cols == 1) && rvec->rows*rvec->cols*CV_MAT_CN(rvec->type) == 3 );
1010 
1011     CV_Assert( (CV_MAT_DEPTH(tvec->type) == CV_64F || CV_MAT_DEPTH(tvec->type) == CV_32F) &&
1012         (tvec->rows == 1 || tvec->cols == 1) && tvec->rows*tvec->cols*CV_MAT_CN(tvec->type) == 3 );
1013 
1014     CV_Assert((count >= 4) || (count == 3 && useExtrinsicGuess)); // it is unsafe to call LM optimisation without an extrinsic guess in the case of 3 points. This is because there is no guarantee that it will converge on the correct solution.
1015 
1016     _mn.reset(cvCreateMat( 1, count, CV_64FC2 ));
1017     _Mxy.reset(cvCreateMat( 1, count, CV_64FC2 ));
1018 
1019     // normalize image points
1020     // (unapply the intrinsic matrix transformation and distortion)
1021     cvUndistortPoints( _m, _mn, &matA, distCoeffs, 0, &_Ar );
1022 
1023     if( useExtrinsicGuess )
1024     {
1025         CvMat _r_temp = cvMat(rvec->rows, rvec->cols,
1026             CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
1027         CvMat _t_temp = cvMat(tvec->rows, tvec->cols,
1028             CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3);
1029         cvConvert( rvec, &_r_temp );
1030         cvConvert( tvec, &_t_temp );
1031     }
1032     else
1033     {
1034         Mc = cvAvg(matM);
1035         cvReshape( matM, matM, 1, count );
1036         cvMulTransposed( matM, &_MM, 1, &_Mc );
1037         cvSVD( &_MM, &matW, 0, &matV, CV_SVD_MODIFY_A + CV_SVD_V_T );
1038 
1039         // initialize extrinsic parameters
1040         if( W[2]/W[1] < 1e-3)
1041         {
1042             // a planar structure case (all M's lie in the same plane)
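            // The points are first rotated so that they lie in the plane z = 0. The homography H
            // mapping these planar coordinates to the normalized image points then satisfies
            // H ~ [r1 r2 t] (the intrinsics have already been removed), so its first two columns
            // give the first two columns of R after normalization, r3 = r1 x r2, and the third
            // column gives t up to the common scale.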
1043             double tt[3], h[9], h1_norm, h2_norm;
1044             CvMat* R_transform = &matV;
1045             CvMat T_transform = cvMat( 3, 1, CV_64F, tt );
1046             CvMat matH = cvMat( 3, 3, CV_64F, h );
1047             CvMat _h1, _h2, _h3;
1048 
1049             if( V[2]*V[2] + V[5]*V[5] < 1e-10 )
1050                 cvSetIdentity( R_transform );
1051 
1052             if( cvDet(R_transform) < 0 )
1053                 cvScale( R_transform, R_transform, -1 );
1054 
1055             cvGEMM( R_transform, &_Mc, -1, 0, 0, &T_transform, CV_GEMM_B_T );
1056 
1057             for( i = 0; i < count; i++ )
1058             {
1059                 const double* Rp = R_transform->data.db;
1060                 const double* Tp = T_transform.data.db;
1061                 const double* src = matM->data.db + i*3;
1062                 double* dst = _Mxy->data.db + i*2;
1063 
1064                 dst[0] = Rp[0]*src[0] + Rp[1]*src[1] + Rp[2]*src[2] + Tp[0];
1065                 dst[1] = Rp[3]*src[0] + Rp[4]*src[1] + Rp[5]*src[2] + Tp[1];
1066             }
1067 
1068             cvFindHomography( _Mxy, _mn, &matH );
1069 
1070             if( cvCheckArr(&matH, CV_CHECK_QUIET) )
1071             {
1072                 cvGetCol( &matH, &_h1, 0 );
1073                 _h2 = _h1; _h2.data.db++;
1074                 _h3 = _h2; _h3.data.db++;
1075                 h1_norm = std::sqrt(h[0]*h[0] + h[3]*h[3] + h[6]*h[6]);
1076                 h2_norm = std::sqrt(h[1]*h[1] + h[4]*h[4] + h[7]*h[7]);
1077 
1078                 cvScale( &_h1, &_h1, 1./MAX(h1_norm, DBL_EPSILON) );
1079                 cvScale( &_h2, &_h2, 1./MAX(h2_norm, DBL_EPSILON) );
1080                 cvScale( &_h3, &_t, 2./MAX(h1_norm + h2_norm, DBL_EPSILON));
1081                 cvCrossProduct( &_h1, &_h2, &_h3 );
1082 
1083                 cvRodrigues2( &matH, &_r );
1084                 cvRodrigues2( &_r, &matH );
1085                 cvMatMulAdd( &matH, &T_transform, &_t, &_t );
1086                 cvMatMul( &matH, R_transform, &matR );
1087             }
1088             else
1089             {
1090                 cvSetIdentity( &matR );
1091                 cvZero( &_t );
1092             }
1093 
1094             cvRodrigues2( &matR, &_r );
1095         }
1096         else
1097         {
1098             // non-planar structure. Use DLT method
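            // Each correspondence contributes two rows to the 2N x 12 matrix L that encodes the
            // collinearity of the normalized image point m_i and P * M_i for the 3x4 projection
            // P = [R|t] acting on homogeneous points. The solution is the right singular vector
            // associated with the smallest singular value of L; the 3x3 part is then projected
            // onto the nearest rotation matrix via SVD and the translation rescaled accordingly.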
1099             CV_CheckGE(count, 6, "DLT algorithm needs at least 6 points for pose estimation from 3D-2D point correspondences.");
1100             double* L;
1101             double LL[12*12], LW[12], LV[12*12], sc;
1102             CvMat _LL = cvMat( 12, 12, CV_64F, LL );
1103             CvMat _LW = cvMat( 12, 1, CV_64F, LW );
1104             CvMat _LV = cvMat( 12, 12, CV_64F, LV );
1105             CvMat _RRt, _RR, _tt;
1106             CvPoint3D64f* M = (CvPoint3D64f*)matM->data.db;
1107             CvPoint2D64f* mn = (CvPoint2D64f*)_mn->data.db;
1108 
1109             matL.reset(cvCreateMat( 2*count, 12, CV_64F ));
1110             L = matL->data.db;
1111 
1112             for( i = 0; i < count; i++, L += 24 )
1113             {
1114                 double x = -mn[i].x, y = -mn[i].y;
1115                 L[0] = L[16] = M[i].x;
1116                 L[1] = L[17] = M[i].y;
1117                 L[2] = L[18] = M[i].z;
1118                 L[3] = L[19] = 1.;
1119                 L[4] = L[5] = L[6] = L[7] = 0.;
1120                 L[12] = L[13] = L[14] = L[15] = 0.;
1121                 L[8] = x*M[i].x;
1122                 L[9] = x*M[i].y;
1123                 L[10] = x*M[i].z;
1124                 L[11] = x;
1125                 L[20] = y*M[i].x;
1126                 L[21] = y*M[i].y;
1127                 L[22] = y*M[i].z;
1128                 L[23] = y;
1129             }
1130 
1131             cvMulTransposed( matL, &_LL, 1 );
1132             cvSVD( &_LL, &_LW, 0, &_LV, CV_SVD_MODIFY_A + CV_SVD_V_T );
1133             _RRt = cvMat( 3, 4, CV_64F, LV + 11*12 );
1134             cvGetCols( &_RRt, &_RR, 0, 3 );
1135             cvGetCol( &_RRt, &_tt, 3 );
1136             if( cvDet(&_RR) < 0 )
1137                 cvScale( &_RRt, &_RRt, -1 );
1138             sc = cvNorm(&_RR);
1139             cvSVD( &_RR, &matW, &matU, &matV, CV_SVD_MODIFY_A + CV_SVD_U_T + CV_SVD_V_T );
1140             cvGEMM( &matU, &matV, 1, 0, 0, &matR, CV_GEMM_A_T );
1141             cvScale( &_tt, &_t, cvNorm(&matR)/sc );
1142             cvRodrigues2( &matR, &_r );
1143         }
1144     }
1145 
1146     cvReshape( matM, matM, 3, 1 );
1147     cvReshape( _mn, _mn, 2, 1 );
1148 
1149     // refine extrinsic parameters using iterative algorithm
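    // The 6-vector [r; t] is refined with Levenberg-Marquardt: each iteration reprojects the
    // object points with cvProjectPoints2, uses the difference to the observed points as the
    // residual, and uses the dp/dr and dp/dt blocks as the Jacobian.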
1150     CvLevMarq solver( 6, count*2, cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,max_iter,FLT_EPSILON), true);
1151     cvCopy( &_param, solver.param );
1152 
1153     for(;;)
1154     {
1155         CvMat *matJ = 0, *_err = 0;
1156         const CvMat *__param = 0;
1157         bool proceed = solver.update( __param, matJ, _err );
1158         cvCopy( __param, &_param );
1159         if( !proceed || !_err )
1160             break;
1161         cvReshape( _err, _err, 2, 1 );
1162         if( matJ )
1163         {
1164             cvGetCols( matJ, &_dpdr, 0, 3 );
1165             cvGetCols( matJ, &_dpdt, 3, 6 );
1166             cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
1167                               _err, &_dpdr, &_dpdt, 0, 0, 0 );
1168         }
1169         else
1170         {
1171             cvProjectPoints2( matM, &_r, &_t, &matA, distCoeffs,
1172                               _err, 0, 0, 0, 0, 0 );
1173         }
1174         cvSub(_err, _m, _err);
1175         cvReshape( _err, _err, 1, 2*count );
1176     }
1177     cvCopy( solver.param, &_param );
1178 
1179     _r = cvMat( rvec->rows, rvec->cols,
1180         CV_MAKETYPE(CV_64F,CV_MAT_CN(rvec->type)), param );
1181     _t = cvMat( tvec->rows, tvec->cols,
1182         CV_MAKETYPE(CV_64F,CV_MAT_CN(tvec->type)), param + 3 );
1183 
1184     cvConvert( &_r, rvec );
1185     cvConvert( &_t, tvec );
1186 }
1187 
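// Computes an initial estimate of the camera matrix, assuming the object points of each view are
// (close to) coplanar as in the standard planar calibration setup: the principal point is placed
// at the image center and the focal lengths are obtained from the per-view homography constraints
// accumulated in the loop below.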
1188 CV_IMPL void cvInitIntrinsicParams2D( const CvMat* objectPoints,
1189                          const CvMat* imagePoints, const CvMat* npoints,
1190                          CvSize imageSize, CvMat* cameraMatrix,
1191                          double aspectRatio )
1192 {
1193     Ptr<CvMat> matA, _b, _allH;
1194 
1195     int i, j, pos, nimages, ni = 0;
1196     double a[9] = { 0, 0, 0, 0, 0, 0, 0, 0, 1 };
1197     double H[9] = {0}, f[2] = {0};
1198     CvMat _a = cvMat( 3, 3, CV_64F, a );
1199     CvMat matH = cvMat( 3, 3, CV_64F, H );
1200     CvMat _f = cvMat( 2, 1, CV_64F, f );
1201 
1202     assert( CV_MAT_TYPE(npoints->type) == CV_32SC1 &&
1203             CV_IS_MAT_CONT(npoints->type) );
1204     nimages = npoints->rows + npoints->cols - 1;
1205 
1206     if( (CV_MAT_TYPE(objectPoints->type) != CV_32FC3 &&
1207         CV_MAT_TYPE(objectPoints->type) != CV_64FC3) ||
1208         (CV_MAT_TYPE(imagePoints->type) != CV_32FC2 &&
1209         CV_MAT_TYPE(imagePoints->type) != CV_64FC2) )
1210         CV_Error( CV_StsUnsupportedFormat, "Both object points and image points must be 2D" );
1211 
1212     if( objectPoints->rows != 1 || imagePoints->rows != 1 )
1213         CV_Error( CV_StsBadSize, "object points and image points must be single-row matrices" );
1214 
1215     matA.reset(cvCreateMat( 2*nimages, 2, CV_64F ));
1216     _b.reset(cvCreateMat( 2*nimages, 1, CV_64F ));
1217     a[2] = (!imageSize.width) ? 0.5 : (imageSize.width - 1)*0.5;
1218     a[5] = (!imageSize.height) ? 0.5 : (imageSize.height - 1)*0.5;
1219     _allH.reset(cvCreateMat( nimages, 9, CV_64F ));
1220 
1221     // extract vanishing points in order to obtain initial value for the focal length
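    // For every view a homography H from the model plane to the image is estimated and the
    // principal-point contribution is subtracted. Its first two columns h and v are images of two
    // orthogonal in-plane directions (and their bisectors d1, d2 are orthogonal as well), so
    //   h^T * diag(1/fx^2, 1/fy^2, 1) * v = 0   and   d1^T * diag(1/fx^2, 1/fy^2, 1) * d2 = 0,
    // which gives two linear equations per view in (1/fx^2, 1/fy^2); they are accumulated in
    // matA/_b and solved in the least-squares sense below.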
1222     for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1223     {
1224         double* Ap = matA->data.db + i*4;
1225         double* bp = _b->data.db + i*2;
1226         ni = npoints->data.i[i];
1227         double h[3], v[3], d1[3], d2[3];
1228         double n[4] = {0,0,0,0};
1229         CvMat _m, matM;
1230         cvGetCols( objectPoints, &matM, pos, pos + ni );
1231         cvGetCols( imagePoints, &_m, pos, pos + ni );
1232 
1233         cvFindHomography( &matM, &_m, &matH );
1234         memcpy( _allH->data.db + i*9, H, sizeof(H) );
1235 
1236         H[0] -= H[6]*a[2]; H[1] -= H[7]*a[2]; H[2] -= H[8]*a[2];
1237         H[3] -= H[6]*a[5]; H[4] -= H[7]*a[5]; H[5] -= H[8]*a[5];
1238 
1239         for( j = 0; j < 3; j++ )
1240         {
1241             double t0 = H[j*3], t1 = H[j*3+1];
1242             h[j] = t0; v[j] = t1;
1243             d1[j] = (t0 + t1)*0.5;
1244             d2[j] = (t0 - t1)*0.5;
1245             n[0] += t0*t0; n[1] += t1*t1;
1246             n[2] += d1[j]*d1[j]; n[3] += d2[j]*d2[j];
1247         }
1248 
1249         for( j = 0; j < 4; j++ )
1250             n[j] = 1./std::sqrt(n[j]);
1251 
1252         for( j = 0; j < 3; j++ )
1253         {
1254             h[j] *= n[0]; v[j] *= n[1];
1255             d1[j] *= n[2]; d2[j] *= n[3];
1256         }
1257 
1258         Ap[0] = h[0]*v[0]; Ap[1] = h[1]*v[1];
1259         Ap[2] = d1[0]*d2[0]; Ap[3] = d1[1]*d2[1];
1260         bp[0] = -h[2]*v[2]; bp[1] = -d1[2]*d2[2];
1261     }
1262 
1263     cvSolve( matA, _b, &_f, CV_NORMAL + CV_SVD );
1264     a[0] = std::sqrt(fabs(1./f[0]));
1265     a[4] = std::sqrt(fabs(1./f[1]));
1266     if( aspectRatio != 0 )
1267     {
1268         double tf = (a[0] + a[4])/(aspectRatio + 1.);
1269         a[0] = aspectRatio*tf;
1270         a[4] = tf;
1271     }
1272 
1273     cvConvert( &_a, cameraMatrix );
1274 }
1275 
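// Copies into dst the sub-matrix of src formed by the columns and rows whose entries in the
// 'cols' and 'rows' masks are non-zero; dst is reallocated to
// countNonZero(rows) x countNonZero(cols) of type CV_64FC1. This helper extracts the block of a
// matrix corresponding to a selected subset of parameters (e.g. the non-fixed calibration ones).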
1276 static void subMatrix(const cv::Mat& src, cv::Mat& dst, const std::vector<uchar>& cols,
1277                       const std::vector<uchar>& rows) {
1278     int nonzeros_cols = cv::countNonZero(cols);
1279     cv::Mat tmp(src.rows, nonzeros_cols, CV_64FC1);
1280 
1281     for (int i = 0, j = 0; i < (int)cols.size(); i++)
1282     {
1283         if (cols[i])
1284         {
1285             src.col(i).copyTo(tmp.col(j++));
1286         }
1287     }
1288 
1289     int nonzeros_rows  = cv::countNonZero(rows);
1290     dst.create(nonzeros_rows, nonzeros_cols, CV_64FC1);
1291     for (int i = 0, j = 0; i < (int)rows.size(); i++)
1292     {
1293         if (rows[i])
1294         {
1295             tmp.row(i).copyTo(dst.row(j++));
1296         }
1297     }
1298 }
1299 
1300 static double cvCalibrateCamera2Internal( const CvMat* objectPoints,
1301                     const CvMat* imagePoints, const CvMat* npoints,
1302                     CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
1303                     CvMat* rvecs, CvMat* tvecs, CvMat* stdDevs,
1304                     CvMat* perViewErrors, int flags, CvTermCriteria termCrit )
1305 {
1306     const int NINTRINSIC = CV_CALIB_NINTRINSIC;
1307     double reprojErr = 0;
1308 
1309     Matx33d A;
1310     double k[14] = {0};
1311     CvMat matA = cvMat(3, 3, CV_64F, A.val), _k;
1312     int i, nimages, maxPoints = 0, ni = 0, pos, total = 0, nparams, npstep, cn;
1313     double aspectRatio = 0.;
1314 
1315     // 0. check the parameters & allocate buffers
1316     if( !CV_IS_MAT(objectPoints) || !CV_IS_MAT(imagePoints) ||
1317         !CV_IS_MAT(npoints) || !CV_IS_MAT(cameraMatrix) || !CV_IS_MAT(distCoeffs) )
1318         CV_Error( CV_StsBadArg, "One of the required vector arguments is not a valid matrix" );
1319 
1320     if( imageSize.width <= 0 || imageSize.height <= 0 )
1321         CV_Error( CV_StsOutOfRange, "image width and height must be positive" );
1322 
1323     if( CV_MAT_TYPE(npoints->type) != CV_32SC1 ||
1324         (npoints->rows != 1 && npoints->cols != 1) )
1325         CV_Error( CV_StsUnsupportedFormat,
1326             "the array of point counters must be 1-dimensional integer vector" );
1327     if(flags & CALIB_TILTED_MODEL)
1328     {
1329         //when the tilted sensor model is used the distortion coefficients matrix must have 14 parameters
1330         if (distCoeffs->cols*distCoeffs->rows != 14)
1331             CV_Error( CV_StsBadArg, "The tilted sensor model must have 14 parameters in the distortion matrix" );
1332     }
1333     else
1334     {
1335         //when the thin prism model is used the distortion coefficients matrix must have 12 parameters
1336         if(flags & CALIB_THIN_PRISM_MODEL)
1337             if (distCoeffs->cols*distCoeffs->rows != 12)
1338                 CV_Error( CV_StsBadArg, "Thin prism model must have 12 parameters in the distortion matrix" );
1339     }
1340 
1341     nimages = npoints->rows*npoints->cols;
1342     npstep = npoints->rows == 1 ? 1 : npoints->step/CV_ELEM_SIZE(npoints->type);
1343 
1344     if( rvecs )
1345     {
1346         cn = CV_MAT_CN(rvecs->type);
1347         if( !CV_IS_MAT(rvecs) ||
1348             (CV_MAT_DEPTH(rvecs->type) != CV_32F && CV_MAT_DEPTH(rvecs->type) != CV_64F) ||
1349             ((rvecs->rows != nimages || (rvecs->cols*cn != 3 && rvecs->cols*cn != 9)) &&
1350             (rvecs->rows != 1 || rvecs->cols != nimages || cn != 3)) )
1351             CV_Error( CV_StsBadArg, "the output array of rotation vectors must be 3-channel "
1352                 "1xn or nx1 array or 1-channel nx3 or nx9 array, where n is the number of views" );
1353     }
1354 
1355     if( tvecs )
1356     {
1357         cn = CV_MAT_CN(tvecs->type);
1358         if( !CV_IS_MAT(tvecs) ||
1359             (CV_MAT_DEPTH(tvecs->type) != CV_32F && CV_MAT_DEPTH(tvecs->type) != CV_64F) ||
1360             ((tvecs->rows != nimages || tvecs->cols*cn != 3) &&
1361             (tvecs->rows != 1 || tvecs->cols != nimages || cn != 3)) )
1362             CV_Error( CV_StsBadArg, "the output array of translation vectors must be 3-channel "
1363                 "1xn or nx1 array or 1-channel nx3 array, where n is the number of views" );
1364     }
1365 
1366     if( stdDevs )
1367     {
1368         cn = CV_MAT_CN(stdDevs->type);
1369         if( !CV_IS_MAT(stdDevs) ||
1370             (CV_MAT_DEPTH(stdDevs->type) != CV_32F && CV_MAT_DEPTH(stdDevs->type) != CV_64F) ||
1371             ((stdDevs->rows != (nimages*6 + NINTRINSIC) || stdDevs->cols*cn != 1) &&
1372             (stdDevs->rows != 1 || stdDevs->cols != (nimages*6 + NINTRINSIC) || cn != 1)) )
1373 #define STR__(x) #x
1374 #define STR_(x) STR__(x)
1375             CV_Error( CV_StsBadArg, "the output array of standard deviations vectors must be 1-channel "
1376                 "1x(n*6 + NINTRINSIC) or (n*6 + NINTRINSIC)x1 array, where n is the number of views,"
1377                 " NINTRINSIC = " STR_(CV_CALIB_NINTRINSIC));
1378     }
1379 
1380     if( (CV_MAT_TYPE(cameraMatrix->type) != CV_32FC1 &&
1381         CV_MAT_TYPE(cameraMatrix->type) != CV_64FC1) ||
1382         cameraMatrix->rows != 3 || cameraMatrix->cols != 3 )
1383         CV_Error( CV_StsBadArg,
1384             "Intrinsic parameters must be 3x3 floating-point matrix" );
1385 
1386     if( (CV_MAT_TYPE(distCoeffs->type) != CV_32FC1 &&
1387         CV_MAT_TYPE(distCoeffs->type) != CV_64FC1) ||
1388         (distCoeffs->cols != 1 && distCoeffs->rows != 1) ||
1389         (distCoeffs->cols*distCoeffs->rows != 4 &&
1390         distCoeffs->cols*distCoeffs->rows != 5 &&
1391         distCoeffs->cols*distCoeffs->rows != 8 &&
1392         distCoeffs->cols*distCoeffs->rows != 12 &&
1393         distCoeffs->cols*distCoeffs->rows != 14) )
1394         CV_Error( CV_StsBadArg, cvDistCoeffErr );
1395 
1396     for( i = 0; i < nimages; i++ )
1397     {
1398         ni = npoints->data.i[i*npstep];
1399         if( ni < 4 )
1400         {
1401             CV_Error_( CV_StsOutOfRange, ("The number of points in the view #%d is < 4", i));
1402         }
1403         maxPoints = MAX( maxPoints, ni );
1404         total += ni;
1405     }
1406 
1407     Mat matM( 1, total, CV_64FC3 );
1408     Mat _m( 1, total, CV_64FC2 );
1409     Mat allErrors(1, total, CV_64FC2);
1410 
1411     if(CV_MAT_CN(objectPoints->type) == 3) {
1412         cvarrToMat(objectPoints).convertTo(matM, CV_64F);
1413     } else {
1414         convertPointsHomogeneous(cvarrToMat(objectPoints), matM);
1415     }
1416 
1417     if(CV_MAT_CN(imagePoints->type) == 2) {
1418         cvarrToMat(imagePoints).convertTo(_m, CV_64F);
1419     } else {
1420         convertPointsHomogeneous(cvarrToMat(imagePoints), _m);
1421     }
1422 
1423     nparams = NINTRINSIC + nimages*6;
1424     Mat _Ji( maxPoints*2, NINTRINSIC, CV_64FC1, Scalar(0));
1425     Mat _Je( maxPoints*2, 6, CV_64FC1 );
1426     Mat _err( maxPoints*2, 1, CV_64FC1 );
1427 
1428     _k = cvMat( distCoeffs->rows, distCoeffs->cols, CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), k);
1429     if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 8 )
1430     {
1431         if( distCoeffs->rows*distCoeffs->cols*CV_MAT_CN(distCoeffs->type) < 5 )
1432             flags |= CALIB_FIX_K3;
1433         flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
1434     }
1435     const double minValidAspectRatio = 0.01;
1436     const double maxValidAspectRatio = 100.0;
1437 
1438     // 1. initialize intrinsic parameters & LM solver
1439     if( flags & CALIB_USE_INTRINSIC_GUESS )
1440     {
1441         cvConvert( cameraMatrix, &matA );
1442         if( A(0, 0) <= 0 || A(1, 1) <= 0 )
1443             CV_Error( CV_StsOutOfRange, "Focal length (fx and fy) must be positive" );
1444         if( A(0, 2) < 0 || A(0, 2) >= imageSize.width ||
1445             A(1, 2) < 0 || A(1, 2) >= imageSize.height )
1446             CV_Error( CV_StsOutOfRange, "Principal point must be within the image" );
1447         if( fabs(A(0, 1)) > 1e-5 )
1448             CV_Error( CV_StsOutOfRange, "Non-zero skew is not supported by the function" );
1449         if( fabs(A(1, 0)) > 1e-5 || fabs(A(2, 0)) > 1e-5 ||
1450             fabs(A(2, 1)) > 1e-5 || fabs(A(2,2)-1) > 1e-5 )
1451             CV_Error( CV_StsOutOfRange,
1452                 "The intrinsic matrix must have [fx 0 cx; 0 fy cy; 0 0 1] shape" );
1453         A(0, 1) = A(1, 0) = A(2, 0) = A(2, 1) = 0.;
1454         A(2, 2) = 1.;
1455 
1456         if( flags & CALIB_FIX_ASPECT_RATIO )
1457         {
1458             aspectRatio = A(0, 0)/A(1, 1);
1459 
1460             if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
1461                 CV_Error( CV_StsOutOfRange,
1462                     "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
1463         }
1464         cvConvert( distCoeffs, &_k );
1465     }
1466     else
1467     {
1468         Scalar mean, sdv;
1469         meanStdDev(matM, mean, sdv);
1470         if( fabs(mean[2]) > 1e-5 || fabs(sdv[2]) > 1e-5 )
1471             CV_Error( CV_StsBadArg,
1472             "For non-planar calibration rigs the initial intrinsic matrix must be specified" );
1473         for( i = 0; i < total; i++ )
1474             matM.at<Point3d>(i).z = 0.;
1475 
1476         if( flags & CALIB_FIX_ASPECT_RATIO )
1477         {
1478             aspectRatio = cvmGet(cameraMatrix,0,0);
1479             aspectRatio /= cvmGet(cameraMatrix,1,1);
1480             if( aspectRatio < minValidAspectRatio || aspectRatio > maxValidAspectRatio )
1481                 CV_Error( CV_StsOutOfRange,
1482                     "The specified aspect ratio (= cameraMatrix[0][0] / cameraMatrix[1][1]) is incorrect" );
1483         }
1484         CvMat _matM = cvMat(matM), m = cvMat(_m);
1485         cvInitIntrinsicParams2D( &_matM, &m, npoints, imageSize, &matA, aspectRatio );
1486     }
1487 
1488     CvLevMarq solver( nparams, 0, termCrit );
1489 
1490     if(flags & CALIB_USE_LU) {
1491         solver.solveMethod = DECOMP_LU;
1492     }
1493     else if(flags & CALIB_USE_QR) {
1494         solver.solveMethod = DECOMP_QR;
1495     }
1496 
1497     {
1498     double* param = solver.param->data.db;
1499     uchar* mask = solver.mask->data.ptr;
1500 
1501     param[0] = A(0, 0); param[1] = A(1, 1); param[2] = A(0, 2); param[3] = A(1, 2);
1502     std::copy(k, k + 14, param + 4);
1503 
1504     if(flags & CALIB_FIX_ASPECT_RATIO)
1505         mask[0] = 0;
1506     if( flags & CALIB_FIX_FOCAL_LENGTH )
1507         mask[0] = mask[1] = 0;
1508     if( flags & CALIB_FIX_PRINCIPAL_POINT )
1509         mask[2] = mask[3] = 0;
1510     if( flags & CALIB_ZERO_TANGENT_DIST )
1511     {
1512         param[6] = param[7] = 0;
1513         mask[6] = mask[7] = 0;
1514     }
1515     if( !(flags & CALIB_RATIONAL_MODEL) )
1516         flags |= CALIB_FIX_K4 + CALIB_FIX_K5 + CALIB_FIX_K6;
1517     if( !(flags & CALIB_THIN_PRISM_MODEL))
1518         flags |= CALIB_FIX_S1_S2_S3_S4;
1519     if( !(flags & CALIB_TILTED_MODEL))
1520         flags |= CALIB_FIX_TAUX_TAUY;
1521 
1522     mask[ 4] = !(flags & CALIB_FIX_K1);
1523     mask[ 5] = !(flags & CALIB_FIX_K2);
1524     if( flags & CALIB_FIX_TANGENT_DIST )
1525     {
1526       mask[6]  = mask[7]  = 0;
1527     }
1528     mask[ 8] = !(flags & CALIB_FIX_K3);
1529     mask[ 9] = !(flags & CALIB_FIX_K4);
1530     mask[10] = !(flags & CALIB_FIX_K5);
1531     mask[11] = !(flags & CALIB_FIX_K6);
1532 
1533     if(flags & CALIB_FIX_S1_S2_S3_S4)
1534     {
1535         mask[12] = 0;
1536         mask[13] = 0;
1537         mask[14] = 0;
1538         mask[15] = 0;
1539     }
1540     if(flags & CALIB_FIX_TAUX_TAUY)
1541     {
1542         mask[16] = 0;
1543         mask[17] = 0;
1544     }
1545     }
1546 
1547     // 2. initialize extrinsic parameters
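    // Each view's pose is initialized independently from the current intrinsic guess
    // with cvFindExtrinsicCameraParams2 (the C counterpart of solvePnP).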
1548     for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1549     {
1550         CvMat _ri, _ti;
1551         ni = npoints->data.i[i*npstep];
1552 
1553         cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
1554         cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
1555 
1556         CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
1557         CvMat _mi = cvMat(_m.colRange(pos, pos + ni));
1558 
1559         cvFindExtrinsicCameraParams2( &_Mi, &_mi, &matA, &_k, &_ri, &_ti );
1560     }
1561 
1562     // 3. run the optimization
1563     for(;;)
1564     {
1565         const CvMat* _param = 0;
1566         CvMat *_JtJ = 0, *_JtErr = 0;
1567         double* _errNorm = 0;
1568         bool proceed = solver.updateAlt( _param, _JtJ, _JtErr, _errNorm );
1569         double *param = solver.param->data.db, *pparam = solver.prevParam->data.db;
1570         bool calcJ = solver.state == CvLevMarq::CALC_J || (!proceed && stdDevs);
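        // The Jacobian (calcJ) is needed during CALC_J steps of the solver and once
        // more after convergence when the parameter standard deviations are requested.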
1571 
1572         if( flags & CALIB_FIX_ASPECT_RATIO )
1573         {
1574             param[0] = param[1]*aspectRatio;
1575             pparam[0] = pparam[1]*aspectRatio;
1576         }
1577 
1578         A(0, 0) = param[0]; A(1, 1) = param[1]; A(0, 2) = param[2]; A(1, 2) = param[3];
1579         std::copy(param + 4, param + 4 + 14, k);
1580 
1581         if ( !proceed && !stdDevs && !perViewErrors )
1582             break;
1583         else if ( !proceed && stdDevs )
1584             cvZero(_JtJ);
1585 
1586         reprojErr = 0;
1587 
1588         for( i = 0, pos = 0; i < nimages; i++, pos += ni )
1589         {
1590             CvMat _ri, _ti;
1591             ni = npoints->data.i[i*npstep];
1592 
1593             cvGetRows( solver.param, &_ri, NINTRINSIC + i*6, NINTRINSIC + i*6 + 3 );
1594             cvGetRows( solver.param, &_ti, NINTRINSIC + i*6 + 3, NINTRINSIC + i*6 + 6 );
1595 
1596             CvMat _Mi = cvMat(matM.colRange(pos, pos + ni));
1597             CvMat _mi = cvMat(_m.colRange(pos, pos + ni));
1598             CvMat _me = cvMat(allErrors.colRange(pos, pos + ni));
1599 
1600             _Je.resize(ni*2); _Ji.resize(ni*2); _err.resize(ni*2);
1601             CvMat _dpdr = cvMat(_Je.colRange(0, 3));
1602             CvMat _dpdt = cvMat(_Je.colRange(3, 6));
1603             CvMat _dpdf = cvMat(_Ji.colRange(0, 2));
1604             CvMat _dpdc = cvMat(_Ji.colRange(2, 4));
1605             CvMat _dpdk = cvMat(_Ji.colRange(4, NINTRINSIC));
1606             CvMat _mp = cvMat(_err.reshape(2, 1));
1607 
1608             if( calcJ )
1609             {
1610                  cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp, &_dpdr, &_dpdt,
1611                                   (flags & CALIB_FIX_FOCAL_LENGTH) ? 0 : &_dpdf,
1612                                   (flags & CALIB_FIX_PRINCIPAL_POINT) ? 0 : &_dpdc, &_dpdk,
1613                                   (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio : 0);
1614             }
1615             else
1616                 cvProjectPoints2( &_Mi, &_ri, &_ti, &matA, &_k, &_mp );
1617 
1618             cvSub( &_mp, &_mi, &_mp );
1619             if (perViewErrors || stdDevs)
1620                 cvCopy(&_mp, &_me);
1621 
1622             if( calcJ )
1623             {
1624                 Mat JtJ(cvarrToMat(_JtJ)), JtErr(cvarrToMat(_JtErr));
1625 
1626                 // see HZ: (A6.14) for details on the structure of the Jacobian
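                // The full Jacobian is block-sparse: the NINTRINSIC intrinsic columns are
                // shared by all views, while each view contributes its own 6 extrinsic
                // columns, so only the corresponding blocks of J^T*J and J^T*err are
                // accumulated here.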
1627                 JtJ(Rect(0, 0, NINTRINSIC, NINTRINSIC)) += _Ji.t() * _Ji;
1628                 JtJ(Rect(NINTRINSIC + i * 6, NINTRINSIC + i * 6, 6, 6)) = _Je.t() * _Je;
1629                 JtJ(Rect(NINTRINSIC + i * 6, 0, 6, NINTRINSIC)) = _Ji.t() * _Je;
1630 
1631                 JtErr.rowRange(0, NINTRINSIC) += _Ji.t() * _err;
1632                 JtErr.rowRange(NINTRINSIC + i * 6, NINTRINSIC + (i + 1) * 6) = _Je.t() * _err;
1633             }
1634 
1635             double viewErr = norm(_err, NORM_L2SQR);
1636 
1637             if( perViewErrors )
1638                 perViewErrors->data.db[i] = std::sqrt(viewErr / ni);
1639 
1640             reprojErr += viewErr;
1641         }
1642         if( _errNorm )
1643             *_errNorm = reprojErr;
1644 
1645         if( !proceed )
1646         {
1647             if( stdDevs )
1648             {
1649                 Mat mask = cvarrToMat(solver.mask);
1650                 int nparams_nz = countNonZero(mask);
1651                 Mat JtJinv, JtJN;
1652                 JtJN.create(nparams_nz, nparams_nz, CV_64F);
1653                 subMatrix(cvarrToMat(_JtJ), JtJN, mask, mask);
1654                 completeSymm(JtJN, false);
1655                 cv::invert(JtJN, JtJinv, DECOMP_SVD);
1656                 // sigma2 is the estimated variance of the measurement noise;
1657                 // see the literature on the covariance of least-squares estimators
1658                 // for a detailed description of the variance estimation method
1659                 double sigma2 = norm(allErrors, NORM_L2SQR) / (total - nparams_nz);
1660                 Mat stdDevsM = cvarrToMat(stdDevs);
1661                 int j = 0;
1662                 for ( int s = 0; s < nparams; s++ )
1663                     if( mask.data[s] )
1664                     {
1665                         stdDevsM.at<double>(s) = std::sqrt(JtJinv.at<double>(j,j) * sigma2);
1666                         j++;
1667                     }
1668                     else
1669                         stdDevsM.at<double>(s) = 0.;
1670             }
1671             break;
1672         }
1673     }
1674 
1675     // 4. store the results
1676     cvConvert( &matA, cameraMatrix );
1677     cvConvert( &_k, distCoeffs );
1678 
1679     for( i = 0, pos = 0; i < nimages; i++ )
1680     {
1681         CvMat src, dst;
1682 
1683         if( rvecs )
1684         {
1685             src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 );
1686             if( rvecs->rows == nimages && rvecs->cols*CV_MAT_CN(rvecs->type) == 9 )
1687             {
1688                 dst = cvMat( 3, 3, CV_MAT_DEPTH(rvecs->type),
1689                     rvecs->data.ptr + rvecs->step*i );
1690                 cvRodrigues2( &src, &matA );
1691                 cvConvert( &matA, &dst );
1692             }
1693             else
1694             {
1695                 dst = cvMat( 3, 1, CV_MAT_DEPTH(rvecs->type), rvecs->rows == 1 ?
1696                     rvecs->data.ptr + i*CV_ELEM_SIZE(rvecs->type) :
1697                     rvecs->data.ptr + rvecs->step*i );
1698                 cvConvert( &src, &dst );
1699             }
1700         }
1701         if( tvecs )
1702         {
1703             src = cvMat( 3, 1, CV_64F, solver.param->data.db + NINTRINSIC + i*6 + 3 );
1704             dst = cvMat( 3, 1, CV_MAT_DEPTH(tvecs->type), tvecs->rows == 1 ?
1705                     tvecs->data.ptr + i*CV_ELEM_SIZE(tvecs->type) :
1706                     tvecs->data.ptr + tvecs->step*i );
1707             cvConvert( &src, &dst );
1708          }
1709     }
1710 
1711     return std::sqrt(reprojErr/total);
1712 }
1713 
1714 
1715 /* finds intrinsic and extrinsic camera parameters
1716    from a few views of known calibration pattern */
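/* A minimal, illustrative usage sketch (not part of the library), assuming that the
   points of all "nimages" planar views have already been concatenated into objPts
   (1xN CV_64FC3, z = 0) and imgPts (1xN CV_64FC2), with the per-view point counts
   stored in the "counts" array; error handling is omitted:

       double a[9] = {0}, d[5] = {0};
       CvMat camMat  = cvMat(3, 3, CV_64F, a);
       CvMat dist    = cvMat(1, 5, CV_64F, d);
       CvMat npoints = cvMat(1, nimages, CV_32SC1, counts);
       double rms = cvCalibrateCamera2(&objPts, &imgPts, &npoints, imageSize,
                                       &camMat, &dist, 0, 0, 0,
                                       cvTermCriteria(CV_TERMCRIT_ITER + CV_TERMCRIT_EPS,
                                                      30, DBL_EPSILON));
*/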
1717 CV_IMPL double cvCalibrateCamera2( const CvMat* objectPoints,
1718                     const CvMat* imagePoints, const CvMat* npoints,
1719                     CvSize imageSize, CvMat* cameraMatrix, CvMat* distCoeffs,
1720                     CvMat* rvecs, CvMat* tvecs, int flags, CvTermCriteria termCrit )
1721 {
1722     return cvCalibrateCamera2Internal(objectPoints, imagePoints, npoints, imageSize, cameraMatrix,
1723                                       distCoeffs, rvecs, tvecs, NULL, NULL, flags, termCrit);
1724 }
1725 
1726 void cvCalibrationMatrixValues( const CvMat *calibMatr, CvSize imgSize,
1727     double apertureWidth, double apertureHeight, double *fovx, double *fovy,
1728     double *focalLength, CvPoint2D64f *principalPoint, double *pasp )
1729 {
1730     /* Validate parameters. */
1731     if(calibMatr == 0)
1732         CV_Error(CV_StsNullPtr, "One of the parameters is a NULL pointer!");
1733 
1734     if(!CV_IS_MAT(calibMatr))
1735         CV_Error(CV_StsUnsupportedFormat, "Input parameter must be a valid matrix!");
1736 
1737     double dummy = .0;
1738     Point2d pp;
1739     cv::calibrationMatrixValues(cvarrToMat(calibMatr), imgSize, apertureWidth, apertureHeight,
1740             fovx ? *fovx : dummy,
1741             fovy ? *fovy : dummy,
1742             focalLength ? *focalLength : dummy,
1743             pp,
1744             pasp ? *pasp : dummy);
1745 
1746     if(principalPoint)
1747         *principalPoint = cvPoint2D64f(pp.x, pp.y);
1748 }
1749 
1750 
1751 //////////////////////////////// Stereo Calibration ///////////////////////////////////
1752 
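// qsort() comparator for doubles, used below to take per-component medians of the
// initial per-view estimates of the inter-camera rotation and translation.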
1753 static int dbCmp( const void* _a, const void* _b )
1754 {
1755     double a = *(const double*)_a;
1756     double b = *(const double*)_b;
1757 
1758     return (a > b) - (a < b);
1759 }
1760 
1761 static double cvStereoCalibrateImpl( const CvMat* _objectPoints, const CvMat* _imagePoints1,
1762                         const CvMat* _imagePoints2, const CvMat* _npoints,
1763                         CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
1764                         CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
1765                         CvSize imageSize, CvMat* matR, CvMat* matT,
1766                         CvMat* matE, CvMat* matF,
1767                         CvMat* perViewErr, int flags,
1768                         CvTermCriteria termCrit )
1769 {
1770     const int NINTRINSIC = 18;
1771     Ptr<CvMat> npoints, imagePoints[2], objectPoints, RT0;
1772     double reprojErr = 0;
1773 
1774     double A[2][9], dk[2][14]={{0}}, rlr[9];
1775     CvMat K[2], Dist[2], om_LR, T_LR;
1776     CvMat R_LR = cvMat(3, 3, CV_64F, rlr);
1777     int i, k, p, ni = 0, ofs, nimages, pointsTotal, maxPoints = 0;
1778     int nparams;
1779     bool recomputeIntrinsics = false;
1780     double aspectRatio[2] = {0};
1781 
1782     CV_Assert( CV_IS_MAT(_imagePoints1) && CV_IS_MAT(_imagePoints2) &&
1783                CV_IS_MAT(_objectPoints) && CV_IS_MAT(_npoints) &&
1784                CV_IS_MAT(matR) && CV_IS_MAT(matT) );
1785 
1786     CV_Assert( CV_ARE_TYPES_EQ(_imagePoints1, _imagePoints2) &&
1787                CV_ARE_DEPTHS_EQ(_imagePoints1, _objectPoints) );
1788 
1789     CV_Assert( (_npoints->cols == 1 || _npoints->rows == 1) &&
1790                CV_MAT_TYPE(_npoints->type) == CV_32SC1 );
1791 
1792     nimages = _npoints->cols + _npoints->rows - 1;
1793     npoints.reset(cvCreateMat( _npoints->rows, _npoints->cols, _npoints->type ));
1794     cvCopy( _npoints, npoints );
1795 
1796     for( i = 0, pointsTotal = 0; i < nimages; i++ )
1797     {
1798         maxPoints = MAX(maxPoints, npoints->data.i[i]);
1799         pointsTotal += npoints->data.i[i];
1800     }
1801 
1802     objectPoints.reset(cvCreateMat( _objectPoints->rows, _objectPoints->cols,
1803                                     CV_64FC(CV_MAT_CN(_objectPoints->type))));
1804     cvConvert( _objectPoints, objectPoints );
1805     cvReshape( objectPoints, objectPoints, 3, 1 );
1806 
1807     for( k = 0; k < 2; k++ )
1808     {
1809         const CvMat* points = k == 0 ? _imagePoints1 : _imagePoints2;
1810         const CvMat* cameraMatrix = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
1811         const CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
1812 
1813         int cn = CV_MAT_CN(_imagePoints1->type);
1814         CV_Assert( (CV_MAT_DEPTH(_imagePoints1->type) == CV_32F ||
1815                 CV_MAT_DEPTH(_imagePoints1->type) == CV_64F) &&
1816                ((_imagePoints1->rows == pointsTotal && _imagePoints1->cols*cn == 2) ||
1817                 (_imagePoints1->rows == 1 && _imagePoints1->cols == pointsTotal && cn == 2)) );
1818 
1819         K[k] = cvMat(3,3,CV_64F,A[k]);
1820         Dist[k] = cvMat(1,14,CV_64F,dk[k]);
1821 
1822         imagePoints[k].reset(cvCreateMat( points->rows, points->cols, CV_64FC(CV_MAT_CN(points->type))));
1823         cvConvert( points, imagePoints[k] );
1824         cvReshape( imagePoints[k], imagePoints[k], 2, 1 );
1825 
1826         if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
1827             CALIB_FIX_ASPECT_RATIO|CALIB_FIX_FOCAL_LENGTH) )
1828             cvConvert( cameraMatrix, &K[k] );
1829 
1830         if( flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS|
1831             CALIB_FIX_K1|CALIB_FIX_K2|CALIB_FIX_K3|CALIB_FIX_K4|CALIB_FIX_K5|CALIB_FIX_K6|CALIB_FIX_TANGENT_DIST) )
1832         {
1833             CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
1834                 CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
1835             cvConvert( distCoeffs, &tdist );
1836         }
1837 
1838         if( !(flags & (CALIB_FIX_INTRINSIC|CALIB_USE_INTRINSIC_GUESS)))
1839         {
1840             cvCalibrateCamera2( objectPoints, imagePoints[k],
1841                 npoints, imageSize, &K[k], &Dist[k], NULL, NULL, flags );
1842         }
1843     }
1844 
1845     if( flags & CALIB_SAME_FOCAL_LENGTH )
1846     {
1847         static const int avg_idx[] = { 0, 4, 2, 5, -1 };
1848         for( k = 0; avg_idx[k] >= 0; k++ )
1849             A[0][avg_idx[k]] = A[1][avg_idx[k]] = (A[0][avg_idx[k]] + A[1][avg_idx[k]])*0.5;
1850     }
1851 
1852     if( flags & CALIB_FIX_ASPECT_RATIO )
1853     {
1854         for( k = 0; k < 2; k++ )
1855             aspectRatio[k] = A[k][0]/A[k][4];
1856     }
1857 
1858     recomputeIntrinsics = (flags & CALIB_FIX_INTRINSIC) == 0;
1859 
1860     Mat err( maxPoints*2, 1, CV_64F );
1861     Mat Je( maxPoints*2, 6, CV_64F );
1862     Mat J_LR( maxPoints*2, 6, CV_64F );
1863     Mat Ji( maxPoints*2, NINTRINSIC, CV_64F, Scalar(0) );
1864 
1865     // we optimize for the inter-camera R(3),t(3), then, optionally,
1866     // for the intrinsic parameters of each camera ((fx,fy,cx,cy,k1,k2,p1,p2) ~ 8 parameters).
1867     nparams = 6*(nimages+1) + (recomputeIntrinsics ? NINTRINSIC*2 : 0);
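    // nparams: 6 for the inter-camera (R, T), plus 6 per view for the pose of the rig
    // relative to the first camera, plus 2*NINTRINSIC if the intrinsics of both cameras
    // are refined as well.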
1868 
1869     CvLevMarq solver( nparams, 0, termCrit );
1870 
1871     if(flags & CALIB_USE_LU) {
1872         solver.solveMethod = DECOMP_LU;
1873     }
1874 
1875     if( recomputeIntrinsics )
1876     {
1877         uchar* imask = solver.mask->data.ptr + nparams - NINTRINSIC*2;
1878         if( !(flags & CALIB_RATIONAL_MODEL) )
1879             flags |= CALIB_FIX_K4 | CALIB_FIX_K5 | CALIB_FIX_K6;
1880         if( !(flags & CALIB_THIN_PRISM_MODEL) )
1881             flags |= CALIB_FIX_S1_S2_S3_S4;
1882         if( !(flags & CALIB_TILTED_MODEL) )
1883             flags |= CALIB_FIX_TAUX_TAUY;
1884         if( flags & CALIB_FIX_ASPECT_RATIO )
1885             imask[0] = imask[NINTRINSIC] = 0;
1886         if( flags & CALIB_FIX_FOCAL_LENGTH )
1887             imask[0] = imask[1] = imask[NINTRINSIC] = imask[NINTRINSIC+1] = 0;
1888         if( flags & CALIB_FIX_PRINCIPAL_POINT )
1889             imask[2] = imask[3] = imask[NINTRINSIC+2] = imask[NINTRINSIC+3] = 0;
1890         if( flags & (CALIB_ZERO_TANGENT_DIST|CALIB_FIX_TANGENT_DIST) )
1891             imask[6] = imask[7] = imask[NINTRINSIC+6] = imask[NINTRINSIC+7] = 0;
1892         if( flags & CALIB_FIX_K1 )
1893             imask[4] = imask[NINTRINSIC+4] = 0;
1894         if( flags & CALIB_FIX_K2 )
1895             imask[5] = imask[NINTRINSIC+5] = 0;
1896         if( flags & CALIB_FIX_K3 )
1897             imask[8] = imask[NINTRINSIC+8] = 0;
1898         if( flags & CALIB_FIX_K4 )
1899             imask[9] = imask[NINTRINSIC+9] = 0;
1900         if( flags & CALIB_FIX_K5 )
1901             imask[10] = imask[NINTRINSIC+10] = 0;
1902         if( flags & CALIB_FIX_K6 )
1903             imask[11] = imask[NINTRINSIC+11] = 0;
1904         if( flags & CALIB_FIX_S1_S2_S3_S4 )
1905         {
1906             imask[12] = imask[NINTRINSIC+12] = 0;
1907             imask[13] = imask[NINTRINSIC+13] = 0;
1908             imask[14] = imask[NINTRINSIC+14] = 0;
1909             imask[15] = imask[NINTRINSIC+15] = 0;
1910         }
1911         if( flags & CALIB_FIX_TAUX_TAUY )
1912         {
1913             imask[16] = imask[NINTRINSIC+16] = 0;
1914             imask[17] = imask[NINTRINSIC+17] = 0;
1915         }
1916     }
1917 
1918     // storage for initial [om(R){i}|t{i}] (in order to compute the median for each component)
1919     RT0.reset(cvCreateMat( 6, nimages, CV_64F ));
1920     /*
1921        Compute initial estimate of pose
1922        For each image, compute:
1923           R(om) is the rotation matrix of om
1924           om(R) is the rotation vector of R
1925           R_ref = R(om_right) * R(om_left)'
1926           T_ref_list = [T_ref_list; T_right - R_ref * T_left]
1927           om_ref_list = [om_ref_list; om(R_ref)]
1928        om = median(om_ref_list)
1929        T = median(T_ref_list)
1930     */
1931     for( i = ofs = 0; i < nimages; ofs += ni, i++ )
1932     {
1933         ni = npoints->data.i[i];
1934         CvMat objpt_i;
1935         double _om[2][3], r[2][9], t[2][3];
1936         CvMat om[2], R[2], T[2], imgpt_i[2];
1937 
1938         objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
1939         for( k = 0; k < 2; k++ )
1940         {
1941             imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
1942             om[k] = cvMat(3, 1, CV_64F, _om[k]);
1943             R[k] = cvMat(3, 3, CV_64F, r[k]);
1944             T[k] = cvMat(3, 1, CV_64F, t[k]);
1945 
1946             cvFindExtrinsicCameraParams2( &objpt_i, &imgpt_i[k], &K[k], &Dist[k], &om[k], &T[k] );
1947             cvRodrigues2( &om[k], &R[k] );
1948             if( k == 0 )
1949             {
1950                 // save initial om_left and T_left
1951                 solver.param->data.db[(i+1)*6] = _om[0][0];
1952                 solver.param->data.db[(i+1)*6 + 1] = _om[0][1];
1953                 solver.param->data.db[(i+1)*6 + 2] = _om[0][2];
1954                 solver.param->data.db[(i+1)*6 + 3] = t[0][0];
1955                 solver.param->data.db[(i+1)*6 + 4] = t[0][1];
1956                 solver.param->data.db[(i+1)*6 + 5] = t[0][2];
1957             }
1958         }
1959         cvGEMM( &R[1], &R[0], 1, 0, 0, &R[0], CV_GEMM_B_T );
1960         cvGEMM( &R[0], &T[0], -1, &T[1], 1, &T[1] );
1961         cvRodrigues2( &R[0], &T[0] );
1962         RT0->data.db[i] = t[0][0];
1963         RT0->data.db[i + nimages] = t[0][1];
1964         RT0->data.db[i + nimages*2] = t[0][2];
1965         RT0->data.db[i + nimages*3] = t[1][0];
1966         RT0->data.db[i + nimages*4] = t[1][1];
1967         RT0->data.db[i + nimages*5] = t[1][2];
1968     }
1969 
1970     if(flags & CALIB_USE_EXTRINSIC_GUESS)
1971     {
1972         Vec3d R, T;
1973         cvarrToMat(matT).convertTo(T, CV_64F);
1974 
1975         if( matR->rows == 3 && matR->cols == 3 )
1976             Rodrigues(cvarrToMat(matR), R);
1977         else
1978             cvarrToMat(matR).convertTo(R, CV_64F);
1979 
1980         solver.param->data.db[0] = R[0];
1981         solver.param->data.db[1] = R[1];
1982         solver.param->data.db[2] = R[2];
1983         solver.param->data.db[3] = T[0];
1984         solver.param->data.db[4] = T[1];
1985         solver.param->data.db[5] = T[2];
1986     }
1987     else
1988     {
1989         // find the medians and save the first 6 parameters
1990         for( i = 0; i < 6; i++ )
1991         {
1992             qsort( RT0->data.db + i*nimages, nimages, CV_ELEM_SIZE(RT0->type), dbCmp );
1993             solver.param->data.db[i] = nimages % 2 != 0 ? RT0->data.db[i*nimages + nimages/2] :
1994                 (RT0->data.db[i*nimages + nimages/2 - 1] + RT0->data.db[i*nimages + nimages/2])*0.5;
1995         }
1996     }
1997 
1998     if( recomputeIntrinsics )
1999         for( k = 0; k < 2; k++ )
2000         {
2001             double* iparam = solver.param->data.db + (nimages+1)*6 + k*NINTRINSIC;
2002             if( flags & CALIB_ZERO_TANGENT_DIST )
2003                 dk[k][2] = dk[k][3] = 0;
2004             iparam[0] = A[k][0]; iparam[1] = A[k][4]; iparam[2] = A[k][2]; iparam[3] = A[k][5];
2005             iparam[4] = dk[k][0]; iparam[5] = dk[k][1]; iparam[6] = dk[k][2];
2006             iparam[7] = dk[k][3]; iparam[8] = dk[k][4]; iparam[9] = dk[k][5];
2007             iparam[10] = dk[k][6]; iparam[11] = dk[k][7];
2008             iparam[12] = dk[k][8];
2009             iparam[13] = dk[k][9];
2010             iparam[14] = dk[k][10];
2011             iparam[15] = dk[k][11];
2012             iparam[16] = dk[k][12];
2013             iparam[17] = dk[k][13];
2014         }
2015 
2016     om_LR = cvMat(3, 1, CV_64F, solver.param->data.db);
2017     T_LR = cvMat(3, 1, CV_64F, solver.param->data.db + 3);
2018 
2019     for(;;)
2020     {
2021         const CvMat* param = 0;
2022         CvMat *JtJ = 0, *JtErr = 0;
2023         double *_errNorm = 0;
2024         double _omR[3], _tR[3];
2025         double _dr3dr1[9], _dr3dr2[9], /*_dt3dr1[9],*/ _dt3dr2[9], _dt3dt1[9], _dt3dt2[9];
2026         CvMat dr3dr1 = cvMat(3, 3, CV_64F, _dr3dr1);
2027         CvMat dr3dr2 = cvMat(3, 3, CV_64F, _dr3dr2);
2028         //CvMat dt3dr1 = cvMat(3, 3, CV_64F, _dt3dr1);
2029         CvMat dt3dr2 = cvMat(3, 3, CV_64F, _dt3dr2);
2030         CvMat dt3dt1 = cvMat(3, 3, CV_64F, _dt3dt1);
2031         CvMat dt3dt2 = cvMat(3, 3, CV_64F, _dt3dt2);
2032         CvMat om[2], T[2], imgpt_i[2];
2033 
2034         if( !solver.updateAlt( param, JtJ, JtErr, _errNorm ))
2035             break;
2036         reprojErr = 0;
2037 
2038         cvRodrigues2( &om_LR, &R_LR );
2039         om[1] = cvMat(3,1,CV_64F,_omR);
2040         T[1] = cvMat(3,1,CV_64F,_tR);
2041 
2042         if( recomputeIntrinsics )
2043         {
2044             double* iparam = solver.param->data.db + (nimages+1)*6;
2045             double* ipparam = solver.prevParam->data.db + (nimages+1)*6;
2046 
2047             if( flags & CALIB_SAME_FOCAL_LENGTH )
2048             {
2049                 iparam[NINTRINSIC] = iparam[0];
2050                 iparam[NINTRINSIC+1] = iparam[1];
2051                 ipparam[NINTRINSIC] = ipparam[0];
2052                 ipparam[NINTRINSIC+1] = ipparam[1];
2053             }
2054             if( flags & CALIB_FIX_ASPECT_RATIO )
2055             {
2056                 iparam[0] = iparam[1]*aspectRatio[0];
2057                 iparam[NINTRINSIC] = iparam[NINTRINSIC+1]*aspectRatio[1];
2058                 ipparam[0] = ipparam[1]*aspectRatio[0];
2059                 ipparam[NINTRINSIC] = ipparam[NINTRINSIC+1]*aspectRatio[1];
2060             }
2061             for( k = 0; k < 2; k++ )
2062             {
2063                 A[k][0] = iparam[k*NINTRINSIC+0];
2064                 A[k][4] = iparam[k*NINTRINSIC+1];
2065                 A[k][2] = iparam[k*NINTRINSIC+2];
2066                 A[k][5] = iparam[k*NINTRINSIC+3];
2067                 dk[k][0] = iparam[k*NINTRINSIC+4];
2068                 dk[k][1] = iparam[k*NINTRINSIC+5];
2069                 dk[k][2] = iparam[k*NINTRINSIC+6];
2070                 dk[k][3] = iparam[k*NINTRINSIC+7];
2071                 dk[k][4] = iparam[k*NINTRINSIC+8];
2072                 dk[k][5] = iparam[k*NINTRINSIC+9];
2073                 dk[k][6] = iparam[k*NINTRINSIC+10];
2074                 dk[k][7] = iparam[k*NINTRINSIC+11];
2075                 dk[k][8] = iparam[k*NINTRINSIC+12];
2076                 dk[k][9] = iparam[k*NINTRINSIC+13];
2077                 dk[k][10] = iparam[k*NINTRINSIC+14];
2078                 dk[k][11] = iparam[k*NINTRINSIC+15];
2079                 dk[k][12] = iparam[k*NINTRINSIC+16];
2080                 dk[k][13] = iparam[k*NINTRINSIC+17];
2081             }
2082         }
2083 
2084         for( i = ofs = 0; i < nimages; ofs += ni, i++ )
2085         {
2086             ni = npoints->data.i[i];
2087             CvMat objpt_i;
2088 
2089             om[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6);
2090             T[0] = cvMat(3,1,CV_64F,solver.param->data.db+(i+1)*6+3);
2091 
2092             if( JtJ || JtErr )
2093                 cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1], &dr3dr1, 0,
2094                              &dr3dr2, 0, 0, &dt3dt1, &dt3dr2, &dt3dt2 );
2095             else
2096                 cvComposeRT( &om[0], &T[0], &om_LR, &T_LR, &om[1], &T[1] );
2097 
2098             objpt_i = cvMat(1, ni, CV_64FC3, objectPoints->data.db + ofs*3);
2099             err.resize(ni*2); Je.resize(ni*2); J_LR.resize(ni*2); Ji.resize(ni*2);
2100 
2101             CvMat tmpimagePoints = cvMat(err.reshape(2, 1));
2102             CvMat dpdf = cvMat(Ji.colRange(0, 2));
2103             CvMat dpdc = cvMat(Ji.colRange(2, 4));
2104             CvMat dpdk = cvMat(Ji.colRange(4, NINTRINSIC));
2105             CvMat dpdrot = cvMat(Je.colRange(0, 3));
2106             CvMat dpdt = cvMat(Je.colRange(3, 6));
2107 
2108             for( k = 0; k < 2; k++ )
2109             {
2110                 imgpt_i[k] = cvMat(1, ni, CV_64FC2, imagePoints[k]->data.db + ofs*2);
2111 
2112                 if( JtJ || JtErr )
2113                     cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k],
2114                             &tmpimagePoints, &dpdrot, &dpdt, &dpdf, &dpdc, &dpdk,
2115                             (flags & CALIB_FIX_ASPECT_RATIO) ? aspectRatio[k] : 0);
2116                 else
2117                     cvProjectPoints2( &objpt_i, &om[k], &T[k], &K[k], &Dist[k], &tmpimagePoints );
2118                 cvSub( &tmpimagePoints, &imgpt_i[k], &tmpimagePoints );
2119 
2120                 if( solver.state == CvLevMarq::CALC_J )
2121                 {
2122                     int iofs = (nimages+1)*6 + k*NINTRINSIC, eofs = (i+1)*6;
2123                     assert( JtJ && JtErr );
2124 
2125                     Mat _JtJ(cvarrToMat(JtJ)), _JtErr(cvarrToMat(JtErr));
2126 
2127                     if( k == 1 )
2128                     {
2129                         // d(err_{x|y}R) ~ de3
2130                         // convert de3/{dr3,dt3} => de3{dr1,dt1} & de3{dr2,dt2}
2131                         for( p = 0; p < ni*2; p++ )
2132                         {
2133                             CvMat de3dr3 = cvMat( 1, 3, CV_64F, Je.ptr(p));
2134                             CvMat de3dt3 = cvMat( 1, 3, CV_64F, de3dr3.data.db + 3 );
2135                             CvMat de3dr2 = cvMat( 1, 3, CV_64F, J_LR.ptr(p) );
2136                             CvMat de3dt2 = cvMat( 1, 3, CV_64F, de3dr2.data.db + 3 );
2137                             double _de3dr1[3], _de3dt1[3];
2138                             CvMat de3dr1 = cvMat( 1, 3, CV_64F, _de3dr1 );
2139                             CvMat de3dt1 = cvMat( 1, 3, CV_64F, _de3dt1 );
2140 
2141                             cvMatMul( &de3dr3, &dr3dr1, &de3dr1 );
2142                             cvMatMul( &de3dt3, &dt3dt1, &de3dt1 );
2143 
2144                             cvMatMul( &de3dr3, &dr3dr2, &de3dr2 );
2145                             cvMatMulAdd( &de3dt3, &dt3dr2, &de3dr2, &de3dr2 );
2146 
2147                             cvMatMul( &de3dt3, &dt3dt2, &de3dt2 );
2148 
2149                             cvCopy( &de3dr1, &de3dr3 );
2150                             cvCopy( &de3dt1, &de3dt3 );
2151                         }
2152 
2153                         _JtJ(Rect(0, 0, 6, 6)) += J_LR.t()*J_LR;
2154                         _JtJ(Rect(eofs, 0, 6, 6)) = J_LR.t()*Je;
2155                         _JtErr.rowRange(0, 6) += J_LR.t()*err;
2156                     }
2157 
2158                     _JtJ(Rect(eofs, eofs, 6, 6)) += Je.t()*Je;
2159                     _JtErr.rowRange(eofs, eofs + 6) += Je.t()*err;
2160 
2161                     if( recomputeIntrinsics )
2162                     {
2163                         _JtJ(Rect(iofs, iofs, NINTRINSIC, NINTRINSIC)) += Ji.t()*Ji;
2164                         _JtJ(Rect(iofs, eofs, NINTRINSIC, 6)) += Je.t()*Ji;
2165                         if( k == 1 )
2166                         {
2167                             _JtJ(Rect(iofs, 0, NINTRINSIC, 6)) += J_LR.t()*Ji;
2168                         }
2169                         _JtErr.rowRange(iofs, iofs + NINTRINSIC) += Ji.t()*err;
2170                     }
2171                 }
2172 
2173                 double viewErr = norm(err, NORM_L2SQR);
2174 
2175                 if(perViewErr)
2176                     perViewErr->data.db[i*2 + k] = std::sqrt(viewErr/ni);
2177 
2178                 reprojErr += viewErr;
2179             }
2180         }
2181         if(_errNorm)
2182             *_errNorm = reprojErr;
2183     }
2184 
2185     cvRodrigues2( &om_LR, &R_LR );
2186     if( matR->rows == 1 || matR->cols == 1 )
2187         cvConvert( &om_LR, matR );
2188     else
2189         cvConvert( &R_LR, matR );
2190     cvConvert( &T_LR, matT );
2191 
2192     if( recomputeIntrinsics )
2193     {
2194         cvConvert( &K[0], _cameraMatrix1 );
2195         cvConvert( &K[1], _cameraMatrix2 );
2196 
2197         for( k = 0; k < 2; k++ )
2198         {
2199             CvMat* distCoeffs = k == 0 ? _distCoeffs1 : _distCoeffs2;
2200             CvMat tdist = cvMat( distCoeffs->rows, distCoeffs->cols,
2201                 CV_MAKETYPE(CV_64F,CV_MAT_CN(distCoeffs->type)), Dist[k].data.db );
2202             cvConvert( &tdist, distCoeffs );
2203         }
2204     }
2205 
2206     if( matE || matF )
2207     {
2208         double* t = T_LR.data.db;
2209         double tx[] =
2210         {
2211             0, -t[2], t[1],
2212             t[2], 0, -t[0],
2213             -t[1], t[0], 0
2214         };
2215         CvMat Tx = cvMat(3, 3, CV_64F, tx);
2216         double e[9], f[9];
2217         CvMat E = cvMat(3, 3, CV_64F, e);
2218         CvMat F = cvMat(3, 3, CV_64F, f);
2219         cvMatMul( &Tx, &R_LR, &E );
2220         if( matE )
2221             cvConvert( &E, matE );
2222         if( matF )
2223         {
2224             double ik[9];
2225             CvMat iK = cvMat(3, 3, CV_64F, ik);
2226             cvInvert(&K[1], &iK);
2227             cvGEMM( &iK, &E, 1, 0, 0, &E, CV_GEMM_A_T );
2228             cvInvert(&K[0], &iK);
2229             cvMatMul(&E, &iK, &F);
2230             cvConvertScale( &F, matF, fabs(f[8]) > 0 ? 1./f[8] : 1 );
2231         }
2232     }
2233 
2234     return std::sqrt(reprojErr/(pointsTotal*2));
2235 }
2236 double cvStereoCalibrate( const CvMat* _objectPoints, const CvMat* _imagePoints1,
2237                         const CvMat* _imagePoints2, const CvMat* _npoints,
2238                         CvMat* _cameraMatrix1, CvMat* _distCoeffs1,
2239                         CvMat* _cameraMatrix2, CvMat* _distCoeffs2,
2240                         CvSize imageSize, CvMat* matR, CvMat* matT,
2241                         CvMat* matE, CvMat* matF,
2242                         int flags,
2243                         CvTermCriteria termCrit )
2244 {
2245     return cvStereoCalibrateImpl(_objectPoints, _imagePoints1, _imagePoints2, _npoints, _cameraMatrix1,
2246                                  _distCoeffs1, _cameraMatrix2, _distCoeffs2, imageSize, matR, matT, matE,
2247                                  matF, NULL, flags, termCrit);
2248 }
2249 
2250 static void
2251 icvGetRectangles( const CvMat* cameraMatrix, const CvMat* distCoeffs,
2252                  const CvMat* R, const CvMat* newCameraMatrix, CvSize imgSize,
2253                  cv::Rect_<float>& inner, cv::Rect_<float>& outer )
2254 {
2255     const int N = 9;
2256     int x, y, k;
2257     cv::Ptr<CvMat> _pts(cvCreateMat(1, N*N, CV_32FC2));
2258     CvPoint2D32f* pts = (CvPoint2D32f*)(_pts->data.ptr);
2259 
2260     for( y = k = 0; y < N; y++ )
2261         for( x = 0; x < N; x++ )
2262             pts[k++] = cvPoint2D32f((float)x*imgSize.width/(N-1),
2263                                     (float)y*imgSize.height/(N-1));
2264 
2265     cvUndistortPoints(_pts, _pts, cameraMatrix, distCoeffs, R, newCameraMatrix);
2266 
2267     float iX0=-FLT_MAX, iX1=FLT_MAX, iY0=-FLT_MAX, iY1=FLT_MAX;
2268     float oX0=FLT_MAX, oX1=-FLT_MAX, oY0=FLT_MAX, oY1=-FLT_MAX;
2269     // find the inscribed rectangle.
2270     // the code will likely not work with extreme rotation matrices (R) (rotations > ~45 degrees)
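    // 'outer' is the bounding box of all undistorted grid points, while 'inner' is an
    // axis-aligned rectangle inscribed in the undistorted image border (max over the
    // left/top border points, min over the right/bottom ones).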
2271     for( y = k = 0; y < N; y++ )
2272         for( x = 0; x < N; x++ )
2273         {
2274             CvPoint2D32f p = pts[k++];
2275             oX0 = MIN(oX0, p.x);
2276             oX1 = MAX(oX1, p.x);
2277             oY0 = MIN(oY0, p.y);
2278             oY1 = MAX(oY1, p.y);
2279 
2280             if( x == 0 )
2281                 iX0 = MAX(iX0, p.x);
2282             if( x == N-1 )
2283                 iX1 = MIN(iX1, p.x);
2284             if( y == 0 )
2285                 iY0 = MAX(iY0, p.y);
2286             if( y == N-1 )
2287                 iY1 = MIN(iY1, p.y);
2288         }
2289     inner = cv::Rect_<float>(iX0, iY0, iX1-iX0, iY1-iY0);
2290     outer = cv::Rect_<float>(oX0, oY0, oX1-oX0, oY1-oY0);
2291 }
2292 
2293 
2294 void cvStereoRectify( const CvMat* _cameraMatrix1, const CvMat* _cameraMatrix2,
2295                       const CvMat* _distCoeffs1, const CvMat* _distCoeffs2,
2296                       CvSize imageSize, const CvMat* matR, const CvMat* matT,
2297                       CvMat* _R1, CvMat* _R2, CvMat* _P1, CvMat* _P2,
2298                       CvMat* matQ, int flags, double alpha, CvSize newImgSize,
2299                       CvRect* roi1, CvRect* roi2 )
2300 {
2301     double _om[3], _t[3] = {0}, _uu[3]={0,0,0}, _r_r[3][3], _pp[3][4];
2302     double _ww[3], _wr[3][3], _z[3] = {0,0,0}, _ri[3][3];
2303     cv::Rect_<float> inner1, inner2, outer1, outer2;
2304 
2305     CvMat om  = cvMat(3, 1, CV_64F, _om);
2306     CvMat t   = cvMat(3, 1, CV_64F, _t);
2307     CvMat uu  = cvMat(3, 1, CV_64F, _uu);
2308     CvMat r_r = cvMat(3, 3, CV_64F, _r_r);
2309     CvMat pp  = cvMat(3, 4, CV_64F, _pp);
2310     CvMat ww  = cvMat(3, 1, CV_64F, _ww); // temps
2311     CvMat wR  = cvMat(3, 3, CV_64F, _wr);
2312     CvMat Z   = cvMat(3, 1, CV_64F, _z);
2313     CvMat Ri  = cvMat(3, 3, CV_64F, _ri);
2314     double nx = imageSize.width, ny = imageSize.height;
2315     int i, k;
2316 
2317     if( matR->rows == 3 && matR->cols == 3 )
2318         cvRodrigues2(matR, &om);          // get vector rotation
2319     else
2320         cvConvert(matR, &om); // it's already a rotation vector
2321     cvConvertScale(&om, &om, -0.5); // get average rotation
2322     cvRodrigues2(&om, &r_r);        // rotate cameras to same orientation by averaging
2323     cvMatMul(&r_r, matT, &t);
2324 
2325     int idx = fabs(_t[0]) > fabs(_t[1]) ? 0 : 1;
2326     double c = _t[idx], nt = cvNorm(&t, 0, CV_L2);
2327     _uu[idx] = c > 0 ? 1 : -1;
2328 
2329     CV_Assert(nt > 0.0);
2330 
2331     // calculate global Z rotation
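    // ww is the rotation axis t x uu scaled by the angle between the baseline t and
    // the target axis uu, so that the rotation R(ww) brings the baseline onto the
    // chosen x (or y) image axis.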
2332     cvCrossProduct(&t,&uu,&ww);
2333     double nw = cvNorm(&ww, 0, CV_L2);
2334     if (nw > 0.0)
2335         cvConvertScale(&ww, &ww, acos(fabs(c)/nt)/nw);
2336     cvRodrigues2(&ww, &wR);
2337 
2338     // apply to both views
2339     cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, CV_GEMM_B_T);
2340     cvConvert( &Ri, _R1 );
2341     cvGEMM(&wR, &r_r, 1, 0, 0, &Ri, 0);
2342     cvConvert( &Ri, _R2 );
2343     cvMatMul(&Ri, matT, &t);
2344 
2345     // calculate projection/camera matrices
2346     // these contain the relevant rectified image internal params (fx, fy=fx, cx, cy)
2347     double fc_new = DBL_MAX;
2348     CvPoint2D64f cc_new[2] = {};
2349 
2350     newImgSize = newImgSize.width * newImgSize.height != 0 ? newImgSize : imageSize;
2351     const double ratio_x = (double)newImgSize.width / imageSize.width / 2;
2352     const double ratio_y = (double)newImgSize.height / imageSize.height / 2;
2353     const double ratio = idx == 1 ? ratio_x : ratio_y;
2354     fc_new = (cvmGet(_cameraMatrix1, idx ^ 1, idx ^ 1) + cvmGet(_cameraMatrix2, idx ^ 1, idx ^ 1)) * ratio;
2355 
2356     for( k = 0; k < 2; k++ )
2357     {
2358         const CvMat* A = k == 0 ? _cameraMatrix1 : _cameraMatrix2;
2359         const CvMat* Dk = k == 0 ? _distCoeffs1 : _distCoeffs2;
2360         CvPoint2D32f _pts[4] = {};
2361         CvPoint3D32f _pts_3[4] = {};
2362         CvMat pts = cvMat(1, 4, CV_32FC2, _pts);
2363         CvMat pts_3 = cvMat(1, 4, CV_32FC3, _pts_3);
2364 
2365         for( i = 0; i < 4; i++ )
2366         {
2367             int j = (i<2) ? 0 : 1;
2368             _pts[i].x = (float)((i % 2)*(nx-1));
2369             _pts[i].y = (float)(j*(ny-1));
2370         }
2371         cvUndistortPoints( &pts, &pts, A, Dk, 0, 0 );
2372         cvConvertPointsHomogeneous( &pts, &pts_3 );
2373 
2374         //Change camera matrix to have cc=[0,0] and fc = fc_new
2375         double _a_tmp[3][3];
2376         CvMat A_tmp  = cvMat(3, 3, CV_64F, _a_tmp);
2377         _a_tmp[0][0]=fc_new;
2378         _a_tmp[1][1]=fc_new;
2379         _a_tmp[0][2]=0.0;
2380         _a_tmp[1][2]=0.0;
2381         cvProjectPoints2( &pts_3, k == 0 ? _R1 : _R2, &Z, &A_tmp, 0, &pts );
2382         CvScalar avg = cvAvg(&pts);
2383         cc_new[k].x = (nx-1)/2 - avg.val[0];
2384         cc_new[k].y = (ny-1)/2 - avg.val[1];
2385     }
2386 
2387     // vertical focal length must be the same for both images to keep the epipolar constraint
2388     // (for horizontal epipolar lines -- TBD: check for vertical epipolar lines)
2389     // use fy for fx also, for simplicity
2390 
2391     // For simplicity, set the principal points for both cameras to be the average
2392     // of the two principal points (either one of or both x- and y- coordinates)
2393     if( flags & CALIB_ZERO_DISPARITY )
2394     {
2395         cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
2396         cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
2397     }
2398     else if( idx == 0 ) // horizontal stereo
2399         cc_new[0].y = cc_new[1].y = (cc_new[0].y + cc_new[1].y)*0.5;
2400     else // vertical stereo
2401         cc_new[0].x = cc_new[1].x = (cc_new[0].x + cc_new[1].x)*0.5;
2402 
2403     cvZero( &pp );
2404     _pp[0][0] = _pp[1][1] = fc_new;
2405     _pp[0][2] = cc_new[0].x;
2406     _pp[1][2] = cc_new[0].y;
2407     _pp[2][2] = 1;
2408     cvConvert(&pp, _P1);
2409 
2410     _pp[0][2] = cc_new[1].x;
2411     _pp[1][2] = cc_new[1].y;
2412     _pp[idx][3] = _t[idx]*fc_new; // baseline * focal length
2413     cvConvert(&pp, _P2);
2414 
2415     alpha = MIN(alpha, 1.);
2416 
2417     icvGetRectangles( _cameraMatrix1, _distCoeffs1, _R1, _P1, imageSize, inner1, outer1 );
2418     icvGetRectangles( _cameraMatrix2, _distCoeffs2, _R2, _P2, imageSize, inner2, outer2 );
2419 
2420     {
2421     newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imageSize;
2422     double cx1_0 = cc_new[0].x;
2423     double cy1_0 = cc_new[0].y;
2424     double cx2_0 = cc_new[1].x;
2425     double cy2_0 = cc_new[1].y;
2426     double cx1 = newImgSize.width*cx1_0/imageSize.width;
2427     double cy1 = newImgSize.height*cy1_0/imageSize.height;
2428     double cx2 = newImgSize.width*cx2_0/imageSize.width;
2429     double cy2 = newImgSize.height*cy2_0/imageSize.height;
2430     double s = 1.;
2431 
2432     if( alpha >= 0 )
2433     {
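        // s0: zoom that crops the image to the inner (all-valid) rectangle (alpha = 0);
        // s1: zoom that keeps the entire outer rectangle, i.e. all source pixels,
        // in view (alpha = 1); s interpolates between the two.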
2434         double s0 = std::max(std::max(std::max((double)cx1/(cx1_0 - inner1.x), (double)cy1/(cy1_0 - inner1.y)),
2435                             (double)(newImgSize.width - cx1)/(inner1.x + inner1.width - cx1_0)),
2436                         (double)(newImgSize.height - cy1)/(inner1.y + inner1.height - cy1_0));
2437         s0 = std::max(std::max(std::max(std::max((double)cx2/(cx2_0 - inner2.x), (double)cy2/(cy2_0 - inner2.y)),
2438                          (double)(newImgSize.width - cx2)/(inner2.x + inner2.width - cx2_0)),
2439                      (double)(newImgSize.height - cy2)/(inner2.y + inner2.height - cy2_0)),
2440                  s0);
2441 
2442         double s1 = std::min(std::min(std::min((double)cx1/(cx1_0 - outer1.x), (double)cy1/(cy1_0 - outer1.y)),
2443                             (double)(newImgSize.width - cx1)/(outer1.x + outer1.width - cx1_0)),
2444                         (double)(newImgSize.height - cy1)/(outer1.y + outer1.height - cy1_0));
2445         s1 = std::min(std::min(std::min(std::min((double)cx2/(cx2_0 - outer2.x), (double)cy2/(cy2_0 - outer2.y)),
2446                          (double)(newImgSize.width - cx2)/(outer2.x + outer2.width - cx2_0)),
2447                      (double)(newImgSize.height - cy2)/(outer2.y + outer2.height - cy2_0)),
2448                  s1);
2449 
2450         s = s0*(1 - alpha) + s1*alpha;
2451     }
2452 
2453     fc_new *= s;
2454     cc_new[0] = cvPoint2D64f(cx1, cy1);
2455     cc_new[1] = cvPoint2D64f(cx2, cy2);
2456 
2457     cvmSet(_P1, 0, 0, fc_new);
2458     cvmSet(_P1, 1, 1, fc_new);
2459     cvmSet(_P1, 0, 2, cx1);
2460     cvmSet(_P1, 1, 2, cy1);
2461 
2462     cvmSet(_P2, 0, 0, fc_new);
2463     cvmSet(_P2, 1, 1, fc_new);
2464     cvmSet(_P2, 0, 2, cx2);
2465     cvmSet(_P2, 1, 2, cy2);
2466     cvmSet(_P2, idx, 3, s*cvmGet(_P2, idx, 3));
2467 
2468     if(roi1)
2469     {
2470         *roi1 = cvRect(
2471             cv::Rect(cvCeil((inner1.x - cx1_0)*s + cx1),
2472                      cvCeil((inner1.y - cy1_0)*s + cy1),
2473                      cvFloor(inner1.width*s), cvFloor(inner1.height*s))
2474             & cv::Rect(0, 0, newImgSize.width, newImgSize.height)
2475         );
2476     }
2477 
2478     if(roi2)
2479     {
2480         *roi2 = cvRect(
2481             cv::Rect(cvCeil((inner2.x - cx2_0)*s + cx2),
2482                      cvCeil((inner2.y - cy2_0)*s + cy2),
2483                      cvFloor(inner2.width*s), cvFloor(inner2.height*s))
2484             & cv::Rect(0, 0, newImgSize.width, newImgSize.height)
2485         );
2486     }
2487     }
2488 
2489     if( matQ )
2490     {
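        // Q is the 4x4 disparity-to-depth mapping: a rectified pixel with disparity d
        // is reprojected to homogeneous 3D coordinates as [X Y Z W]^T = Q * [x y d 1]^T.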
2491         double q[] =
2492         {
2493             1, 0, 0, -cc_new[0].x,
2494             0, 1, 0, -cc_new[0].y,
2495             0, 0, 0, fc_new,
2496             0, 0, -1./_t[idx],
2497             (idx == 0 ? cc_new[0].x - cc_new[1].x : cc_new[0].y - cc_new[1].y)/_t[idx]
2498         };
2499         CvMat Q = cvMat(4, 4, CV_64F, q);
2500         cvConvert( &Q, matQ );
2501     }
2502 }
2503 
2504 
2505 void cvGetOptimalNewCameraMatrix( const CvMat* cameraMatrix, const CvMat* distCoeffs,
2506                                   CvSize imgSize, double alpha,
2507                                   CvMat* newCameraMatrix, CvSize newImgSize,
2508                                   CvRect* validPixROI, int centerPrincipalPoint )
2509 {
2510     cv::Rect_<float> inner, outer;
2511     newImgSize = newImgSize.width*newImgSize.height != 0 ? newImgSize : imgSize;
2512 
2513     double M[3][3];
2514     CvMat matM = cvMat(3, 3, CV_64F, M);
2515     cvConvert(cameraMatrix, &matM);
2516 
2517     if( centerPrincipalPoint )
2518     {
2519         double cx0 = M[0][2];
2520         double cy0 = M[1][2];
2521         double cx = (newImgSize.width-1)*0.5;
2522         double cy = (newImgSize.height-1)*0.5;
2523 
2524         icvGetRectangles( cameraMatrix, distCoeffs, 0, cameraMatrix, imgSize, inner, outer );
2525         double s0 = std::max(std::max(std::max((double)cx/(cx0 - inner.x), (double)cy/(cy0 - inner.y)),
2526                                       (double)cx/(inner.x + inner.width - cx0)),
2527                              (double)cy/(inner.y + inner.height - cy0));
2528         double s1 = std::min(std::min(std::min((double)cx/(cx0 - outer.x), (double)cy/(cy0 - outer.y)),
2529                                       (double)cx/(outer.x + outer.width - cx0)),
2530                              (double)cy/(outer.y + outer.height - cy0));
2531         double s = s0*(1 - alpha) + s1*alpha;
2532 
2533         M[0][0] *= s;
2534         M[1][1] *= s;
2535         M[0][2] = cx;
2536         M[1][2] = cy;
2537 
2538         if( validPixROI )
2539         {
2540             inner = cv::Rect_<float>((float)((inner.x - cx0)*s + cx),
2541                                      (float)((inner.y - cy0)*s + cy),
2542                                      (float)(inner.width*s),
2543                                      (float)(inner.height*s));
2544             cv::Rect r(cvCeil(inner.x), cvCeil(inner.y), cvFloor(inner.width), cvFloor(inner.height));
2545             r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2546             *validPixROI = cvRect(r);
2547         }
2548     }
2549     else
2550     {
2551         // Get inscribed and circumscribed rectangles in normalized
2552         // (independent of camera matrix) coordinates
2553         icvGetRectangles( cameraMatrix, distCoeffs, 0, 0, imgSize, inner, outer );
2554 
2555         // Projection mapping inner rectangle to viewport
2556         double fx0 = (newImgSize.width  - 1) / inner.width;
2557         double fy0 = (newImgSize.height - 1) / inner.height;
2558         double cx0 = -fx0 * inner.x;
2559         double cy0 = -fy0 * inner.y;
2560 
2561         // Projection mapping outer rectangle to viewport
2562         double fx1 = (newImgSize.width  - 1) / outer.width;
2563         double fy1 = (newImgSize.height - 1) / outer.height;
2564         double cx1 = -fx1 * outer.x;
2565         double cy1 = -fy1 * outer.y;
2566 
2567         // Interpolate between the two optimal projections
2568         M[0][0] = fx0*(1 - alpha) + fx1*alpha;
2569         M[1][1] = fy0*(1 - alpha) + fy1*alpha;
2570         M[0][2] = cx0*(1 - alpha) + cx1*alpha;
2571         M[1][2] = cy0*(1 - alpha) + cy1*alpha;
2572 
2573         if( validPixROI )
2574         {
2575             icvGetRectangles( cameraMatrix, distCoeffs, 0, &matM, imgSize, inner, outer );
2576             cv::Rect r = inner;
2577             r &= cv::Rect(0, 0, newImgSize.width, newImgSize.height);
2578             *validPixROI = cvRect(r);
2579         }
2580     }
2581 
2582     cvConvert(&matM, newCameraMatrix);
2583 }
2584 
2585 
2586 CV_IMPL int cvStereoRectifyUncalibrated(
2587     const CvMat* _points1, const CvMat* _points2,
2588     const CvMat* F0, CvSize imgSize,
2589     CvMat* _H1, CvMat* _H2, double threshold )
2590 {
2591     Ptr<CvMat> _m1, _m2, _lines1, _lines2;
2592 
2593     int i, j, npoints;
2594     double cx, cy;
2595     double u[9], v[9], w[9], f[9], h1[9], h2[9], h0[9], e2[3] = {0};
2596     CvMat E2 = cvMat( 3, 1, CV_64F, e2 );
2597     CvMat U = cvMat( 3, 3, CV_64F, u );
2598     CvMat V = cvMat( 3, 3, CV_64F, v );
2599     CvMat W = cvMat( 3, 3, CV_64F, w );
2600     CvMat F = cvMat( 3, 3, CV_64F, f );
2601     CvMat H1 = cvMat( 3, 3, CV_64F, h1 );
2602     CvMat H2 = cvMat( 3, 3, CV_64F, h2 );
2603     CvMat H0 = cvMat( 3, 3, CV_64F, h0 );
2604 
2605     CvPoint2D64f* m1;
2606     CvPoint2D64f* m2;
2607     CvPoint3D64f* lines1;
2608     CvPoint3D64f* lines2;
2609 
2610     CV_Assert( CV_IS_MAT(_points1) && CV_IS_MAT(_points2) &&
2611         CV_ARE_SIZES_EQ(_points1, _points2) );
2612 
2613     npoints = _points1->rows * _points1->cols * CV_MAT_CN(_points1->type) / 2;
2614 
2615     _m1.reset(cvCreateMat( _points1->rows, _points1->cols, CV_64FC(CV_MAT_CN(_points1->type)) ));
2616     _m2.reset(cvCreateMat( _points2->rows, _points2->cols, CV_64FC(CV_MAT_CN(_points2->type)) ));
2617     _lines1.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
2618     _lines2.reset(cvCreateMat( 1, npoints, CV_64FC3 ));
2619 
2620     cvConvert( F0, &F );
2621 
2622     cvSVD( (CvMat*)&F, &W, &U, &V, CV_SVD_U_T + CV_SVD_V_T );
2623     W.data.db[8] = 0.;
2624     cvGEMM( &U, &W, 1, 0, 0, &W, CV_GEMM_A_T );
2625     cvMatMul( &W, &V, &F );
2626 
2627     cx = cvRound( (imgSize.width-1)*0.5 );
2628     cy = cvRound( (imgSize.height-1)*0.5 );
2629 
2630     cvZero( _H1 );
2631     cvZero( _H2 );
2632 
2633     cvConvert( _points1, _m1 );
2634     cvConvert( _points2, _m2 );
2635     cvReshape( _m1, _m1, 2, 1 );
2636     cvReshape( _m2, _m2, 2, 1 );
2637 
2638     m1 = (CvPoint2D64f*)_m1->data.ptr;
2639     m2 = (CvPoint2D64f*)_m2->data.ptr;
2640     lines1 = (CvPoint3D64f*)_lines1->data.ptr;
2641     lines2 = (CvPoint3D64f*)_lines2->data.ptr;
2642 
2643     if( threshold > 0 )
2644     {
2645         cvComputeCorrespondEpilines( _m1, 1, &F, _lines1 );
2646         cvComputeCorrespondEpilines( _m2, 2, &F, _lines2 );
2647 
2648         // measure distance from points to the corresponding epilines, mark outliers
2649         for( i = j = 0; i < npoints; i++ )
2650         {
2651             if( fabs(m1[i].x*lines2[i].x +
2652                      m1[i].y*lines2[i].y +
2653                      lines2[i].z) <= threshold &&
2654                 fabs(m2[i].x*lines1[i].x +
2655                      m2[i].y*lines1[i].y +
2656                      lines1[i].z) <= threshold )
2657             {
2658                 if( j < i )
2659                 {
2660                     m1[j] = m1[i];
2661                     m2[j] = m2[i];
2662                 }
2663                 j++;
2664             }
2665         }
2666 
2667         npoints = j;
2668         if( npoints == 0 )
2669             return 0;
2670     }
2671 
2672     _m1->cols = _m2->cols = npoints;
2673     memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
2674     cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
2675 
2676     double t[] =
2677     {
2678         1, 0, -cx,
2679         0, 1, -cy,
2680         0, 0, 1
2681     };
2682     CvMat T = cvMat(3, 3, CV_64F, t);
2683     cvMatMul( &T, &E2, &E2 );
2684 
2685     int mirror = e2[0] < 0;
2686     double d = MAX(std::sqrt(e2[0]*e2[0] + e2[1]*e2[1]),DBL_EPSILON);
2687     double alpha = e2[0]/d;
2688     double beta = e2[1]/d;
2689     double r[] =
2690     {
2691         alpha, beta, 0,
2692         -beta, alpha, 0,
2693         0, 0, 1
2694     };
2695     CvMat R = cvMat(3, 3, CV_64F, r);
2696     cvMatMul( &R, &T, &T );
2697     cvMatMul( &R, &E2, &E2 );
2698     double invf = fabs(e2[2]) < 1e-6*fabs(e2[0]) ? 0 : -e2[2]/e2[0];
2699     double k[] =
2700     {
2701         1, 0, 0,
2702         0, 1, 0,
2703         invf, 0, 1
2704     };
2705     CvMat K = cvMat(3, 3, CV_64F, k);
2706     cvMatMul( &K, &T, &H2 );
2707     cvMatMul( &K, &E2, &E2 );
2708 
2709     double it[] =
2710     {
2711         1, 0, cx,
2712         0, 1, cy,
2713         0, 0, 1
2714     };
2715     CvMat iT = cvMat( 3, 3, CV_64F, it );
2716     cvMatMul( &iT, &H2, &H2 );
2717 
2718     memcpy( E2.data.db, U.data.db + 6, sizeof(e2));
2719     cvScale( &E2, &E2, e2[2] > 0 ? 1 : -1 );
2720 
2721     double e2_x[] =
2722     {
2723         0, -e2[2], e2[1],
2724        e2[2], 0, -e2[0],
2725        -e2[1], e2[0], 0
2726     };
2727     double e2_111[] =
2728     {
2729         e2[0], e2[0], e2[0],
2730         e2[1], e2[1], e2[1],
2731         e2[2], e2[2], e2[2],
2732     };
2733     CvMat E2_x = cvMat(3, 3, CV_64F, e2_x);
2734     CvMat E2_111 = cvMat(3, 3, CV_64F, e2_111);
2735     cvMatMulAdd(&E2_x, &F, &E2_111, &H0 );
2736     cvMatMul(&H2, &H0, &H0);
2737     CvMat E1=cvMat(3, 1, CV_64F, V.data.db+6);
2738     cvMatMul(&H0, &E1, &E1);
2739 
2740     cvPerspectiveTransform( _m1, _m1, &H0 );
2741     cvPerspectiveTransform( _m2, _m2, &H2 );
2742     CvMat A = cvMat( 1, npoints, CV_64FC3, lines1 ), BxBy, B;
2743     double x[3] = {0};
2744     CvMat X = cvMat( 3, 1, CV_64F, x );
2745     cvConvertPointsHomogeneous( _m1, &A );
2746     cvReshape( &A, &A, 1, npoints );
2747     cvReshape( _m2, &BxBy, 1, npoints );
2748     cvGetCol( &BxBy, &B, 0 );
2749     cvSolve( &A, &B, &X, CV_SVD );
2750 
2751     double ha[] =
2752     {
2753         x[0], x[1], x[2],
2754         0, 1, 0,
2755         0, 0, 1
2756     };
2757     CvMat Ha = cvMat(3, 3, CV_64F, ha);
2758     cvMatMul( &Ha, &H0, &H1 );
2759     cvPerspectiveTransform( _m1, _m1, &Ha );
2760 
2761     if( mirror )
2762     {
2763         double mm[] = { -1, 0, cx*2, 0, -1, cy*2, 0, 0, 1 };
2764         CvMat MM = cvMat(3, 3, CV_64F, mm);
2765         cvMatMul( &MM, &H1, &H1 );
2766         cvMatMul( &MM, &H2, &H2 );
2767     }
2768 
2769     cvConvert( &H1, _H1 );
2770     cvConvert( &H2, _H2 );
2771 
2772     return 1;
2773 }
2774 
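/*
    Illustrative usage sketch (assumed variable names pts1, pts2, img1, img2, imgSize; not
    part of the library build): the homographies H1/H2 computed above warp the two images so
    that corresponding epipolar lines become horizontal. The fundamental matrix typically
    comes from matched points:

        cv::Mat F = cv::findFundamentalMat(pts1, pts2, cv::FM_RANSAC, 3.0, 0.99);
        cv::Mat H1, H2, rect1, rect2;
        cv::stereoRectifyUncalibrated(pts1, pts2, F, imgSize, H1, H2, 5.0);
        cv::warpPerspective(img1, rect1, H1, imgSize);
        cv::warpPerspective(img2, rect2, H2, imgSize);
*/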
2775 
2776 void cv::reprojectImageTo3D( InputArray _disparity,
2777                              OutputArray __3dImage, InputArray _Qmat,
2778                              bool handleMissingValues, int dtype )
2779 {
2780     CV_INSTRUMENT_REGION();
2781 
2782     Mat disparity = _disparity.getMat(), Q = _Qmat.getMat();
2783     int stype = disparity.type();
2784 
2785     CV_Assert( stype == CV_8UC1 || stype == CV_16SC1 ||
2786                stype == CV_32SC1 || stype == CV_32FC1 );
2787     CV_Assert( Q.size() == Size(4,4) );
2788 
2789     if( dtype < 0 )
2790         dtype = CV_32FC3;
2791     else
2792     {
2793         dtype = CV_MAKETYPE(CV_MAT_DEPTH(dtype), 3);
2794         CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
2795     }
2796 
2797     __3dImage.create(disparity.size(), CV_MAKETYPE(dtype, 3));
2798     Mat _3dImage = __3dImage.getMat();
2799 
2800     const float bigZ = 10000.f;
2801     Matx44d _Q;
2802     Q.convertTo(_Q, CV_64F);
2803 
2804     int x, cols = disparity.cols;
2805     CV_Assert( cols >= 0 );
2806 
2807     std::vector<float> _sbuf(cols);
2808     std::vector<Vec3f> _dbuf(cols);
2809     float* sbuf = &_sbuf[0];
2810     Vec3f* dbuf = &_dbuf[0];
2811     double minDisparity = FLT_MAX;
2812 
2813     // NOTE: here we quietly assume that at least one pixel in the disparity map is not defined,
2814     // and we set the corresponding Z's to some fixed big value.
2815     if( handleMissingValues )
2816         cv::minMaxIdx( disparity, &minDisparity, 0, 0, 0 );
2817 
2818     for( int y = 0; y < disparity.rows; y++ )
2819     {
2820         float* sptr = sbuf;
2821         Vec3f* dptr = dbuf;
2822 
2823         if( stype == CV_8UC1 )
2824         {
2825             const uchar* sptr0 = disparity.ptr<uchar>(y);
2826             for( x = 0; x < cols; x++ )
2827                 sptr[x] = (float)sptr0[x];
2828         }
2829         else if( stype == CV_16SC1 )
2830         {
2831             const short* sptr0 = disparity.ptr<short>(y);
2832             for( x = 0; x < cols; x++ )
2833                 sptr[x] = (float)sptr0[x];
2834         }
2835         else if( stype == CV_32SC1 )
2836         {
2837             const int* sptr0 = disparity.ptr<int>(y);
2838             for( x = 0; x < cols; x++ )
2839                 sptr[x] = (float)sptr0[x];
2840         }
2841         else
2842             sptr = disparity.ptr<float>(y);
2843 
2844         if( dtype == CV_32FC3 )
2845             dptr = _3dImage.ptr<Vec3f>(y);
2846 
2847         for( x = 0; x < cols; x++)
2848         {
2849             double d = sptr[x];
2850             Vec4d homg_pt = _Q*Vec4d(x, y, d, 1.0);
2851             dptr[x] = Vec3d(homg_pt.val);
2852             dptr[x] /= homg_pt[3];
2853 
2854             if( fabs(d-minDisparity) <= FLT_EPSILON )
2855                 dptr[x][2] = bigZ;
2856         }
2857 
2858         if( dtype == CV_16SC3 )
2859         {
2860             Vec3s* dptr0 = _3dImage.ptr<Vec3s>(y);
2861             for( x = 0; x < cols; x++ )
2862             {
2863                 dptr0[x] = dptr[x];
2864             }
2865         }
2866         else if( dtype == CV_32SC3 )
2867         {
2868             Vec3i* dptr0 = _3dImage.ptr<Vec3i>(y);
2869             for( x = 0; x < cols; x++ )
2870             {
2871                 dptr0[x] = dptr[x];
2872             }
2873         }
2874     }
2875 }
2876 
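/*
    Illustrative usage sketch (assumed variable names rectLeft, rectRight, Q; not part of the
    library build): Q is the 4x4 disparity-to-depth mapping produced by stereoRectify. Block
    matchers return fixed-point disparities scaled by 16, so they are usually converted to
    float before reprojection:

        cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(0, 128, 5);
        cv::Mat disp16, disp32, xyz;
        sgbm->compute(rectLeft, rectRight, disp16);
        disp16.convertTo(disp32, CV_32F, 1.0/16);
        cv::reprojectImageTo3D(disp32, xyz, Q, true);  // missing disparities get a large Z
*/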
2877 
2878 void cvReprojectImageTo3D( const CvArr* disparityImage,
2879                            CvArr* _3dImage, const CvMat* matQ,
2880                            int handleMissingValues )
2881 {
2882     cv::Mat disp = cv::cvarrToMat(disparityImage);
2883     cv::Mat _3dimg = cv::cvarrToMat(_3dImage);
2884     cv::Mat mq = cv::cvarrToMat(matQ);
2885     CV_Assert( disp.size() == _3dimg.size() );
2886     int dtype = _3dimg.type();
2887     CV_Assert( dtype == CV_16SC3 || dtype == CV_32SC3 || dtype == CV_32FC3 );
2888 
2889     cv::reprojectImageTo3D(disp, _3dimg, mq, handleMissingValues != 0, dtype );
2890 }
2891 
2892 
2893 CV_IMPL void
2894 cvRQDecomp3x3( const CvMat *matrixM, CvMat *matrixR, CvMat *matrixQ,
2895                CvMat *matrixQx, CvMat *matrixQy, CvMat *matrixQz,
2896                CvPoint3D64f *eulerAngles)
2897 {
2898     double matM[3][3], matR[3][3], matQ[3][3];
2899     CvMat M = cvMat(3, 3, CV_64F, matM);
2900     CvMat R = cvMat(3, 3, CV_64F, matR);
2901     CvMat Q = cvMat(3, 3, CV_64F, matQ);
2902     double z, c, s;
2903 
2904     /* Validate parameters. */
2905     CV_Assert( CV_IS_MAT(matrixM) && CV_IS_MAT(matrixR) && CV_IS_MAT(matrixQ) &&
2906         matrixM->cols == 3 && matrixM->rows == 3 &&
2907         CV_ARE_SIZES_EQ(matrixM, matrixR) && CV_ARE_SIZES_EQ(matrixM, matrixQ));
2908 
2909     cvConvert(matrixM, &M);
2910 
2911     /* Find Givens rotation Q_x for x axis (left multiplication). */
2912     /*
2913          ( 1  0  0 )
2914     Qx = ( 0  c  s ), c = m33/sqrt(m32^2 + m33^2), s = m32/sqrt(m32^2 + m33^2)
2915          ( 0 -s  c )
2916     */
2917     s = matM[2][1];
2918     c = matM[2][2];
2919     z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2920     c *= z;
2921     s *= z;
2922 
2923     double _Qx[3][3] = { {1, 0, 0}, {0, c, s}, {0, -s, c} };
2924     CvMat Qx = cvMat(3, 3, CV_64F, _Qx);
2925 
2926     cvMatMul(&M, &Qx, &R);
2927     assert(fabs(matR[2][1]) < FLT_EPSILON);
2928     matR[2][1] = 0;
2929 
2930     /* Find Givens rotation for y axis. */
2931     /*
2932          ( c  0 -s )
2933     Qy = ( 0  1  0 ), c = m33/sqrt(m31^2 + m33^2), s = -m31/sqrt(m31^2 + m33^2)
2934          ( s  0  c )
2935     */
2936     s = -matR[2][0];
2937     c = matR[2][2];
2938     z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2939     c *= z;
2940     s *= z;
2941 
2942     double _Qy[3][3] = { {c, 0, -s}, {0, 1, 0}, {s, 0, c} };
2943     CvMat Qy = cvMat(3, 3, CV_64F, _Qy);
2944     cvMatMul(&R, &Qy, &M);
2945 
2946     assert(fabs(matM[2][0]) < FLT_EPSILON);
2947     matM[2][0] = 0;
2948 
2949     /* Find Givens rotation for z axis. */
2950     /*
2951          ( c  s  0 )
2952     Qz = (-s  c  0 ), c = m22/sqrt(m21^2 + m22^2), s = m21/sqrt(m21^2 + m22^2)
2953          ( 0  0  1 )
2954     */
2955 
2956     s = matM[1][0];
2957     c = matM[1][1];
2958     z = 1./std::sqrt(c * c + s * s + DBL_EPSILON);
2959     c *= z;
2960     s *= z;
2961 
2962     double _Qz[3][3] = { {c, s, 0}, {-s, c, 0}, {0, 0, 1} };
2963     CvMat Qz = cvMat(3, 3, CV_64F, _Qz);
2964 
2965     cvMatMul(&M, &Qz, &R);
2966     assert(fabs(matR[1][0]) < FLT_EPSILON);
2967     matR[1][0] = 0;
2968 
2969     // Solve the decomposition ambiguity.
2970     // Diagonal entries of R, except the last one, shall be positive.
2971     // Further rotate R by 180 degrees if necessary
2972     if( matR[0][0] < 0 )
2973     {
2974         if( matR[1][1] < 0 )
2975         {
2976             // rotate around z by 180 degrees, i.e. a rotation matrix of
2977             // [-1,  0,  0],
2978             // [ 0, -1,  0],
2979             // [ 0,  0,  1]
2980             matR[0][0] *= -1;
2981             matR[0][1] *= -1;
2982             matR[1][1] *= -1;
2983 
2984             _Qz[0][0] *= -1;
2985             _Qz[0][1] *= -1;
2986             _Qz[1][0] *= -1;
2987             _Qz[1][1] *= -1;
2988         }
2989         else
2990         {
2991             // rotate around y by 180 degrees, i.e. a rotation matrix of
2992             // [-1,  0,  0],
2993             // [ 0,  1,  0],
2994             // [ 0,  0, -1]
2995             matR[0][0] *= -1;
2996             matR[0][2] *= -1;
2997             matR[1][2] *= -1;
2998             matR[2][2] *= -1;
2999 
3000             cvTranspose( &Qz, &Qz );
3001 
3002             _Qy[0][0] *= -1;
3003             _Qy[0][2] *= -1;
3004             _Qy[2][0] *= -1;
3005             _Qy[2][2] *= -1;
3006         }
3007     }
3008     else if( matR[1][1] < 0 )
3009     {
3010         // NOTE: this branch does not appear to be reachable in practice, but is kept for robustness
3011 
3012         // rotate around x by 180 degrees, i.e. a rotation matrix of
3013         // [ 1,  0,  0],
3014         // [ 0, -1,  0],
3015         // [ 0,  0, -1]
3016         matR[0][1] *= -1;
3017         matR[0][2] *= -1;
3018         matR[1][1] *= -1;
3019         matR[1][2] *= -1;
3020         matR[2][2] *= -1;
3021 
3022         cvTranspose( &Qz, &Qz );
3023         cvTranspose( &Qy, &Qy );
3024 
3025         _Qx[1][1] *= -1;
3026         _Qx[1][2] *= -1;
3027         _Qx[2][1] *= -1;
3028         _Qx[2][2] *= -1;
3029     }
3030 
3031     // calculate the euler angle
3032     if( eulerAngles )
3033     {
3034         eulerAngles->x = acos(_Qx[1][1]) * (_Qx[1][2] >= 0 ? 1 : -1) * (180.0 / CV_PI);
3035         eulerAngles->y = acos(_Qy[0][0]) * (_Qy[2][0] >= 0 ? 1 : -1) * (180.0 / CV_PI);
3036         eulerAngles->z = acos(_Qz[0][0]) * (_Qz[0][1] >= 0 ? 1 : -1) * (180.0 / CV_PI);
3037     }
3038 
3039     /* Calculate orthogonal matrix. */
3040     /*
3041     Q = QzT * QyT * QxT
3042     */
3043     cvGEMM( &Qz, &Qy, 1, 0, 0, &M, CV_GEMM_A_T + CV_GEMM_B_T );
3044     cvGEMM( &M, &Qx, 1, 0, 0, &Q, CV_GEMM_B_T );
3045 
3046     /* Save R and Q matrices. */
3047     cvConvert( &R, matrixR );
3048     cvConvert( &Q, matrixQ );
3049 
3050     if( matrixQx )
3051         cvConvert(&Qx, matrixQx);
3052     if( matrixQy )
3053         cvConvert(&Qy, matrixQy);
3054     if( matrixQz )
3055         cvConvert(&Qz, matrixQz);
3056 }
3057 
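/*
    Illustrative note (assumed variable name M; not part of the library build): the RQ
    decomposition above factors a 3x3 matrix M as M = R*Q, with R upper triangular and Q
    orthogonal, by applying three Givens rotations that zero the sub-diagonal entries m32,
    m31 and m21 in turn. Through the C++ wrapper:

        cv::Mat R, Q;                                  // M: the 3x3 left block of a projection matrix
        cv::Vec3d angles = cv::RQDecomp3x3(M, R, Q);   // angles: Euler angles in degrees
*/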
3058 
3059 CV_IMPL void
3060 cvDecomposeProjectionMatrix( const CvMat *projMatr, CvMat *calibMatr,
3061                              CvMat *rotMatr, CvMat *posVect,
3062                              CvMat *rotMatrX, CvMat *rotMatrY,
3063                              CvMat *rotMatrZ, CvPoint3D64f *eulerAngles)
3064 {
3065     double tmpProjMatrData[16], tmpMatrixDData[16], tmpMatrixVData[16];
3066     CvMat tmpProjMatr = cvMat(4, 4, CV_64F, tmpProjMatrData);
3067     CvMat tmpMatrixD = cvMat(4, 4, CV_64F, tmpMatrixDData);
3068     CvMat tmpMatrixV = cvMat(4, 4, CV_64F, tmpMatrixVData);
3069     CvMat tmpMatrixM;
3070 
3071     /* Validate parameters. */
3072     if(projMatr == 0 || calibMatr == 0 || rotMatr == 0 || posVect == 0)
3073         CV_Error(CV_StsNullPtr, "Some of the parameters are NULL pointers!");
3074 
3075     if(!CV_IS_MAT(projMatr) || !CV_IS_MAT(calibMatr) || !CV_IS_MAT(rotMatr) || !CV_IS_MAT(posVect))
3076         CV_Error(CV_StsUnsupportedFormat, "Input parameters must be matrices!");
3077 
3078     if(projMatr->cols != 4 || projMatr->rows != 3)
3079         CV_Error(CV_StsUnmatchedSizes, "Size of projection matrix must be 3x4!");
3080 
3081     if(calibMatr->cols != 3 || calibMatr->rows != 3 || rotMatr->cols != 3 || rotMatr->rows != 3)
3082         CV_Error(CV_StsUnmatchedSizes, "Size of calibration and rotation matrices must be 3x3!");
3083 
3084     if(posVect->cols != 1 || posVect->rows != 4)
3085         CV_Error(CV_StsUnmatchedSizes, "Size of position vector must be 4x1!");
3086 
3087     /* Compute position vector. */
3088     cvSetZero(&tmpProjMatr); // Add zero row to make matrix square.
3089     int i, k;
3090     for(i = 0; i < 3; i++)
3091         for(k = 0; k < 4; k++)
3092             cvmSet(&tmpProjMatr, i, k, cvmGet(projMatr, i, k));
3093 
3094     cvSVD(&tmpProjMatr, &tmpMatrixD, NULL, &tmpMatrixV, CV_SVD_MODIFY_A + CV_SVD_V_T);
3095 
3096     /* Save position vector. */
3097     for(i = 0; i < 4; i++)
3098         cvmSet(posVect, i, 0, cvmGet(&tmpMatrixV, 3, i)); // Solution is last row of V.
3099 
3100     /* Compute calibration and rotation matrices via RQ decomposition. */
3101     cvGetCols(projMatr, &tmpMatrixM, 0, 3); // M is first square matrix of P.
3102 
3103     CV_Assert(cvDet(&tmpMatrixM) != 0.0); // So far only finite cameras can be decomposed, so M has to be nonsingular [det(M) != 0].
3104 
3105     cvRQDecomp3x3(&tmpMatrixM, calibMatr, rotMatr, rotMatrX, rotMatrY, rotMatrZ, eulerAngles);
3106 }
3107 
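/*
    Illustrative usage sketch (assumed variable name P, a 3x4 CV_64F projection matrix; not
    part of the library build): the position vector returned above is the homogeneous camera
    centre (the right null vector of P obtained from the SVD); divide by its fourth component
    to get the 3D centre. Through the C++ wrapper:

        cv::Mat K, R, C;
        cv::decomposeProjectionMatrix(P, K, R, C);
        cv::Point3d centre(C.at<double>(0) / C.at<double>(3),
                           C.at<double>(1) / C.at<double>(3),
                           C.at<double>(2) / C.at<double>(3));
*/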
3108 
3109 
3110 namespace cv
3111 {
3112 
3113 static void collectCalibrationData( InputArrayOfArrays objectPoints,
3114                                     InputArrayOfArrays imagePoints1,
3115                                     InputArrayOfArrays imagePoints2,
3116                                     Mat& objPtMat, Mat& imgPtMat1, Mat* imgPtMat2,
3117                                     Mat& npoints )
3118 {
3119     int nimages = (int)objectPoints.total();
3120     int total = 0;
3121     CV_Assert(nimages > 0);
3122     CV_CheckEQ(nimages, (int)imagePoints1.total(), "");
3123     if (imgPtMat2)
3124         CV_CheckEQ(nimages, (int)imagePoints2.total(), "");
3125 
3126     for (int i = 0; i < nimages; i++)
3127     {
3128         Mat objectPoint = objectPoints.getMat(i);
3129         if (objectPoint.empty())
3130             CV_Error(CV_StsBadSize, "objectPoints should not contain empty vector of vectors of points");
3131         int numberOfObjectPoints = objectPoint.checkVector(3, CV_32F);
3132         if (numberOfObjectPoints <= 0)
3133             CV_Error(CV_StsUnsupportedFormat, "objectPoints should contain vector of vectors of points of type Point3f");
3134 
3135         Mat imagePoint1 = imagePoints1.getMat(i);
3136         if (imagePoint1.empty())
3137             CV_Error(CV_StsBadSize, "imagePoints1 should not contain empty vector of vectors of points");
3138         int numberOfImagePoints = imagePoint1.checkVector(2, CV_32F);
3139         if (numberOfImagePoints <= 0)
3140             CV_Error(CV_StsUnsupportedFormat, "imagePoints1 should contain vector of vectors of points of type Point2f");
3141         CV_CheckEQ(numberOfObjectPoints, numberOfImagePoints, "Number of object and image points must be equal");
3142 
3143         total += numberOfObjectPoints;
3144     }
3145 
3146     npoints.create(1, (int)nimages, CV_32S);
3147     objPtMat.create(1, (int)total, CV_32FC3);
3148     imgPtMat1.create(1, (int)total, CV_32FC2);
3149     Point2f* imgPtData2 = 0;
3150 
3151     if (imgPtMat2)
3152     {
3153         imgPtMat2->create(1, (int)total, CV_32FC2);
3154         imgPtData2 = imgPtMat2->ptr<Point2f>();
3155     }
3156 
3157     Point3f* objPtData = objPtMat.ptr<Point3f>();
3158     Point2f* imgPtData1 = imgPtMat1.ptr<Point2f>();
3159 
3160     for (int i = 0, j = 0; i < nimages; i++)
3161     {
3162         Mat objpt = objectPoints.getMat(i);
3163         Mat imgpt1 = imagePoints1.getMat(i);
3164         int numberOfObjectPoints = objpt.checkVector(3, CV_32F);
3165         npoints.at<int>(i) = numberOfObjectPoints;
3166         for (int n = 0; n < numberOfObjectPoints; ++n)
3167         {
3168             objPtData[j + n] = objpt.ptr<Point3f>()[n];
3169             imgPtData1[j + n] = imgpt1.ptr<Point2f>()[n];
3170         }
3171 
3172         if (imgPtData2)
3173         {
3174             Mat imgpt2 = imagePoints2.getMat(i);
3175             int numberOfImage2Points = imgpt2.checkVector(2, CV_32F);
3176             CV_CheckEQ(numberOfObjectPoints, numberOfImage2Points, "Number of object and image(2) points must be equal");
3177             for (int n = 0; n < numberOfImage2Points; ++n)
3178             {
3179                 imgPtData2[j + n] = imgpt2.ptr<Point2f>()[n];
3180             }
3181         }
3182 
3183         j += numberOfObjectPoints;
3184     }
3185 }
3186 
3187 static Mat prepareCameraMatrix(Mat& cameraMatrix0, int rtype)
3188 {
3189     Mat cameraMatrix = Mat::eye(3, 3, rtype);
3190     if( cameraMatrix0.size() == cameraMatrix.size() )
3191         cameraMatrix0.convertTo(cameraMatrix, rtype);
3192     return cameraMatrix;
3193 }
3194 
3195 static Mat prepareDistCoeffs(Mat& distCoeffs0, int rtype, int outputSize = 14)
3196 {
3197     CV_Assert((int)distCoeffs0.total() <= outputSize);
3198     Mat distCoeffs = Mat::zeros(distCoeffs0.cols == 1 ? Size(1, outputSize) : Size(outputSize, 1), rtype);
3199     if( distCoeffs0.size() == Size(1, 4) ||
3200        distCoeffs0.size() == Size(1, 5) ||
3201        distCoeffs0.size() == Size(1, 8) ||
3202        distCoeffs0.size() == Size(1, 12) ||
3203        distCoeffs0.size() == Size(1, 14) ||
3204        distCoeffs0.size() == Size(4, 1) ||
3205        distCoeffs0.size() == Size(5, 1) ||
3206        distCoeffs0.size() == Size(8, 1) ||
3207        distCoeffs0.size() == Size(12, 1) ||
3208        distCoeffs0.size() == Size(14, 1) )
3209     {
3210         Mat dstCoeffs(distCoeffs, Rect(0, 0, distCoeffs0.cols, distCoeffs0.rows));
3211         distCoeffs0.convertTo(dstCoeffs, rtype);
3212     }
3213     return distCoeffs;
3214 }
3215 
3216 } // namespace cv
3217 
3218 
3219 void cv::Rodrigues(InputArray _src, OutputArray _dst, OutputArray _jacobian)
3220 {
3221     CV_INSTRUMENT_REGION();
3222 
3223     Mat src = _src.getMat();
3224     const Size srcSz = src.size();
3225     CV_Check(srcSz, srcSz == Size(3, 1) || srcSz == Size(1, 3) ||
3226              (srcSz == Size(1, 1) && src.channels() == 3) ||
3227              srcSz == Size(3, 3),
3228              "Input matrix must be 1x3 or 3x1 for a rotation vector, or 3x3 for a rotation matrix");
3229 
3230     bool v2m = src.cols == 1 || src.rows == 1;
3231     _dst.create(3, v2m ? 3 : 1, src.depth());
3232     Mat dst = _dst.getMat();
3233     CvMat _csrc = cvMat(src), _cdst = cvMat(dst), _cjacobian;
3234     if( _jacobian.needed() )
3235     {
3236         _jacobian.create(v2m ? Size(9, 3) : Size(3, 9), src.depth());
3237         _cjacobian = cvMat(_jacobian.getMat());
3238     }
3239     bool ok = cvRodrigues2(&_csrc, &_cdst, _jacobian.needed() ? &_cjacobian : 0) > 0;
3240     if( !ok )
3241         dst = Scalar(0);
3242 }
3243 
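/*
    Illustrative usage sketch (not part of the library build): Rodrigues converts between the
    3x1 axis-angle vector and the 3x3 rotation matrix; the optional Jacobian is what the
    calibration routines use for error propagation.

        cv::Mat rvec = (cv::Mat_<double>(3, 1) << 0, 0, CV_PI/2), Rm, rvecBack;
        cv::Rodrigues(rvec, Rm);       // Rm is a 90-degree rotation about the z axis
        cv::Rodrigues(Rm, rvecBack);   // recovers rvec up to numerical precision
*/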
3244 void cv::matMulDeriv( InputArray _Amat, InputArray _Bmat,
3245                       OutputArray _dABdA, OutputArray _dABdB )
3246 {
3247     CV_INSTRUMENT_REGION();
3248 
3249     Mat A = _Amat.getMat(), B = _Bmat.getMat();
3250     _dABdA.create(A.rows*B.cols, A.rows*A.cols, A.type());
3251     _dABdB.create(A.rows*B.cols, B.rows*B.cols, A.type());
3252     Mat dABdA = _dABdA.getMat(), dABdB = _dABdB.getMat();
3253     CvMat matA = cvMat(A), matB = cvMat(B), c_dABdA = cvMat(dABdA), c_dABdB = cvMat(dABdB);
3254     cvCalcMatMulDeriv(&matA, &matB, &c_dABdA, &c_dABdB);
3255 }
3256 
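/*
    Illustrative note (not part of the library build): matMulDeriv returns the Jacobians of
    vec(A*B) with respect to vec(A) and vec(B); for A (m x l) and B (l x n) the outputs have
    sizes (m*n) x (m*l) and (m*n) x (l*n). A minimal sketch:

        cv::Mat A = cv::Mat::eye(3, 3, CV_64F), B = cv::Mat::ones(3, 2, CV_64F);
        cv::Mat dABdA, dABdB;
        cv::matMulDeriv(A, B, dABdA, dABdB);   // dABdA: 6x9, dABdB: 6x6
*/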
3257 
3258 void cv::composeRT( InputArray _rvec1, InputArray _tvec1,
3259                     InputArray _rvec2, InputArray _tvec2,
3260                     OutputArray _rvec3, OutputArray _tvec3,
3261                     OutputArray _dr3dr1, OutputArray _dr3dt1,
3262                     OutputArray _dr3dr2, OutputArray _dr3dt2,
3263                     OutputArray _dt3dr1, OutputArray _dt3dt1,
3264                     OutputArray _dt3dr2, OutputArray _dt3dt2 )
3265 {
3266     Mat rvec1 = _rvec1.getMat(), tvec1 = _tvec1.getMat();
3267     Mat rvec2 = _rvec2.getMat(), tvec2 = _tvec2.getMat();
3268     int rtype = rvec1.type();
3269     _rvec3.create(rvec1.size(), rtype);
3270     _tvec3.create(tvec1.size(), rtype);
3271     Mat rvec3 = _rvec3.getMat(), tvec3 = _tvec3.getMat();
3272 
3273     CvMat c_rvec1 = cvMat(rvec1), c_tvec1 = cvMat(tvec1), c_rvec2 = cvMat(rvec2),
3274           c_tvec2 = cvMat(tvec2), c_rvec3 = cvMat(rvec3), c_tvec3 = cvMat(tvec3);
3275     CvMat c_dr3dr1, c_dr3dt1, c_dr3dr2, c_dr3dt2, c_dt3dr1, c_dt3dt1, c_dt3dr2, c_dt3dt2;
3276     CvMat *p_dr3dr1=0, *p_dr3dt1=0, *p_dr3dr2=0, *p_dr3dt2=0, *p_dt3dr1=0, *p_dt3dt1=0, *p_dt3dr2=0, *p_dt3dt2=0;
3277 #define CV_COMPOSE_RT_PARAM(name) \
3278     Mat name; \
3279     if (_ ## name.needed())\
3280     { \
3281         _ ## name.create(3, 3, rtype); \
3282         name = _ ## name.getMat(); \
3283         p_ ## name = &(c_ ## name = cvMat(name)); \
3284     }
3285 
3286     CV_COMPOSE_RT_PARAM(dr3dr1); CV_COMPOSE_RT_PARAM(dr3dt1);
3287     CV_COMPOSE_RT_PARAM(dr3dr2); CV_COMPOSE_RT_PARAM(dr3dt2);
3288     CV_COMPOSE_RT_PARAM(dt3dr1); CV_COMPOSE_RT_PARAM(dt3dt1);
3289     CV_COMPOSE_RT_PARAM(dt3dr2); CV_COMPOSE_RT_PARAM(dt3dt2);
3290 #undef CV_COMPOSE_RT_PARAM
3291 
3292     cvComposeRT(&c_rvec1, &c_tvec1, &c_rvec2, &c_tvec2, &c_rvec3, &c_tvec3,
3293                 p_dr3dr1, p_dr3dt1, p_dr3dr2, p_dr3dt2,
3294                 p_dt3dr1, p_dt3dt1, p_dt3dr2, p_dt3dt2);
3295 }
3296 
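/*
    Illustrative usage sketch (assumed variable names rvec1, tvec1, rvec2, tvec2; not part of
    the library build): composeRT chains two rigid transforms, so (rvec3, tvec3) describes
    applying (rvec1, tvec1) first and then (rvec2, tvec2); the optional outputs are the
    derivatives of the result with respect to each input, used for uncertainty propagation.

        cv::Mat rvec3, tvec3;
        cv::composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3);
*/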
3297 
3298 void cv::projectPoints( InputArray _opoints,
3299                         InputArray _rvec,
3300                         InputArray _tvec,
3301                         InputArray _cameraMatrix,
3302                         InputArray _distCoeffs,
3303                         OutputArray _ipoints,
3304                         OutputArray _jacobian,
3305                         double aspectRatio )
3306 {
3307     Mat opoints = _opoints.getMat();
3308     int npoints = opoints.checkVector(3), depth = opoints.depth();
3309     if (npoints < 0)
3310         opoints = opoints.t();
3311     npoints = opoints.checkVector(3);
3312     CV_Assert(npoints >= 0 && (depth == CV_32F || depth == CV_64F));
3313 
3314     if (opoints.cols == 3)
3315         opoints = opoints.reshape(3);
3316 
3317     CvMat dpdrot, dpdt, dpdf, dpdc, dpddist;
3318     CvMat *pdpdrot=0, *pdpdt=0, *pdpdf=0, *pdpdc=0, *pdpddist=0;
3319 
3320     _ipoints.create(npoints, 1, CV_MAKETYPE(depth, 2), -1, true);
3321     Mat imagePoints = _ipoints.getMat();
3322     CvMat c_imagePoints = cvMat(imagePoints);
3323     CvMat c_objectPoints = cvMat(opoints);
3324     Mat cameraMatrix = _cameraMatrix.getMat();
3325 
3326     Mat rvec = _rvec.getMat(), tvec = _tvec.getMat();
3327     CvMat c_cameraMatrix = cvMat(cameraMatrix);
3328     CvMat c_rvec = cvMat(rvec), c_tvec = cvMat(tvec);
3329 
3330     double dc0buf[5]={0};
3331     Mat dc0(5,1,CV_64F,dc0buf);
3332     Mat distCoeffs = _distCoeffs.getMat();
3333     if( distCoeffs.empty() )
3334         distCoeffs = dc0;
3335     CvMat c_distCoeffs = cvMat(distCoeffs);
3336     int ndistCoeffs = distCoeffs.rows + distCoeffs.cols - 1;
3337 
3338     Mat jacobian;
3339     if( _jacobian.needed() )
3340     {
3341         _jacobian.create(npoints*2, 3+3+2+2+ndistCoeffs, CV_64F);
3342         jacobian = _jacobian.getMat();
3343         pdpdrot = &(dpdrot = cvMat(jacobian.colRange(0, 3)));
3344         pdpdt = &(dpdt = cvMat(jacobian.colRange(3, 6)));
3345         pdpdf = &(dpdf = cvMat(jacobian.colRange(6, 8)));
3346         pdpdc = &(dpdc = cvMat(jacobian.colRange(8, 10)));
3347         pdpddist = &(dpddist = cvMat(jacobian.colRange(10, 10+ndistCoeffs)));
3348     }
3349 
3350     cvProjectPoints2( &c_objectPoints, &c_rvec, &c_tvec, &c_cameraMatrix, &c_distCoeffs,
3351                       &c_imagePoints, pdpdrot, pdpdt, pdpdf, pdpdc, pdpddist, aspectRatio );
3352 }
3353 
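/*
    Illustrative usage sketch (assumed variable names objectPts, imagePts, rvec, tvec, K,
    dist; not part of the library build): projectPoints maps 3D object points through the
    extrinsics, intrinsics and the distortion model; a common use is computing the per-view
    RMS reprojection error after calibration:

        std::vector<cv::Point2f> projected;
        cv::projectPoints(objectPts, rvec, tvec, K, dist, projected);
        double rms = cv::norm(imagePts, projected, cv::NORM_L2) / std::sqrt((double)projected.size());
*/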
3354 cv::Mat cv::initCameraMatrix2D( InputArrayOfArrays objectPoints,
3355                                 InputArrayOfArrays imagePoints,
3356                                 Size imageSize, double aspectRatio )
3357 {
3358     CV_INSTRUMENT_REGION();
3359 
3360     Mat objPt, imgPt, npoints, cameraMatrix(3, 3, CV_64F);
3361     collectCalibrationData( objectPoints, imagePoints, noArray(),
3362                             objPt, imgPt, 0, npoints );
3363     CvMat _objPt = cvMat(objPt), _imgPt = cvMat(imgPt), _npoints = cvMat(npoints), _cameraMatrix = cvMat(cameraMatrix);
3364     cvInitIntrinsicParams2D( &_objPt, &_imgPt, &_npoints,
3365                              cvSize(imageSize), &_cameraMatrix, aspectRatio );
3366     return cameraMatrix;
3367 }
3368 
3369 
3370 
3371 double cv::calibrateCamera( InputArrayOfArrays _objectPoints,
3372                             InputArrayOfArrays _imagePoints,
3373                             Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3374                             OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs, int flags, TermCriteria criteria )
3375 {
3376     CV_INSTRUMENT_REGION();
3377 
3378     return calibrateCamera(_objectPoints, _imagePoints, imageSize, _cameraMatrix, _distCoeffs,
3379                                          _rvecs, _tvecs, noArray(), noArray(), noArray(), flags, criteria);
3380 }
3381 
3382 double cv::calibrateCamera(InputArrayOfArrays _objectPoints,
3383                             InputArrayOfArrays _imagePoints,
3384                             Size imageSize, InputOutputArray _cameraMatrix, InputOutputArray _distCoeffs,
3385                             OutputArrayOfArrays _rvecs, OutputArrayOfArrays _tvecs,
3386                             OutputArray stdDeviationsIntrinsics,
3387                             OutputArray stdDeviationsExtrinsics,
3388                             OutputArray _perViewErrors, int flags, TermCriteria criteria )
3389 {
3390     CV_INSTRUMENT_REGION();
3391 
3392     int rtype = CV_64F;
3393     Mat cameraMatrix = _cameraMatrix.getMat();
3394     cameraMatrix = prepareCameraMatrix(cameraMatrix, rtype);
3395     Mat distCoeffs = _distCoeffs.getMat();
3396     distCoeffs = (flags & CALIB_THIN_PRISM_MODEL) && !(flags & CALIB_TILTED_MODEL)  ? prepareDistCoeffs(distCoeffs, rtype, 12) :
3397                                                       prepareDistCoeffs(distCoeffs, rtype);
3398     if( !(flags & CALIB_RATIONAL_MODEL) &&
3399     (!(flags & CALIB_THIN_PRISM_MODEL)) &&
3400     (!(flags & CALIB_TILTED_MODEL)))
3401         distCoeffs = distCoeffs.rows == 1 ? distCoeffs.colRange(0, 5) : distCoeffs.rowRange(0, 5);
3402 
3403     int nimages = int(_objectPoints.total());
3404     CV_Assert( nimages > 0 );
3405     Mat objPt, imgPt, npoints, rvecM, tvecM, stdDeviationsM, errorsM;
3406 
3407     bool rvecs_needed = _rvecs.needed(), tvecs_needed = _tvecs.needed(),
3408             stddev_needed = stdDeviationsIntrinsics.needed(), errors_needed = _perViewErrors.needed(),
3409             stddev_ext_needed = stdDeviationsExtrinsics.needed();
3410 
3411     bool rvecs_mat_vec = _rvecs.isMatVector();
3412     bool tvecs_mat_vec = _tvecs.isMatVector();
3413 
3414     if( rvecs_needed )
3415     {
3416         _rvecs.create(nimages, 1, CV_64FC3);
3417 
3418         if(rvecs_mat_vec)
3419             rvecM.create(nimages, 3, CV_64F);
3420         else
3421             rvecM = _rvecs.getMat();
3422     }
3423 
3424     if( tvecs_needed )
3425     {
3426         _tvecs.create(nimages, 1, CV_64FC3);
3427 
3428         if(tvecs_mat_vec)
3429             tvecM.create(nimages, 3, CV_64F);
3430         else
3431             tvecM = _tvecs.getMat();
3432     }
3433 
3434     if( stddev_needed || stddev_ext_needed )
3435     {
3436         stdDeviationsM.create(nimages*6 + CV_CALIB_NINTRINSIC, 1, CV_64F);
3437     }
3438 
3439     if( errors_needed )
3440     {
3441         _perViewErrors.create(nimages, 1, CV_64F);
3442         errorsM = _perViewErrors.getMat();
3443     }
3444 
3445     collectCalibrationData( _objectPoints, _imagePoints, noArray(),
3446                             objPt, imgPt, 0, npoints );
3447     CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_npoints = cvMat(npoints);
3448     CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
3449     CvMat c_rvecM = cvMat(rvecM), c_tvecM = cvMat(tvecM), c_stdDev = cvMat(stdDeviationsM), c_errors = cvMat(errorsM);
3450 
3451     double reprojErr = cvCalibrateCamera2Internal(&c_objPt, &c_imgPt, &c_npoints, cvSize(imageSize),
3452                                           &c_cameraMatrix, &c_distCoeffs,
3453                                           rvecs_needed ? &c_rvecM : NULL,
3454                                           tvecs_needed ? &c_tvecM : NULL,
3455                                           stddev_needed ? &c_stdDev : NULL,
3456                                           errors_needed ? &c_errors : NULL, flags, cvTermCriteria(criteria));
3457 
3458     if( stddev_needed )
3459     {
3460         stdDeviationsIntrinsics.create(CV_CALIB_NINTRINSIC, 1, CV_64F);
3461         Mat stdDeviationsIntrinsicsMat = stdDeviationsIntrinsics.getMat();
3462         std::memcpy(stdDeviationsIntrinsicsMat.ptr(), stdDeviationsM.ptr(),
3463                     CV_CALIB_NINTRINSIC*sizeof(double));
3464     }
3465 
3466     if ( stddev_ext_needed )
3467     {
3468         stdDeviationsExtrinsics.create(nimages*6, 1, CV_64F);
3469         Mat stdDeviationsExtrinsicsMat = stdDeviationsExtrinsics.getMat();
3470         std::memcpy(stdDeviationsExtrinsicsMat.ptr(),
3471                     stdDeviationsM.ptr() + CV_CALIB_NINTRINSIC*sizeof(double),
3472                     nimages*6*sizeof(double));
3473     }
3474 
3475     // overly complicated and inefficient rvec/tvec handling to support vector<Mat>
3476     for(int i = 0; i < nimages; i++ )
3477     {
3478         if( rvecs_needed && rvecs_mat_vec)
3479         {
3480             _rvecs.create(3, 1, CV_64F, i, true);
3481             Mat rv = _rvecs.getMat(i);
3482             memcpy(rv.ptr(), rvecM.ptr(i), 3*sizeof(double));
3483         }
3484         if( tvecs_needed && tvecs_mat_vec)
3485         {
3486             _tvecs.create(3, 1, CV_64F, i, true);
3487             Mat tv = _tvecs.getMat(i);
3488             memcpy(tv.ptr(), tvecM.ptr(i), 3*sizeof(double));
3489         }
3490     }
3491 
3492     cameraMatrix.copyTo(_cameraMatrix);
3493     distCoeffs.copyTo(_distCoeffs);
3494 
3495     return reprojErr;
3496 }
3497 
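/*
    Illustrative usage sketch (assumed variable names objectPoints, imagePoints, imageSize;
    not part of the library build): objectPoints holds the per-view 3D pattern coordinates
    (e.g. chessboard corners in the pattern frame), imagePoints the detected 2D corners, and
    the return value is the RMS reprojection error in pixels.

        cv::Mat K, dist;
        std::vector<cv::Mat> rvecs, tvecs;
        double rms = cv::calibrateCamera(objectPoints, imagePoints, imageSize,
                                         K, dist, rvecs, tvecs);
*/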
3498 
3499 void cv::calibrationMatrixValues( InputArray _cameraMatrix, Size imageSize,
3500                                   double apertureWidth, double apertureHeight,
3501                                   double& fovx, double& fovy, double& focalLength,
3502                                   Point2d& principalPoint, double& aspectRatio )
3503 {
3504     CV_INSTRUMENT_REGION();
3505 
3506     if(_cameraMatrix.size() != Size(3, 3))
3507         CV_Error(CV_StsUnmatchedSizes, "Size of cameraMatrix must be 3x3!");
3508 
3509     Matx33d K = _cameraMatrix.getMat();
3510 
3511     CV_DbgAssert(imageSize.width != 0 && imageSize.height != 0 && K(0, 0) != 0.0 && K(1, 1) != 0.0);
3512 
3513     /* Calculate pixel aspect ratio. */
3514     aspectRatio = K(1, 1) / K(0, 0);
3515 
3516     /* Calculate the number of pixels per real-world unit. */
3517     double mx, my;
3518     if(apertureWidth != 0.0 && apertureHeight != 0.0) {
3519         mx = imageSize.width / apertureWidth;
3520         my = imageSize.height / apertureHeight;
3521     } else {
3522         mx = 1.0;
3523         my = aspectRatio;
3524     }
3525 
3526     /* Calculate fovx and fovy. */
3527     fovx = atan2(K(0, 2), K(0, 0)) + atan2(imageSize.width  - K(0, 2), K(0, 0));
3528     fovy = atan2(K(1, 2), K(1, 1)) + atan2(imageSize.height - K(1, 2), K(1, 1));
3529     fovx *= 180.0 / CV_PI;
3530     fovy *= 180.0 / CV_PI;
3531 
3532     /* Calculate focal length. */
3533     focalLength = K(0, 0) / mx;
3534 
3535     /* Calculate the principal point. */
3536     principalPoint = Point2d(K(0, 2) / mx, K(1, 2) / my);
3537 }
3538 
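/*
    Worked example (assumed numbers, not part of the library build): for a 640x480 image
    with fx = fy = 500 and the principal point at the image centre, the formulas above give
    fovx = 2*atan2(320, 500) ~= 65.2 degrees and fovy = 2*atan2(240, 500) ~= 51.3 degrees;
    with zero aperture sizes the focal length is reported in pixels and the aspect ratio is
    fy/fx = 1.
*/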
3539 double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
3540                           InputArrayOfArrays _imagePoints1,
3541                           InputArrayOfArrays _imagePoints2,
3542                           InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
3543                           InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
3544                           Size imageSize, OutputArray _Rmat, OutputArray _Tmat,
3545                           OutputArray _Emat, OutputArray _Fmat, int flags,
3546                           TermCriteria criteria)
3547 {
3548     if (flags & CALIB_USE_EXTRINSIC_GUESS)
3549         CV_Error(Error::StsBadFlag, "stereoCalibrate does not support CALIB_USE_EXTRINSIC_GUESS.");
3550 
3551     Mat Rmat, Tmat;
3552     double ret = stereoCalibrate(_objectPoints, _imagePoints1, _imagePoints2, _cameraMatrix1, _distCoeffs1,
3553                                  _cameraMatrix2, _distCoeffs2, imageSize, Rmat, Tmat, _Emat, _Fmat,
3554                                  noArray(), flags, criteria);
3555     Rmat.copyTo(_Rmat);
3556     Tmat.copyTo(_Tmat);
3557     return ret;
3558 }
3559 
3560 double cv::stereoCalibrate( InputArrayOfArrays _objectPoints,
3561                           InputArrayOfArrays _imagePoints1,
3562                           InputArrayOfArrays _imagePoints2,
3563                           InputOutputArray _cameraMatrix1, InputOutputArray _distCoeffs1,
3564                           InputOutputArray _cameraMatrix2, InputOutputArray _distCoeffs2,
3565                           Size imageSize, InputOutputArray _Rmat, InputOutputArray _Tmat,
3566                           OutputArray _Emat, OutputArray _Fmat,
3567                           OutputArray _perViewErrors, int flags ,
3568                           TermCriteria criteria)
3569 {
3570     int rtype = CV_64F;
3571     Mat cameraMatrix1 = _cameraMatrix1.getMat();
3572     Mat cameraMatrix2 = _cameraMatrix2.getMat();
3573     Mat distCoeffs1 = _distCoeffs1.getMat();
3574     Mat distCoeffs2 = _distCoeffs2.getMat();
3575     cameraMatrix1 = prepareCameraMatrix(cameraMatrix1, rtype);
3576     cameraMatrix2 = prepareCameraMatrix(cameraMatrix2, rtype);
3577     distCoeffs1 = prepareDistCoeffs(distCoeffs1, rtype);
3578     distCoeffs2 = prepareDistCoeffs(distCoeffs2, rtype);
3579 
3580     if( !(flags & CALIB_RATIONAL_MODEL) &&
3581     (!(flags & CALIB_THIN_PRISM_MODEL)) &&
3582     (!(flags & CALIB_TILTED_MODEL)))
3583     {
3584         distCoeffs1 = distCoeffs1.rows == 1 ? distCoeffs1.colRange(0, 5) : distCoeffs1.rowRange(0, 5);
3585         distCoeffs2 = distCoeffs2.rows == 1 ? distCoeffs2.colRange(0, 5) : distCoeffs2.rowRange(0, 5);
3586     }
3587 
3588     if((flags & CALIB_USE_EXTRINSIC_GUESS) == 0)
3589     {
3590         _Rmat.create(3, 3, rtype);
3591         _Tmat.create(3, 1, rtype);
3592     }
3593 
3594     Mat objPt, imgPt, imgPt2, npoints;
3595 
3596     collectCalibrationData( _objectPoints, _imagePoints1, _imagePoints2,
3597                             objPt, imgPt, &imgPt2, npoints );
3598     CvMat c_objPt = cvMat(objPt), c_imgPt = cvMat(imgPt), c_imgPt2 = cvMat(imgPt2), c_npoints = cvMat(npoints);
3599     CvMat c_cameraMatrix1 = cvMat(cameraMatrix1), c_distCoeffs1 = cvMat(distCoeffs1);
3600     CvMat c_cameraMatrix2 = cvMat(cameraMatrix2), c_distCoeffs2 = cvMat(distCoeffs2);
3601     Mat matR_ = _Rmat.getMat(), matT_ = _Tmat.getMat();
3602     CvMat c_matR = cvMat(matR_), c_matT = cvMat(matT_), c_matE, c_matF, c_matErr;
3603 
3604     bool E_needed = _Emat.needed(), F_needed = _Fmat.needed(), errors_needed = _perViewErrors.needed();
3605 
3606     Mat matE_, matF_, matErr_;
3607     if( E_needed )
3608     {
3609         _Emat.create(3, 3, rtype);
3610         matE_ = _Emat.getMat();
3611         c_matE = cvMat(matE_);
3612     }
3613     if( F_needed )
3614     {
3615         _Fmat.create(3, 3, rtype);
3616         matF_ = _Fmat.getMat();
3617         c_matF = cvMat(matF_);
3618     }
3619 
3620     if( errors_needed )
3621     {
3622         int nimages = int(_objectPoints.total());
3623         _perViewErrors.create(nimages, 2, CV_64F);
3624         matErr_ = _perViewErrors.getMat();
3625         c_matErr = cvMat(matErr_);
3626     }
3627 
3628     double err = cvStereoCalibrateImpl(&c_objPt, &c_imgPt, &c_imgPt2, &c_npoints, &c_cameraMatrix1,
3629                                        &c_distCoeffs1, &c_cameraMatrix2, &c_distCoeffs2, cvSize(imageSize), &c_matR,
3630                                        &c_matT, E_needed ? &c_matE : NULL, F_needed ? &c_matF : NULL,
3631                                        errors_needed ? &c_matErr : NULL, flags, cvTermCriteria(criteria));
3632 
3633     cameraMatrix1.copyTo(_cameraMatrix1);
3634     cameraMatrix2.copyTo(_cameraMatrix2);
3635     distCoeffs1.copyTo(_distCoeffs1);
3636     distCoeffs2.copyTo(_distCoeffs2);
3637 
3638     return err;
3639 }
3640 
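/*
    Illustrative usage sketch (assumed variable names K1, D1, K2, D2, objectPoints,
    imagePoints1, imagePoints2, imageSize; not part of the library build): the per-camera
    intrinsics are usually estimated first with calibrateCamera and then kept fixed while
    the relative pose (R, T) together with E and F is refined:

        cv::Mat R, T, E, F;
        double rms = cv::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                         K1, D1, K2, D2, imageSize, R, T, E, F,
                                         cv::CALIB_FIX_INTRINSIC);
*/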
3641 
3642 void cv::stereoRectify( InputArray _cameraMatrix1, InputArray _distCoeffs1,
3643                         InputArray _cameraMatrix2, InputArray _distCoeffs2,
3644                         Size imageSize, InputArray _Rmat, InputArray _Tmat,
3645                         OutputArray _Rmat1, OutputArray _Rmat2,
3646                         OutputArray _Pmat1, OutputArray _Pmat2,
3647                         OutputArray _Qmat, int flags,
3648                         double alpha, Size newImageSize,
3649                         Rect* validPixROI1, Rect* validPixROI2 )
3650 {
3651     Mat cameraMatrix1 = _cameraMatrix1.getMat(), cameraMatrix2 = _cameraMatrix2.getMat();
3652     Mat distCoeffs1 = _distCoeffs1.getMat(), distCoeffs2 = _distCoeffs2.getMat();
3653     Mat Rmat = _Rmat.getMat(), Tmat = _Tmat.getMat();
3654     CvMat c_cameraMatrix1 = cvMat(cameraMatrix1);
3655     CvMat c_cameraMatrix2 = cvMat(cameraMatrix2);
3656     CvMat c_distCoeffs1 = cvMat(distCoeffs1);
3657     CvMat c_distCoeffs2 = cvMat(distCoeffs2);
3658     CvMat c_R = cvMat(Rmat), c_T = cvMat(Tmat);
3659 
3660     int rtype = CV_64F;
3661     _Rmat1.create(3, 3, rtype);
3662     _Rmat2.create(3, 3, rtype);
3663     _Pmat1.create(3, 4, rtype);
3664     _Pmat2.create(3, 4, rtype);
3665     Mat R1 = _Rmat1.getMat(), R2 = _Rmat2.getMat(), P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat(), Q;
3666     CvMat c_R1 = cvMat(R1), c_R2 = cvMat(R2), c_P1 = cvMat(P1), c_P2 = cvMat(P2);
3667     CvMat c_Q, *p_Q = 0;
3668 
3669     if( _Qmat.needed() )
3670     {
3671         _Qmat.create(4, 4, rtype);
3672         p_Q = &(c_Q = cvMat(Q = _Qmat.getMat()));
3673     }
3674 
3675     CvMat *p_distCoeffs1 = distCoeffs1.empty() ? NULL : &c_distCoeffs1;
3676     CvMat *p_distCoeffs2 = distCoeffs2.empty() ? NULL : &c_distCoeffs2;
3677     cvStereoRectify( &c_cameraMatrix1, &c_cameraMatrix2, p_distCoeffs1, p_distCoeffs2,
3678         cvSize(imageSize), &c_R, &c_T, &c_R1, &c_R2, &c_P1, &c_P2, p_Q, flags, alpha,
3679         cvSize(newImageSize), (CvRect*)validPixROI1, (CvRect*)validPixROI2);
3680 }
3681 
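/*
    Illustrative usage sketch (assumed variable names K1, D1, K2, D2, R, T, imageSize, img1,
    rect1; not part of the library build): the rotations R1/R2 and projections P1/P2 returned
    above feed initUndistortRectifyMap, and Q is later consumed by reprojectImageTo3D:

        cv::Mat R1, R2, P1, P2, Q;
        cv::Rect roi1, roi2;
        cv::stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q,
                          cv::CALIB_ZERO_DISPARITY, 0.0, imageSize, &roi1, &roi2);
        cv::Mat map1x, map1y;
        cv::initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
        cv::remap(img1, rect1, map1x, map1y, cv::INTER_LINEAR);
*/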
3682 bool cv::stereoRectifyUncalibrated( InputArray _points1, InputArray _points2,
3683                                     InputArray _Fmat, Size imgSize,
3684                                     OutputArray _Hmat1, OutputArray _Hmat2, double threshold )
3685 {
3686     CV_INSTRUMENT_REGION();
3687 
3688     int rtype = CV_64F;
3689     _Hmat1.create(3, 3, rtype);
3690     _Hmat2.create(3, 3, rtype);
3691     Mat F = _Fmat.getMat();
3692     Mat points1 = _points1.getMat(), points2 = _points2.getMat();
3693     CvMat c_pt1 = cvMat(points1), c_pt2 = cvMat(points2);
3694     Mat H1 = _Hmat1.getMat(), H2 = _Hmat2.getMat();
3695     CvMat c_F, *p_F=0, c_H1 = cvMat(H1), c_H2 = cvMat(H2);
3696     if( F.size() == Size(3, 3) )
3697         p_F = &(c_F = cvMat(F));
3698     return cvStereoRectifyUncalibrated(&c_pt1, &c_pt2, p_F, cvSize(imgSize), &c_H1, &c_H2, threshold) > 0;
3699 }
3700 
3701 cv::Mat cv::getOptimalNewCameraMatrix( InputArray _cameraMatrix,
3702                                        InputArray _distCoeffs,
3703                                        Size imgSize, double alpha, Size newImgSize,
3704                                        Rect* validPixROI, bool centerPrincipalPoint )
3705 {
3706     CV_INSTRUMENT_REGION();
3707 
3708     Mat cameraMatrix = _cameraMatrix.getMat(), distCoeffs = _distCoeffs.getMat();
3709     CvMat c_cameraMatrix = cvMat(cameraMatrix), c_distCoeffs = cvMat(distCoeffs);
3710 
3711     Mat newCameraMatrix(3, 3, CV_MAT_TYPE(c_cameraMatrix.type));
3712     CvMat c_newCameraMatrix = cvMat(newCameraMatrix);
3713 
3714     cvGetOptimalNewCameraMatrix(&c_cameraMatrix, &c_distCoeffs, cvSize(imgSize),
3715                                 alpha, &c_newCameraMatrix,
3716                                 cvSize(newImgSize), (CvRect*)validPixROI, (int)centerPrincipalPoint);
3717     return newCameraMatrix;
3718 }
3719 
3720 
3721 cv::Vec3d cv::RQDecomp3x3( InputArray _Mmat,
3722                            OutputArray _Rmat,
3723                            OutputArray _Qmat,
3724                            OutputArray _Qx,
3725                            OutputArray _Qy,
3726                            OutputArray _Qz )
3727 {
3728     CV_INSTRUMENT_REGION();
3729 
3730     Mat M = _Mmat.getMat();
3731     _Rmat.create(3, 3, M.type());
3732     _Qmat.create(3, 3, M.type());
3733     Mat Rmat = _Rmat.getMat();
3734     Mat Qmat = _Qmat.getMat();
3735     Vec3d eulerAngles;
3736 
3737     CvMat matM = cvMat(M), matR = cvMat(Rmat), matQ = cvMat(Qmat);
3738 #define CV_RQDecomp3x3_PARAM(name) \
3739     Mat name; \
3740     CvMat c_ ## name, *p ## name = NULL; \
3741     if( _ ## name.needed() ) \
3742     { \
3743         _ ## name.create(3, 3, M.type()); \
3744         name = _ ## name.getMat(); \
3745         c_ ## name = cvMat(name); p ## name = &c_ ## name; \
3746     }
3747 
3748     CV_RQDecomp3x3_PARAM(Qx);
3749     CV_RQDecomp3x3_PARAM(Qy);
3750     CV_RQDecomp3x3_PARAM(Qz);
3751 #undef CV_RQDecomp3x3_PARAM
3752     cvRQDecomp3x3(&matM, &matR, &matQ, pQx, pQy, pQz, (CvPoint3D64f*)&eulerAngles[0]);
3753     return eulerAngles;
3754 }
3755 
3756 
3757 void cv::decomposeProjectionMatrix( InputArray _projMatrix, OutputArray _cameraMatrix,
3758                                     OutputArray _rotMatrix, OutputArray _transVect,
3759                                     OutputArray _rotMatrixX, OutputArray _rotMatrixY,
3760                                     OutputArray _rotMatrixZ, OutputArray _eulerAngles )
3761 {
3762     CV_INSTRUMENT_REGION();
3763 
3764     Mat projMatrix = _projMatrix.getMat();
3765     int type = projMatrix.type();
3766     _cameraMatrix.create(3, 3, type);
3767     _rotMatrix.create(3, 3, type);
3768     _transVect.create(4, 1, type);
3769     Mat cameraMatrix = _cameraMatrix.getMat();
3770     Mat rotMatrix = _rotMatrix.getMat();
3771     Mat transVect = _transVect.getMat();
3772     CvMat c_projMatrix = cvMat(projMatrix), c_cameraMatrix = cvMat(cameraMatrix);
3773     CvMat c_rotMatrix = cvMat(rotMatrix), c_transVect = cvMat(transVect);
3774     CvPoint3D64f *p_eulerAngles = 0;
3775 
3776 #define CV_decomposeProjectionMatrix_PARAM(name) \
3777     Mat name; \
3778     CvMat c_ ## name, *p_ ## name = NULL; \
3779     if( _ ## name.needed() ) \
3780     { \
3781         _ ## name.create(3, 3, type); \
3782         name = _ ## name.getMat(); \
3783         c_ ## name = cvMat(name); p_ ## name = &c_ ## name; \
3784     }
3785 
3786     CV_decomposeProjectionMatrix_PARAM(rotMatrixX);
3787     CV_decomposeProjectionMatrix_PARAM(rotMatrixY);
3788     CV_decomposeProjectionMatrix_PARAM(rotMatrixZ);
3789 #undef CV_decomposeProjectionMatrix_PARAM
3790 
3791     if( _eulerAngles.needed() )
3792     {
3793         _eulerAngles.create(3, 1, CV_64F, -1, true);
3794         p_eulerAngles = _eulerAngles.getMat().ptr<CvPoint3D64f>();
3795     }
3796 
3797     cvDecomposeProjectionMatrix(&c_projMatrix, &c_cameraMatrix, &c_rotMatrix,
3798                                 &c_transVect, p_rotMatrixX, p_rotMatrixY,
3799                                 p_rotMatrixZ, p_eulerAngles);
3800 }
3801 
3802 
3803 namespace cv
3804 {
3805 
3806 static void adjust3rdMatrix(InputArrayOfArrays _imgpt1_0,
3807                             InputArrayOfArrays _imgpt3_0,
3808                             const Mat& cameraMatrix1, const Mat& distCoeffs1,
3809                             const Mat& cameraMatrix3, const Mat& distCoeffs3,
3810                             const Mat& R1, const Mat& R3, const Mat& P1, Mat& P3 )
3811 {
3812     size_t n1 = _imgpt1_0.total(), n3 = _imgpt3_0.total();
3813     std::vector<Point2f> imgpt1, imgpt3;
3814 
3815     for( int i = 0; i < (int)std::min(n1, n3); i++ )
3816     {
3817         Mat pt1 = _imgpt1_0.getMat(i), pt3 = _imgpt3_0.getMat(i);
3818         int ni1 = pt1.checkVector(2, CV_32F), ni3 = pt3.checkVector(2, CV_32F);
3819         CV_Assert( ni1 > 0 && ni1 == ni3 );
3820         const Point2f* pt1data = pt1.ptr<Point2f>();
3821         const Point2f* pt3data = pt3.ptr<Point2f>();
3822         std::copy(pt1data, pt1data + ni1, std::back_inserter(imgpt1));
3823         std::copy(pt3data, pt3data + ni3, std::back_inserter(imgpt3));
3824     }
3825 
3826     undistortPoints(imgpt1, imgpt1, cameraMatrix1, distCoeffs1, R1, P1);
3827     undistortPoints(imgpt3, imgpt3, cameraMatrix3, distCoeffs3, R3, P3);
3828 
3829     double y1_ = 0, y2_ = 0, y1y1_ = 0, y1y2_ = 0;
3830     size_t n = imgpt1.size();
3831 
3832     for( size_t i = 0; i < n; i++ )
3833     {
3834         double y1 = imgpt3[i].y, y2 = imgpt1[i].y;
3835 
3836         y1_ += y1; y2_ += y2;
3837         y1y1_ += y1*y1; y1y2_ += y1*y2;
3838     }
3839 
3840     y1_ /= n;
3841     y2_ /= n;
3842     y1y1_ /= n;
3843     y1y2_ /= n;
3844 
3845     double a = (y1y2_ - y1_*y2_)/(y1y1_ - y1_*y1_);
3846     double b = y2_ - a*y1_;
3847 
3848     P3.at<double>(0,0) *= a;
3849     P3.at<double>(1,1) *= a;
3850     P3.at<double>(0,2) = P3.at<double>(0,2)*a;
3851     P3.at<double>(1,2) = P3.at<double>(1,2)*a + b;
3852     P3.at<double>(0,3) *= a;
3853     P3.at<double>(1,3) *= a;
3854 }
3855 
3856 }
3857 
3858 float cv::rectify3Collinear( InputArray _cameraMatrix1, InputArray _distCoeffs1,
3859                    InputArray _cameraMatrix2, InputArray _distCoeffs2,
3860                    InputArray _cameraMatrix3, InputArray _distCoeffs3,
3861                    InputArrayOfArrays _imgpt1,
3862                    InputArrayOfArrays _imgpt3,
3863                    Size imageSize, InputArray _Rmat12, InputArray _Tmat12,
3864                    InputArray _Rmat13, InputArray _Tmat13,
3865                    OutputArray _Rmat1, OutputArray _Rmat2, OutputArray _Rmat3,
3866                    OutputArray _Pmat1, OutputArray _Pmat2, OutputArray _Pmat3,
3867                    OutputArray _Qmat,
3868                    double alpha, Size newImgSize,
3869                    Rect* roi1, Rect* roi2, int flags )
3870 {
3871     // first, rectify the 1-2 stereo pair
3872     stereoRectify( _cameraMatrix1, _distCoeffs1, _cameraMatrix2, _distCoeffs2,
3873                    imageSize, _Rmat12, _Tmat12, _Rmat1, _Rmat2, _Pmat1, _Pmat2, _Qmat,
3874                    flags, alpha, newImgSize, roi1, roi2 );
3875 
3876     Mat R12 = _Rmat12.getMat(), R13 = _Rmat13.getMat(), T12 = _Tmat12.getMat(), T13 = _Tmat13.getMat();
3877 
3878     _Rmat3.create(3, 3, CV_64F);
3879     _Pmat3.create(3, 4, CV_64F);
3880 
3881     Mat P1 = _Pmat1.getMat(), P2 = _Pmat2.getMat();
3882     Mat R3 = _Rmat3.getMat(), P3 = _Pmat3.getMat();
3883 
3884     // recompute the rectifying rotation of the 1-2 pair, so it can be reused for camera 3.
3885     Mat om, r_r, r_r13;
3886 
3887     if( R13.size() != Size(3,3) )
3888         Rodrigues(R13, r_r13);
3889     else
3890         R13.copyTo(r_r13);
3891 
3892     if( R12.size() == Size(3,3) )
3893         Rodrigues(R12, om);
3894     else
3895         R12.copyTo(om);
3896 
3897     om *= -0.5;
3898     Rodrigues(om, r_r); // rotate cameras to same orientation by averaging
3899     Mat_<double> t12 = r_r * T12;
3900 
3901     int idx = fabs(t12(0,0)) > fabs(t12(1,0)) ? 0 : 1;
3902     double c = t12(idx,0), nt = norm(t12, CV_L2);
3903     CV_Assert(fabs(nt) > 0);
3904     Mat_<double> uu = Mat_<double>::zeros(3,1);
3905     uu(idx, 0) = c > 0 ? 1 : -1;
3906 
3907     // calculate global Z rotation
3908     Mat_<double> ww = t12.cross(uu), wR;
3909     double nw = norm(ww, CV_L2);
3910     CV_Assert(fabs(nw) > 0);
3911     ww *= acos(fabs(c)/nt)/nw;
3912     Rodrigues(ww, wR);
3913 
3914     // now rotate camera 3 to make its optical axis parallel to those of cameras 1 and 2.
3915     R3 = wR*r_r.t()*r_r13.t();
3916     Mat_<double> t13 = R3 * T13;
3917 
3918     P2.copyTo(P3);
3919     Mat t = P3.col(3);
3920     t13.copyTo(t);
3921     P3.at<double>(0,3) *= P3.at<double>(0,0);
3922     P3.at<double>(1,3) *= P3.at<double>(1,1);
3923 
3924     if( !_imgpt1.empty() && !_imgpt3.empty() )
3925         adjust3rdMatrix(_imgpt1, _imgpt3, _cameraMatrix1.getMat(), _distCoeffs1.getMat(),
3926                         _cameraMatrix3.getMat(), _distCoeffs3.getMat(), _Rmat1.getMat(), R3, P1, P3);
3927 
3928     return (float)((P3.at<double>(idx,3)/P3.at<double>(idx,idx))/
3929                    (P2.at<double>(idx,3)/P2.at<double>(idx,idx)));
3930 }
3931 
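/*
    Illustrative note (an interpretation of the return value above, not part of the library
    build): the returned float is the ratio of the rectified 1-3 baseline to the rectified
    1-2 baseline, read off the translation entries of P3 and P2, which can be used to relate
    disparities measured on one rectified pair to the other.
*/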
3932 
3933 /* End of file. */
3934