/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#ifndef __CVAUX__H__
#define __CVAUX__H__

#include "cv.h"

#ifdef __cplusplus
extern "C" {
#endif

CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
                              double canny_threshold,
                              double ffill_threshold,
                              CvMemStorage* storage );

/****************************************************************************************\
*                                  Eigen objects                                         *
\****************************************************************************************/

typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data);
typedef union
{
    CvCallback callback;
    void* data;
}
CvInput;

#define CV_EIGOBJ_NO_CALLBACK     0
#define CV_EIGOBJ_INPUT_CALLBACK  1
#define CV_EIGOBJ_OUTPUT_CALLBACK 2
#define CV_EIGOBJ_BOTH_CALLBACK   3

/* Calculates the covariance matrix of a set of arrays */
CVAPI(void)  cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags,
                                  int ioBufSize, uchar* buffer, void* userData,
                                  IplImage* avg, float* covarMatrix );

/* Calculates eigenvalues and eigenvectors of the covariance matrix of a set of
   arrays */
CVAPI(void)  cvCalcEigenObjects( int nObjects, void* input, void* output,
                                 int ioFlags, int ioBufSize, void* userData,
                                 CvTermCriteria* calcLimit, IplImage* avg,
                                 float* eigVals );

/* Calculates dot product (obj - avg) * eigObj (i.e. projects image onto eigenvector) */
CVAPI(double)  cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg );

/* Projects image onto eigenspace (finds all decomposition coefficients) */
CVAPI(void)  cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput,
                                 int ioFlags, void* userData, IplImage* avg,
                                 float* coeffs );

/* Projects original objects used to calculate eigenspace basis onto that space */
CVAPI(void)  cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags,
                                void* userData, float* coeffs, IplImage* avg,
                                IplImage* proj );

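/* Example (informal usage sketch, not part of the original documentation):
   a minimal eigen-object ("eigenface") flow with in-memory images and no
   callbacks.  The object count, image allocation and termination criteria
   below are illustrative assumptions only.

     int nObjects = 20, nEigens = nObjects - 1;
     IplImage* objects[20];   // nObjects 8-bit single-channel images, same size
     IplImage* eigens[19];    // nEigens 32-bit float images, same size
     IplImage* avg;           // 32-bit float average image, same size
     float eigVals[20], coeffs[19];
     CvTermCriteria limit = cvTermCriteria( CV_TERMCRIT_ITER | CV_TERMCRIT_EPS,
                                            nEigens, 0.01 );

     // ... load objects[], allocate eigens[] and avg ...

     cvCalcEigenObjects( nObjects, objects, eigens, CV_EIGOBJ_NO_CALLBACK,
                         0, 0, &limit, avg, eigVals );

     // project a probe image onto the computed basis
     cvEigenDecomposite( objects[0], nEigens, eigens, CV_EIGOBJ_NO_CALLBACK,
                         0, avg, coeffs );
*/
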
/****************************************************************************************\
*                                       1D/2D HMM                                        *
\****************************************************************************************/

typedef struct CvImgObsInfo
{
    int obs_x;
    int obs_y;
    int obs_size;
    float* obs; /* consecutive observations */

    int* state; /* array of superstate/state pairs to which each observation belongs */
    int* mix;   /* index of the mixture to which each observation belongs */

}
CvImgObsInfo; /* struct for one image */

typedef CvImgObsInfo Cv1DObsInfo;

typedef struct CvEHMMState
{
    int num_mix;        /* number of mixtures in this state */
    float* mu;          /* mean vectors corresponding to each mixture */
    float* inv_var;     /* square root of inverse variances corresponding to each mixture */
    float* log_var_val; /* sum of 0.5*(LN2PI + ln(variance[i])) for i=1,n */
    float* weight;      /* array of mixture weights. Sum of all weights in a state is 1. */

}
CvEHMMState;

typedef struct CvEHMM
{
    int level;       /* 0 - lowest (i.e. its states are real states), ... */
    int num_states;  /* number of HMM states */
    float*  transP;  /* transition probability matrices for states */
    float** obsProb; /* if level == 0 - array of prob matrices corresponding to hmm
                        if level == 1 - matrix of matrices */
    union
    {
        CvEHMMState* state;  /* if level == 0 points to real states array,
                                if not - points to embedded hmms */
        struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */
    } u;

}
CvEHMM;

/*CVAPI(int)  icvCreate1DHMM( CvEHMM** this_hmm,
                                   int state_number, int* num_mix, int obs_size );

CVAPI(int)  icvRelease1DHMM( CvEHMM** phmm );

CVAPI(int)  icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm );

CVAPI(int)  icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm);

CVAPI(int)  icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm);

CVAPI(int)  icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm );

CVAPI(int)  icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array,
                                           int num_seq,
                                           CvEHMM* hmm );

CVAPI(float)  icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm);

CVAPI(int)  icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/

/*********************************** Embedded HMMs *************************************/

/* Creates 2D HMM */
CVAPI(CvEHMM*)  cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize );

/* Releases HMM */
CVAPI(void)  cvRelease2DHMM( CvEHMM** hmm );

#define CV_COUNT_OBS(roi, win, delta, numObs )                                       \
{                                                                                    \
   (numObs)->width  =((roi)->width  -(win)->width  +(delta)->width)/(delta)->width;  \
   (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\
}

/* Creates storage for observation vectors */
CVAPI(CvImgObsInfo*)  cvCreateObsInfo( CvSize numObs, int obsSize );

/* Releases storage for observation vectors */
CVAPI(void)  cvReleaseObsInfo( CvImgObsInfo** obs_info );


/* The function takes an image on input and returns the sequence of observations
   to be used with an embedded HMM; each observation is the top-left block of the DCT
   coefficient matrix */
CVAPI(void)  cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize,
                             CvSize obsSize, CvSize delta );


/* Uniformly segments all observation vectors extracted from an image */
CVAPI(void)  cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm );

/* Does mixture segmentation of the states of the embedded HMM */
CVAPI(void)  cvInitMixSegm( CvImgObsInfo** obs_info_array,
                            int num_img, CvEHMM* hmm );

/* Function calculates means, variances and weights of every Gaussian mixture
   of every low-level state of the embedded HMM */
CVAPI(void)  cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array,
                                       int num_img, CvEHMM* hmm );

/* Function computes transition probability matrices of the embedded HMM
   given observations segmentation */
CVAPI(void)  cvEstimateTransProb( CvImgObsInfo** obs_info_array,
                                  int num_img, CvEHMM* hmm );

/* Function computes probabilities of appearing observations at any state
   (i.e. computes P(obs|state) for every pair (obs,state)) */
CVAPI(void)  cvEstimateObsProb( CvImgObsInfo* obs_info,
                                CvEHMM* hmm );

/* Runs Viterbi algorithm for embedded HMM */
CVAPI(float)  cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm );


/* Function clusters observation vectors from several images
   given observations segmentation.
   Euclidean distance is used for clustering vectors.
   Cluster centers are the means of each mixture */
CVAPI(void)  cvMixSegmL2( CvImgObsInfo** obs_info_array,
                          int num_img, CvEHMM* hmm );

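/* Example (informal usage sketch): a typical embedded-HMM flow for a single
   image, following the order of the declarations above.  The superstate/state
   layout, DCT window sizes and iteration strategy are illustrative assumptions,
   not prescribed values.

     int stateNumber[] = { 5, 3, 6, 6, 6, 3 };  // 5 superstates, then states per superstate
     int numMix[24];                            // one entry per low-level state (3+6+6+6+3)
     IplImage* image;                           // 8-bit grayscale input
     CvEHMM* hmm;
     CvImgObsInfo* obs;
     CvSize dctSize = cvSize(12,12), obsSize = cvSize(3,3), delta = cvSize(4,4);
     CvSize numObs, roi;

     // ... fill numMix[], load image, set roi = cvSize(image->width, image->height) ...

     hmm = cvCreate2DHMM( stateNumber, numMix, obsSize.width*obsSize.height );

     CV_COUNT_OBS( &roi, &dctSize, &delta, &numObs );
     obs = cvCreateObsInfo( numObs, obsSize.width*obsSize.height );
     cvImgToObs_DCT( image, obs->obs, dctSize, obsSize, delta );

     cvUniformImgSegm( obs, hmm );
     // with several training images, iterate cvInitMixSegm, cvEstimateHMMStateParams,
     // cvEstimateTransProb and cvMixSegmL2 until the model converges
     cvEstimateObsProb( obs, hmm );
     float likelihood = cvEViterbi( obs, hmm );

     cvReleaseObsInfo( &obs );
     cvRelease2DHMM( &hmm );
*/
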
/****************************************************************************************\
*               A few functions from old stereo gesture recognition demos                *
\****************************************************************************************/

/* Creates hand mask image given several points on the hand */
CVAPI(void)  cvCreateHandMask( CvSeq* hand_points,
                               IplImage *img_mask, CvRect *roi);

/* Finds hand region in range image data */
CVAPI(void)  cvFindHandRegion (CvPoint3D32f* points, int count,
                                CvSeq* indexs,
                                float* line, CvSize2D32f size, int flag,
                                CvPoint3D32f* center,
                                CvMemStorage* storage, CvSeq **numbers);

/* Finds hand region in range image data (advanced version) */
CVAPI(void)  cvFindHandRegionA( CvPoint3D32f* points, int count,
                                CvSeq* indexs,
                                float* line, CvSize2D32f size, int jc,
                                CvPoint3D32f* center,
                                CvMemStorage* storage, CvSeq **numbers);

/****************************************************************************************\
*                           Additional operations on Subdivisions                        *
\****************************************************************************************/

// paints voronoi diagram: just a demo function
CVAPI(void)  icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );

// checks planar subdivision for correctness. It is not an absolute check,
// but it verifies some relations between quad-edges
CVAPI(int)   icvSubdiv2DCheck( CvSubdiv2D* subdiv );

// returns squared distance between two 2D points with floating-point coordinates.
CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
{
    double dx = pt1.x - pt2.x;
    double dy = pt1.y - pt2.y;

    return dx*dx + dy*dy;
}


/****************************************************************************************\
*                           More operations on sequences                                 *
\****************************************************************************************/

/*****************************************************************************************/

#define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr))
#define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem))

#define  CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\
    float weight;

#define  CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS()

typedef struct CvGraphWeightedVtx
{
    CV_GRAPH_WEIGHTED_VERTEX_FIELDS()
}
CvGraphWeightedVtx;

typedef struct CvGraphWeightedEdge
{
    CV_GRAPH_WEIGHTED_EDGE_FIELDS()
}
CvGraphWeightedEdge;

typedef enum CvGraphWeightType
{
    CV_NOT_WEIGHTED,
    CV_WEIGHTED_VTX,
    CV_WEIGHTED_EDGE,
    CV_WEIGHTED_ALL
} CvGraphWeightType;


/*****************************************************************************************/


/*******************************Stereo correspondence*************************************/

typedef struct CvCliqueFinder
{
    CvGraph* graph;
    int**    adj_matr;
    int N; // graph size

    // stacks, counters etc.
    int k; // stack size
    int* current_comp;
    int** All;

    int* ne;
    int* ce;
    int* fixp; // node with minimal disconnections
    int* nod;
    int* s; // for selected candidate
    int status;
    int best_score;
    int weighted;
    int weighted_edges;
    float best_weight;
    float* edge_weights;
    float* vertex_weights;
    float* cur_weight;
    float* cand_weight;

} CvCliqueFinder;

#define CLIQUE_TIME_OFF 2
#define CLIQUE_FOUND 1
#define CLIQUE_END   0

/*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
                                   int weighted CV_DEFAULT(0),  int weighted_edges CV_DEFAULT(0));
CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder );

CVAPI(void) cvBronKerbosch( CvGraph* graph );*/


/*F///////////////////////////////////////////////////////////////////////////////////////
//
//    Name:    cvSubgraphWeight
//    Purpose: finds weight of subgraph in a graph
//    Context:
//    Parameters:
//      graph - input graph.
//      subgraph - sequence of pairwise different ints.  These are indices of vertices of subgraph.
//      weight_type - describes the way we measure weight.
//            one of the following:
//            CV_NOT_WEIGHTED - weight of a clique is simply its size
//            CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
//            CV_WEIGHTED_EDGE - the same but edges
//            CV_WEIGHTED_ALL - the same but both edges and vertices
//      weight_vtx - optional vector of floats, with size = graph->total.
//            If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL,
//            weights of vertices must be provided.  If weight_vtx is not zero,
//            the weights are taken from it; otherwise the function assumes
//            that vertices of the graph are inherited from CvGraphWeightedVtx.
//      weight_edge - optional matrix of floats, of width and height = graph->total.
//            If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL,
//            weights of edges ought to be supplied.  If weight_edge is not zero,
//            the function finds them there; otherwise the function expects
//            edges of the graph to be inherited from CvGraphWeightedEdge.
//            If this parameter is not zero, the structure of the graph is determined from
//            the matrix rather than from CvGraphEdge's.  In particular, elements corresponding
//            to absent edges should be zero.
//    Returns:
//      weight of subgraph.
//    Notes:
//F*/
/*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph,
                                  CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
                                  CvVect32f weight_vtx CV_DEFAULT(0),
                                  CvMatr32f weight_edge CV_DEFAULT(0) );*/


/*F///////////////////////////////////////////////////////////////////////////////////////
//
//    Name:    cvFindCliqueEx
//    Purpose: tries to find clique with maximum possible weight in a graph
//    Context:
//    Parameters:
//      graph - input graph.
//      storage - memory storage to be used by the result.
//      is_complementary - optional flag showing whether function should seek for clique
//            in complementary graph.
//      weight_type - describes our notion of weight.
//            one of the following:
//            CV_NOT_WEIGHTED - weight of a clique is simply its size
//            CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
//            CV_WEIGHTED_EDGE - the same but edges
//            CV_WEIGHTED_ALL - the same but both edges and vertices
//      weight_vtx - optional vector of floats, with size = graph->total.
//            If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL,
//            weights of vertices must be provided.  If weight_vtx is not zero,
//            the weights are taken from it; otherwise the function assumes
//            that vertices of the graph are inherited from CvGraphWeightedVtx.
//      weight_edge - optional matrix of floats, of width and height = graph->total.
//            If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL,
//            weights of edges ought to be supplied.  If weight_edge is not zero,
//            the function finds them there; otherwise the function expects
//            edges of the graph to be inherited from CvGraphWeightedEdge.
//            Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
//            nonzero is_complementary implies nonzero weight_edge.
//      start_clique - optional sequence of pairwise different ints.  They are indices of
//            vertices that shall be present in the output clique.
//      subgraph_of_ban - optional sequence of (maybe equal) ints.  They are indices of
//            vertices that shall not be present in the output clique.
//      clique_weight_ptr - optional output parameter.  Weight of the found clique is stored here.
//      num_generations - optional number of generations in evolutionary part of algorithm,
//            zero forces to return the first found clique.
//      quality - optional parameter determining degree of required quality/speed tradeoff.
//            Must be in the range from 0 to 9.
//            0 is fast and dirty, 9 is slow but hopefully yields a good clique.
//    Returns:
//      sequence of pairwise different ints.
//      These are indices of vertices that form the found clique.
//    Notes:
//      in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative.
//      start_clique has a priority over subgraph_of_ban.
//F*/
/*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage,
                                 int is_complementary CV_DEFAULT(0),
                                 CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
                                 CvVect32f weight_vtx CV_DEFAULT(0),
                                 CvMatr32f weight_edge CV_DEFAULT(0),
                                 CvSeq *start_clique CV_DEFAULT(0),
                                 CvSeq *subgraph_of_ban CV_DEFAULT(0),
                                 float *clique_weight_ptr CV_DEFAULT(0),
                                 int num_generations CV_DEFAULT(3),
                                 int quality CV_DEFAULT(2) );*/


#define CV_UNDEF_SC_PARAM         12345 // default value of parameters

#define CV_IDP_BIRCHFIELD_PARAM1  25
#define CV_IDP_BIRCHFIELD_PARAM2  5
#define CV_IDP_BIRCHFIELD_PARAM3  12
#define CV_IDP_BIRCHFIELD_PARAM4  15
#define CV_IDP_BIRCHFIELD_PARAM5  25


#define  CV_DISPARITY_BIRCHFIELD  0


/*F///////////////////////////////////////////////////////////////////////////
//
//    Name:    cvFindStereoCorrespondence
//    Purpose: find stereo correspondence on stereo-pair
//    Context:
//    Parameters:
//      leftImage - left image of stereo-pair (format 8uC1).
//      rightImage - right image of stereo-pair (format 8uC1).
//      mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
//      dispImage - destination disparity image
//      maxDisparity - maximal disparity
//      param1, param2, param3, param4, param5 - parameters of algorithm
//    Returns:
//    Notes:
//      Images must be rectified.
//      All images must have format 8uC1.
//F*/
CVAPI(void)
cvFindStereoCorrespondence(
                   const  CvArr* leftImage, const  CvArr* rightImage,
                   int     mode,
                   CvArr*  dispImage,
                   int     maxDisparity,
                   double  param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
                   double  param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
                   double  param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
                   double  param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
                   double  param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) );

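/* Example (informal usage sketch): computing a Birchfield disparity map for a
   rectified 8-bit stereo pair.  cvLoadImage comes from highgui, and the file
   names and maxDisparity value are assumptions.

     IplImage* left  = cvLoadImage( "left.png",  CV_LOAD_IMAGE_GRAYSCALE );
     IplImage* right = cvLoadImage( "right.png", CV_LOAD_IMAGE_GRAYSCALE );
     IplImage* disp  = cvCreateImage( cvGetSize(left), IPL_DEPTH_8U, 1 );

     // in C++ the trailing param1..param5 keep their CV_UNDEF_SC_PARAM defaults;
     // in plain C pass CV_UNDEF_SC_PARAM explicitly
     cvFindStereoCorrespondence( left, right, CV_DISPARITY_BIRCHFIELD,
                                 disp, 50,
                                 CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                                 CV_UNDEF_SC_PARAM, CV_UNDEF_SC_PARAM,
                                 CV_UNDEF_SC_PARAM );
*/
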
/*****************************************************************************************/
/************ Epiline functions *******************/



typedef struct CvStereoLineCoeff
{
    double Xcoef;
    double XcoefA;
    double XcoefB;
    double XcoefAB;

    double Ycoef;
    double YcoefA;
    double YcoefB;
    double YcoefAB;

    double Zcoef;
    double ZcoefA;
    double ZcoefB;
    double ZcoefAB;
} CvStereoLineCoeff;


typedef struct CvCamera
{
    float   imgSize[2];    /* size of the camera view, used during calibration */
    float   matrix[9];     /* intrinsic camera parameters:  [ fx 0 cx; 0 fy cy; 0 0 1 ] */
    float   distortion[4]; /* distortion coefficients - two coefficients for radial distortion
                              and another two for tangential: [ k1 k2 p1 p2 ] */
    float   rotMatr[9];
    float   transVect[3];  /* rotation matrix and translation vector relative
                              to some reference point in space. */
}
CvCamera;

typedef struct CvStereoCamera
{
    CvCamera* camera[2]; /* two individual camera parameters */
    float fundMatr[9]; /* fundamental matrix */

    /* New part for stereo */
    CvPoint3D32f epipole[2];
    CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after
                                epipolar geometry rectification */
    double coeffs[2][3][3];/* coefficients for transformation */
    CvPoint2D32f border[2][4];
    CvSize warpSize;
    CvStereoLineCoeff* lineCoeffs;
    int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */
    float rotMatrix[9];
    float transVector[3];
}
CvStereoCamera;


typedef struct CvContourOrientation
{
    float egvals[2];
    float egvects[4];

    float max, min; // minimum and maximum projections
    int imax, imin;
} CvContourOrientation;

#define CV_CAMERA_TO_WARP 1
#define CV_WARP_TO_CAMERA 2

CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3],
                                CvPoint2D32f* cameraPoint,
                                CvPoint2D32f* warpPoint,
                                int direction);

CVAPI(int) icvGetSymPoint3D(  CvPoint3D64f pointCorner,
                            CvPoint3D64f point1,
                            CvPoint3D64f point2,
                            CvPoint3D64f *pointSym2);

CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist);

CVAPI(int) icvCompute3DPoint(    double alpha,double betta,
                            CvStereoLineCoeff* coeffs,
                            CvPoint3D64f* point);

CVAPI(int) icvCreateConvertMatrVect( CvMatr64d     rotMatr1,
                                CvMatr64d     transVect1,
                                CvMatr64d     rotMatr2,
                                CvMatr64d     transVect2,
                                CvMatr64d     convRotMatr,
                                CvMatr64d     convTransVect);

CVAPI(int) icvConvertPointSystem(CvPoint3D64f  M2,
                            CvPoint3D64f* M1,
                            CvMatr64d     rotMatr,
                            CvMatr64d     transVect
                            );

CVAPI(int) icvComputeCoeffForStereo(  CvStereoCamera* stereoCamera);

CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross);
CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross);
CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point);
CVAPI(int) icvStereoCalibration( int numImages,
                            int* nums,
                            CvSize imageSize,
                            CvPoint2D32f* imagePoints1,
                            CvPoint2D32f* imagePoints2,
                            CvPoint3D32f* objectPoints,
                            CvStereoCamera* stereoparams
                           );


CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams);

CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY );

CVAPI(int) icvComCoeffForLine(   CvPoint2D64f point1,
                            CvPoint2D64f point2,
                            CvPoint2D64f point3,
                            CvPoint2D64f point4,
                            CvMatr64d    camMatr1,
                            CvMatr64d    rotMatr1,
                            CvMatr64d    transVect1,
                            CvMatr64d    camMatr2,
                            CvMatr64d    rotMatr2,
                            CvMatr64d    transVect2,
                            CvStereoLineCoeff*    coeffs,
                            int* needSwapCameras);

CVAPI(int) icvGetDirectionForPoint(  CvPoint2D64f point,
                                CvMatr64d camMatr,
                                CvPoint3D64f* direct);

CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12,
                       CvPoint3D64f point21,CvPoint3D64f point22,
                       CvPoint3D64f* midPoint);

CVAPI(int) icvComputeStereoLineCoeffs(   CvPoint3D64f pointA,
                                    CvPoint3D64f pointB,
                                    CvPoint3D64f pointCam1,
                                    double gamma,
                                    CvStereoLineCoeff*    coeffs);

/*CVAPI(int) icvComputeFundMatrEpipoles ( CvMatr64d camMatr1,
                                    CvMatr64d     rotMatr1,
                                    CvVect64d     transVect1,
                                    CvMatr64d     camMatr2,
                                    CvMatr64d     rotMatr2,
                                    CvVect64d     transVect2,
                                    CvPoint2D64f* epipole1,
                                    CvPoint2D64f* epipole2,
                                    CvMatr64d     fundMatr);*/

CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2);

CVAPI(void) icvGetCoefForPiece(   CvPoint2D64f p_start,CvPoint2D64f p_end,
                        double *a,double *b,double *c,
                        int* result);

/*CVAPI(void) icvGetCommonArea( CvSize imageSize,
                    CvPoint2D64f epipole1,CvPoint2D64f epipole2,
                    CvMatr64d fundMatr,
                    CvVect64d coeff11,CvVect64d coeff12,
                    CvVect64d coeff21,CvVect64d coeff22,
                    int* result);*/

CVAPI(void) icvComputeeInfiniteProject1(CvMatr64d    rotMatr,
                                     CvMatr64d    camMatr1,
                                     CvMatr64d    camMatr2,
                                     CvPoint2D32f point1,
                                     CvPoint2D32f *point2);

CVAPI(void) icvComputeeInfiniteProject2(CvMatr64d    rotMatr,
                                     CvMatr64d    camMatr1,
                                     CvMatr64d    camMatr2,
                                     CvPoint2D32f* point1,
                                     CvPoint2D32f point2);

CVAPI(void) icvGetCrossDirectDirect(  CvVect64d direct1,CvVect64d direct2,
                            CvPoint2D64f *cross,int* result);

CVAPI(void) icvGetCrossPieceDirect(   CvPoint2D64f p_start,CvPoint2D64f p_end,
                            double a,double b,double c,
                            CvPoint2D64f *cross,int* result);

CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end,
                            CvPoint2D64f p2_start,CvPoint2D64f p2_end,
                            CvPoint2D64f* cross,
                            int* result);

CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist);

CVAPI(void) icvGetCrossRectDirect(    CvSize imageSize,
                            double a,double b,double c,
                            CvPoint2D64f *start,CvPoint2D64f *end,
                            int* result);

CVAPI(void) icvProjectPointToImage(   CvPoint3D64f point,
                            CvMatr64d camMatr,CvMatr64d rotMatr,CvVect64d transVect,
                            CvPoint2D64f* projPoint);

CVAPI(void) icvGetQuadsTransform( CvSize        imageSize,
                        CvMatr64d     camMatr1,
                        CvMatr64d     rotMatr1,
                        CvVect64d     transVect1,
                        CvMatr64d     camMatr2,
                        CvMatr64d     rotMatr2,
                        CvVect64d     transVect2,
                        CvSize*       warpSize,
                        double quad1[4][2],
                        double quad2[4][2],
                        CvMatr64d     fundMatr,
                        CvPoint3D64f* epipole1,
                        CvPoint3D64f* epipole2
                        );

CVAPI(void) icvGetQuadsTransformStruct(  CvStereoCamera* stereoCamera);

CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera);

CVAPI(void) icvGetCutPiece(   CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
                    CvPoint2D64f epipole,
                    CvSize imageSize,
                    CvPoint2D64f* point11,CvPoint2D64f* point12,
                    CvPoint2D64f* point21,CvPoint2D64f* point22,
                    int* result);

CVAPI(void) icvGetMiddleAnglePoint(   CvPoint2D64f basePoint,
                            CvPoint2D64f point1,CvPoint2D64f point2,
                            CvPoint2D64f* midPoint);

CVAPI(void) icvGetNormalDirect(CvVect64d direct,CvPoint2D64f point,CvVect64d normDirect);

CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2);

CVAPI(void) icvProjectPointToDirect(  CvPoint2D64f point,CvVect64d lineCoeff,
                            CvPoint2D64f* projectPoint);

CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,CvVect64d lineCoef,double*dist);

CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst,
                              int desired_depth, int desired_num_channels );

CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd );

/*CVAPI(int) icvSelectBestRt(           int           numImages,
                                    int*          numPoints,
                                    CvSize        imageSize,
                                    CvPoint2D32f* imagePoints1,
                                    CvPoint2D32f* imagePoints2,
                                    CvPoint3D32f* objectPoints,

                                    CvMatr32f     cameraMatrix1,
                                    CvVect32f     distortion1,
                                    CvMatr32f     rotMatrs1,
                                    CvVect32f     transVects1,

                                    CvMatr32f     cameraMatrix2,
                                    CvVect32f     distortion2,
                                    CvMatr32f     rotMatrs2,
                                    CvVect32f     transVects2,

                                    CvMatr32f     bestRotMatr,
                                    CvVect32f     bestTransVect
                                    );*/

/****************************************************************************************\
*                                   Contour Morphing                                     *
\****************************************************************************************/

/* finds correspondence between two contours */
CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
                                     const CvSeq* contour2,
                                     CvMemStorage* storage);

/* morphs contours using the pre-calculated correspondence:
   alpha=0 ~ contour1, alpha=1 ~ contour2 */
CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2,
                        CvSeq* corr, double alpha,
                        CvMemStorage* storage );

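/* Example (informal usage sketch): morphing halfway between two contours.
   contour1 and contour2 are assumed to have been extracted beforehand
   (e.g. with cvFindContours).

     CvMemStorage* storage = cvCreateMemStorage(0);
     CvSeq* corr  = cvCalcContoursCorrespondence( contour1, contour2, storage );
     CvSeq* morph = cvMorphContours( contour1, contour2, corr, 0.5, storage );
     // morph now approximates the shape halfway between contour1 and contour2
     cvReleaseMemStorage( &storage );
*/
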
/****************************************************************************************\
*                                    Texture Descriptors                                 *
\****************************************************************************************/

#define CV_GLCM_OPTIMIZATION_NONE                   -2
#define CV_GLCM_OPTIMIZATION_LUT                    -1
#define CV_GLCM_OPTIMIZATION_HISTOGRAM              0

#define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST    10
#define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST    11
#define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM          4

#define CV_GLCMDESC_ENTROPY                         0
#define CV_GLCMDESC_ENERGY                          1
#define CV_GLCMDESC_HOMOGENITY                      2
#define CV_GLCMDESC_CONTRAST                        3
#define CV_GLCMDESC_CLUSTERTENDENCY                 4
#define CV_GLCMDESC_CLUSTERSHADE                    5
#define CV_GLCMDESC_CORRELATION                     6
#define CV_GLCMDESC_CORRELATIONINFO1                7
#define CV_GLCMDESC_CORRELATIONINFO2                8
#define CV_GLCMDESC_MAXIMUMPROBABILITY              9

#define CV_GLCM_ALL                                 0
#define CV_GLCM_GLCM                                1
#define CV_GLCM_DESC                                2

typedef struct CvGLCM CvGLCM;

CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage,
                                int stepMagnitude,
                                const int* stepDirections CV_DEFAULT(0),
                                int numStepDirections CV_DEFAULT(0),
                                int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE));

CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL));

CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM,
                                        int descriptorOptimizationType
                                        CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST));

CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor );

CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor,
                                              double* average, double* standardDeviation );

CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step );

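/* Example (informal usage sketch): measuring a couple of texture descriptors
   of an 8-bit single-channel image "gray".  The step magnitude, optimization
   type and descriptor choice are illustrative assumptions.

     CvGLCM* glcm = cvCreateGLCM( gray, 1, 0, 0, CV_GLCM_OPTIMIZATION_LUT );
     cvCreateGLCMDescriptors( glcm, CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST );

     double entropy  = cvGetGLCMDescriptor( glcm, 0, CV_GLCMDESC_ENTROPY );
     double contrast = cvGetGLCMDescriptor( glcm, 0, CV_GLCMDESC_CONTRAST );

     cvReleaseGLCM( &glcm, CV_GLCM_ALL );
*/
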
/****************************************************************************************\
*                                  Face eyes&mouth tracking                              *
\****************************************************************************************/


typedef struct CvFaceTracker CvFaceTracker;

#define CV_NUM_FACE_ELEMENTS    3
enum CV_FACE_ELEMENTS
{
    CV_FACE_MOUTH = 0,
    CV_FACE_LEFT_EYE = 1,
    CV_FACE_RIGHT_EYE = 2
};

CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray,
                                                CvRect* pRects, int nRects);
CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray,
                              CvRect* pRects, int nRects,
                              CvPoint* ptRotate, double* dbAngleRotate);
CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker);

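/* Example (informal usage sketch): tracking the three face elements across
   frames.  The initial rectangles are assumed to come from a separate detector,
   and passing 0 as the existing tracker is assumed to allocate a new one.

     CvRect rects[CV_NUM_FACE_ELEMENTS];   // mouth, left eye, right eye
     CvPoint rotPoint;
     double  rotAngle;
     CvFaceTracker* tracker = cvInitFaceTracker( 0, firstGrayFrame, rects,
                                                 CV_NUM_FACE_ELEMENTS );

     // for every new grayscale frame:
     if( cvTrackFace( tracker, grayFrame, rects, CV_NUM_FACE_ELEMENTS,
                      &rotPoint, &rotAngle ))
     {
         // rects[CV_FACE_MOUTH], rects[CV_FACE_LEFT_EYE] and rects[CV_FACE_RIGHT_EYE]
         // now hold the updated element positions
     }

     cvReleaseFaceTracker( &tracker );
*/
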

typedef struct CvFace
{
    CvRect MouthRect;
    CvRect LeftEyeRect;
    CvRect RightEyeRect;
} CvFaceData;

CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage);
CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);


/****************************************************************************************\
*                                         3D Tracker                                     *
\****************************************************************************************/

typedef unsigned char CvBool;

typedef struct
{
    int id;
    CvPoint2D32f p; // pgruebele: So we do not lose precision, this needs to be float
} Cv3dTracker2dTrackedObject;

CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p)
{
    Cv3dTracker2dTrackedObject r;
    r.id = id;
    r.p = p;
    return r;
}

typedef struct
{
    int id;
    CvPoint3D32f p;             // location of the tracked object
} Cv3dTrackerTrackedObject;

CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p)
{
    Cv3dTrackerTrackedObject r;
    r.id = id;
    r.p = p;
    return r;
}

typedef struct
{
    CvBool valid;
    float mat[4][4];              /* maps camera coordinates to world coordinates */
    CvPoint2D32f principal_point; /* copied from intrinsics so this structure */
                                  /* has all the info we need */
} Cv3dTrackerCameraInfo;

typedef struct
{
    CvPoint2D32f principal_point;
    float focal_length[2];
    float distortion[4];
} Cv3dTrackerCameraIntrinsics;

CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras,
                     const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */
                     CvSize etalon_size,
                     float square_size,
                     IplImage *samples[],                                   /* size is num_cameras */
                     Cv3dTrackerCameraInfo camera_info[]);                  /* size is num_cameras */

CVAPI(int)  cv3dTrackerLocateObjects(int num_cameras, int num_objects,
                   const Cv3dTrackerCameraInfo camera_info[],        /* size is num_cameras */
                   const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */
                   Cv3dTrackerTrackedObject tracked_objects[]);      /* size is num_objects */
/****************************************************************************************
 tracking_info is a rectangular array; one row per camera, num_objects elements per row.
 The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On
 completion, the return value is the number of objects located; i.e., the number of objects
 visible by more than one camera. The id field of any unused slots in tracked_objects is
 set to -1.
****************************************************************************************/

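/* Example (informal usage sketch): calibrating two cameras and locating objects
   seen by both.  The checkerboard parameters and array sizes are illustrative
   assumptions; see the layout note above for how tracking_info is organized.

     enum { NUM_CAMERAS = 2, NUM_OBJECTS = 3 };
     Cv3dTrackerCameraIntrinsics intrinsics[NUM_CAMERAS]; // from prior camera calibration
     Cv3dTrackerCameraInfo       info[NUM_CAMERAS];
     IplImage*                   samples[NUM_CAMERAS];    // one checkerboard view per camera

     if( cv3dTrackerCalibrateCameras( NUM_CAMERAS, intrinsics,
                                      cvSize(6,8), 30.f, samples, info ))
     {
         Cv3dTracker2dTrackedObject obs[NUM_CAMERAS*NUM_OBJECTS]; // one row per camera
         Cv3dTrackerTrackedObject   located[NUM_OBJECTS];

         // fill obs[camera*NUM_OBJECTS + slot] with cv3dTracker2dTrackedObject(id, point),
         // using id = -1 for unused slots
         int found = cv3dTrackerLocateObjects( NUM_CAMERAS, NUM_OBJECTS,
                                               info, obs, located );
     }
*/
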

/****************************************************************************************\
*                           Skeletons and Linear-Contour Models                          *
\****************************************************************************************/

typedef enum CvLeeParameters
{
    CV_LEE_INT = 0,
    CV_LEE_FLOAT = 1,
    CV_LEE_DOUBLE = 2,
    CV_LEE_AUTO = -1,
    CV_LEE_ERODE = 0,
    CV_LEE_ZOOM = 1,
    CV_LEE_NON = 2
} CvLeeParameters;

#define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))])
#define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))])
#define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0])
#define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1])
#define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)])
#define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))])
#define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))])
#define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))])
#define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])

#define CV_VORONOISITE2D_FIELDS()    \
    struct CvVoronoiNode2D *node[2]; \
    struct CvVoronoiEdge2D *edge[2];

typedef struct CvVoronoiSite2D
{
    CV_VORONOISITE2D_FIELDS()
    struct CvVoronoiSite2D *next[2];
} CvVoronoiSite2D;

#define CV_VORONOIEDGE2D_FIELDS()    \
    struct CvVoronoiNode2D *node[2]; \
    struct CvVoronoiSite2D *site[2]; \
    struct CvVoronoiEdge2D *next[4];

typedef struct CvVoronoiEdge2D
{
    CV_VORONOIEDGE2D_FIELDS()
} CvVoronoiEdge2D;

#define CV_VORONOINODE2D_FIELDS()       \
    CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \
    CvPoint2D32f pt;                    \
    float radius;

typedef struct CvVoronoiNode2D
{
    CV_VORONOINODE2D_FIELDS()
} CvVoronoiNode2D;

#define CV_VORONOIDIAGRAM2D_FIELDS() \
    CV_GRAPH_FIELDS()                \
    CvSet *sites;

typedef struct CvVoronoiDiagram2D
{
    CV_VORONOIDIAGRAM2D_FIELDS()
} CvVoronoiDiagram2D;

/* Computes Voronoi Diagram for given polygons with holes */
CVAPI(int)  cvVoronoiDiagramFromContour(CvSeq* ContourSeq,
                                           CvVoronoiDiagram2D** VoronoiDiagram,
                                           CvMemStorage* VoronoiStorage,
                                           CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT),
                                           int contour_orientation CV_DEFAULT(-1),
                                           int attempt_number CV_DEFAULT(10));

/* Computes Voronoi Diagram for domains in given image */
CVAPI(int)  cvVoronoiDiagramFromImage(IplImage* pImage,
                                         CvSeq** ContourSeq,
                                         CvVoronoiDiagram2D** VoronoiDiagram,
                                         CvMemStorage* VoronoiStorage,
                                         CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON),
                                         float approx_precision CV_DEFAULT(CV_LEE_AUTO));

/* Deallocates the storage */
CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram,
                                          CvMemStorage** pVoronoiStorage);

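/* Example (informal usage sketch): building and releasing a Voronoi diagram for
   a contour extracted from a binary image.  The contour extraction parameters
   are assumptions, and a nonzero return value is assumed to indicate success.

     CvMemStorage* contourStorage = cvCreateMemStorage(0);
     CvMemStorage* voronoiStorage = cvCreateMemStorage(0);
     CvSeq* contours = 0;
     CvVoronoiDiagram2D* diagram = 0;

     cvFindContours( binaryImage, contourStorage, &contours, sizeof(CvContour),
                     CV_RETR_EXTERNAL, CV_CHAIN_APPROX_SIMPLE, cvPoint(0,0) );

     if( cvVoronoiDiagramFromContour( contours, &diagram, voronoiStorage,
                                      CV_LEE_INT, -1, 10 ))
     {
         // walk diagram->sites (and the graph edges) here
         cvReleaseVoronoiStorage( diagram, &voronoiStorage );
     }
     cvReleaseMemStorage( &contourStorage );
*/
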
/*********************** Linear-Contour Model ****************************/

struct CvLCMEdge;
struct CvLCMNode;

typedef struct CvLCMEdge
{
    CV_GRAPH_EDGE_FIELDS()
    CvSeq* chain;
    float width;
    int index1;
    int index2;
} CvLCMEdge;

typedef struct CvLCMNode
{
    CV_GRAPH_VERTEX_FIELDS()
    CvContour* contour;
} CvLCMNode;


/* Computes hybrid model from Voronoi Diagram */
CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram,
                                                         float maxWidth);

/* Releases hybrid model storage */
CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph);


/* two stereo-related functions */

CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3],
                                              CvArr* rectMap );

/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params,
                                             CvArr* rectMap1, CvArr* rectMap2,
                                             int do_undistortion );*/

/*************************** View Morphing Functions ************************/

/* The order of the functions corresponds to the order in which they should appear
   in the view morphing pipeline (see the usage sketch after cvDeleteMoire below) */

/* Finds ending points of scanlines on left and right images of stereo-pair */
CVAPI(void)  cvMakeScanlines( const CvMatrix3* matrix, CvSize  img_size,
                              int*  scanlines1, int*  scanlines2,
                              int*  lengths1, int*  lengths2,
                              int*  line_count );

/* Grabs pixel values from scanlines and stores them sequentially
   (some sort of perspective image transform) */
CVAPI(void)  cvPreWarpImage( int       line_count,
                             IplImage* img,
                             uchar*    dst,
                             int*      dst_nums,
                             int*      scanlines);

/* Approximates each grabbed scanline by a sequence of runs
   (lossy run-length compression) */
CVAPI(void)  cvFindRuns( int    line_count,
                         uchar* prewarp1,
                         uchar* prewarp2,
                         int*   line_lengths1,
                         int*   line_lengths2,
                         int*   runs1,
                         int*   runs2,
                         int*   num_runs1,
                         int*   num_runs2);

/* Compares two sets of compressed scanlines */
CVAPI(void)  cvDynamicCorrespondMulti( int  line_count,
                                       int* first,
                                       int* first_runs,
                                       int* second,
                                       int* second_runs,
                                       int* first_corr,
                                       int* second_corr);

/* Finds scanline ending coordinates for some intermediate "virtual" camera position */
CVAPI(void)  cvMakeAlphaScanlines( int*  scanlines1,
                                   int*  scanlines2,
                                   int*  scanlinesA,
                                   int*  lengths,
                                   int   line_count,
                                   float alpha);

/* Blends data of the left and right image scanlines to get
   pixel values of "virtual" image scanlines */
CVAPI(void)  cvMorphEpilinesMulti( int    line_count,
                                   uchar* first_pix,
                                   int*   first_num,
                                   uchar* second_pix,
                                   int*   second_num,
                                   uchar* dst_pix,
                                   int*   dst_num,
                                   float  alpha,
                                   int*   first,
                                   int*   first_runs,
                                   int*   second,
                                   int*   second_runs,
                                   int*   first_corr,
                                   int*   second_corr);

/* Does reverse warping of the morphing result to make
   it fill the destination image rectangle */
CVAPI(void)  cvPostWarpImage( int       line_count,
                              uchar*    src,
                              int*      src_nums,
                              IplImage* img,
                              int*      scanlines);

/* Deletes moire (missed pixels that appear due to discretization) */
CVAPI(void)  cvDeleteMoire( IplImage*  img );

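/* Example (informal usage sketch): the view-morphing call order for one
   intermediate view at blending factor alpha.  Buffer allocation, the
   fundamental matrix setup (CvMatrix3 matrix) and all buffer names are
   assumptions; only the call order follows the declarations above.

     cvMakeScanlines( &matrix, imgSize, 0, 0, 0, 0, &lineCount ); // often called first with
                                                                  // null buffers to query lineCount
     // ... allocate scanline/length/run/correspondence buffers for lineCount lines ...
     cvMakeScanlines( &matrix, imgSize, scanlines1, scanlines2, lengths1, lengths2, &lineCount );
     cvPreWarpImage( lineCount, img1, prewarp1, lengths1, scanlines1 );
     cvPreWarpImage( lineCount, img2, prewarp2, lengths2, scanlines2 );
     cvFindRuns( lineCount, prewarp1, prewarp2, lengths1, lengths2,
                 runs1, runs2, numRuns1, numRuns2 );
     cvDynamicCorrespondMulti( lineCount, runs1, numRuns1, runs2, numRuns2, corr1, corr2 );
     cvMakeAlphaScanlines( scanlines1, scanlines2, scanlinesA, lengthsA, lineCount, alpha );
     cvMorphEpilinesMulti( lineCount, prewarp1, lengths1, prewarp2, lengths2,
                           prewarpA, lengthsA, alpha,
                           runs1, numRuns1, runs2, numRuns2, corr1, corr2 );
     cvPostWarpImage( lineCount, prewarpA, lengthsA, imgA, scanlinesA );
     cvDeleteMoire( imgA );
*/
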

/****************************************************************************************\
*                           Background/foreground segmentation                           *
\****************************************************************************************/

/* We discriminate between foreground and background pixels
 * by building and maintaining a model of the background.
 * Any pixel which does not fit this model is then deemed
 * to be foreground.
 *
 * At present we support two core background models,
 * one of which has two variations:
 *
 *  o CV_BG_MODEL_FGD: latest and greatest algorithm, described in
 *
 *       Foreground Object Detection from Videos Containing Complex Background.
 *       Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
 *       ACM MM2003 9p
 *
 *  o CV_BG_MODEL_FGD_SIMPLE:
 *       A code comment describes this as a simplified version of the above,
 *       but the code is in fact currently identical
 *
 *  o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in
 *
 *       Moving target classification and tracking from real-time video.
 *       A Lipton, H Fujiyoshi, R Patil
 *       Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998
 *
 *       Learning patterns of activity using real-time tracking
 *       C Stauffer and W Grimson  August 2000
 *       IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757
 */


#define CV_BG_MODEL_FGD         0
#define CV_BG_MODEL_MOG         1   /* "Mixture of Gaussians". */
#define CV_BG_MODEL_FGD_SIMPLE  2

struct CvBGStatModel;

typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model );
typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model );

#define CV_BG_STAT_MODEL_FIELDS()                                                   \
    int             type; /*type of BG model*/                                      \
    CvReleaseBGStatModel release;                                                   \
    CvUpdateBGStatModel update;                                                     \
    IplImage*       background;   /*8UC3 reference background image*/               \
    IplImage*       foreground;   /*8UC1 foreground image*/                         \
    IplImage**      layers;       /*8UC3 reference background image, can be null */ \
    int             layer_count;  /* can be zero */                                 \
    CvMemStorage*   storage;      /*storage for foreground_regions*/                \
    CvSeq*          foreground_regions /*foreground object contours*/

typedef struct CvBGStatModel
{
    CV_BG_STAT_MODEL_FIELDS();
}
CvBGStatModel;

//

// Releases memory used by BGStatModel
CV_INLINE void cvReleaseBGStatModel( CvBGStatModel** bg_model )
{
    if( bg_model && *bg_model && (*bg_model)->release )
        (*bg_model)->release( bg_model );
}

// Updates statistical model and returns number of found foreground regions
CV_INLINE int cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel*  bg_model )
{
    return bg_model && bg_model->update ? bg_model->update( current_frame, bg_model ) : 0;
}

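/* Example (informal usage sketch): the generic update/release cycle shared by
   the background models built on CvBGStatModel.  Model creation (one of the
   constructor functions declared elsewhere in this header, e.g. for the FGD or
   MOG models) and frame capture are assumed.

     CvBGStatModel* bgModel;   // created from the first frame by a model constructor

     // for every subsequent frame:
     int regionCount = cvUpdateBGStatModel( frame, bgModel );
     // bgModel->foreground is the 8UC1 foreground mask and
     // bgModel->foreground_regions holds the detected contours

     cvReleaseBGStatModel( &bgModel );
*/
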
// Performs FG post-processing using segmentation
// (all pixels of a region will be classified as foreground if the majority of pixels of the region are FG).
// parameters:
//      segments - pointer to result of segmentation (for example MeanShiftSegmentation)
//      bg_model - pointer to CvBGStatModel structure
CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel*  bg_model );

/* Common use change detection function */
CVAPI(int)  cvChangeDetection( IplImage*  prev_frame,
                               IplImage*  curr_frame,
                               IplImage*  change_mask );

/*
  Interface of ACM MM2003 algorithm
*/

/* Default parameters of foreground detection algorithm: */
#define  CV_BGFG_FGD_LC              128
#define  CV_BGFG_FGD_N1C             15
#define  CV_BGFG_FGD_N2C             25

#define  CV_BGFG_FGD_LCC             64
#define  CV_BGFG_FGD_N1CC            25
#define  CV_BGFG_FGD_N2CC            40

/* Background reference image update parameter: */
#define  CV_BGFG_FGD_ALPHA_1         0.1f

/* stat model update parameter
 * 0.002f ~ 1K frames (~45 sec), 0.005 ~ 18 sec (if 25 fps and absolutely static BG)
 */
#define  CV_BGFG_FGD_ALPHA_2         0.005f

/* start value for alpha parameter (to quickly initialize the statistical model) */
#define  CV_BGFG_FGD_ALPHA_3         0.1f

#define  CV_BGFG_FGD_DELTA           2

#define  CV_BGFG_FGD_T               0.9f

#define  CV_BGFG_FGD_MINAREA         15.f

#define  CV_BGFG_FGD_BG_UPDATE_TRESH 0.5f

/* See the above-referenced Li/Huang/Gu/Tian paper
 * for a full description of these background-model
 * tuning parameters.
 *
 * Nomenclature:  'c'  == "color", a three-component red/green/blue vector.
 *                        We use histograms of these to model the range of
 *                        colors we've seen at a given background pixel.
 *
 *                'cc' == "color co-occurrence", a six-component vector giving
 *                        RGB color for both this frame and the preceding frame.
 *                        We use histograms of these to model the range of
 *                        color CHANGES we've seen at a given background pixel.
 */
typedef struct CvFGDStatModelParams
{
    int    Lc;                  /* Quantized levels per 'color' component. Power of two, typically 32, 64 or 128. */
    int    N1c;                 /* Number of color vectors used to model normal background color variation at a given pixel. */
    int    N2c;                 /* Number of color vectors retained at a given pixel.  Must be > N1c, typically ~ 5/3 of N1c. */
                                /* Used to allow the first N1c vectors to adapt over time to changing background. */

    int    Lcc;                 /* Quantized levels per 'color co-occurrence' component.  Power of two, typically 16, 32 or 64. */
    int    N1cc;                /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */
    int    N2cc;                /* Number of color co-occurrence vectors retained at a given pixel.  Must be > N1cc, typically ~ 5/3 of N1cc. */
                                /* Used to allow the first N1cc vectors to adapt over time to changing background. */

    int    is_obj_without_holes;/* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */
    int    perform_morphing;    /* Number of erode-dilate-erode foreground-blob cleanup iterations. */
                                /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */

    float  alpha1;              /* How quickly we forget old background pixel values seen.  Typically set to 0.1. */
    float  alpha2;              /* "Controls speed of feature learning". Depends on T. Typical value circa 0.005. */
    float  alpha3;              /* Alternative to alpha2, used (e.g.) for quicker initial convergence. Typical value 0.1. */

    float  delta;               /* Affects color and color co-occurrence quantization, typically set to 2. */
    float  T;                   /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9). */
    float  minArea;             /* Discard foreground blobs whose bounding box is smaller than this threshold. */
}
CvFGDStatModelParams;

1274 typedef struct CvBGPixelCStatTable
1275 {
1276     float          Pv, Pvb;
1277     uchar          v[3];
1278 }
1279 CvBGPixelCStatTable;
1280 
1281 typedef struct CvBGPixelCCStatTable
1282 {
1283     float          Pv, Pvb;
1284     uchar          v[6];
1285 }
1286 CvBGPixelCCStatTable;
1287 
1288 typedef struct CvBGPixelStat
1289 {
1290     float                 Pbc;
1291     float                 Pbcc;
1292     CvBGPixelCStatTable*  ctable;
1293     CvBGPixelCCStatTable* cctable;
1294     uchar                 is_trained_st_model;
1295     uchar                 is_trained_dyn_model;
1296 }
1297 CvBGPixelStat;
1298 
1299 
1300 typedef struct CvFGDStatModel
1301 {
1302     CV_BG_STAT_MODEL_FIELDS();
1303     CvBGPixelStat*         pixel_stat;
1304     IplImage*              Ftd;
1305     IplImage*              Fbd;
1306     IplImage*              prev_frame;
1307     CvFGDStatModelParams   params;
1308 }
1309 CvFGDStatModel;
1310 
1311 /* Creates FGD model */
1312 CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame,
1313                     CvFGDStatModelParams* parameters CV_DEFAULT(NULL));
1314 
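/* Illustrative sketch: creating an FGD model with explicit parameters.  Passing NULL
   instead of &params uses the built-in defaults; the values below simply mirror the
   CV_BGFG_FGD_* macros above, and `first_frame` is a placeholder image:

       CvFGDStatModelParams params;
       params.Lc      = CV_BGFG_FGD_LC;       params.N1c   = CV_BGFG_FGD_N1C;
       params.N2c     = CV_BGFG_FGD_N2C;      params.Lcc   = CV_BGFG_FGD_LCC;
       params.N1cc    = CV_BGFG_FGD_N1CC;     params.N2cc  = CV_BGFG_FGD_N2CC;
       params.is_obj_without_holes = 1;       params.perform_morphing = 1;
       params.alpha1  = CV_BGFG_FGD_ALPHA_1;  params.alpha2 = CV_BGFG_FGD_ALPHA_2;
       params.alpha3  = CV_BGFG_FGD_ALPHA_3;  params.delta  = CV_BGFG_FGD_DELTA;
       params.T       = CV_BGFG_FGD_T;        params.minArea = CV_BGFG_FGD_MINAREA;

       CvBGStatModel* bg_model = cvCreateFGDStatModel( first_frame, &params );
*/
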
1315 /*
1316    Interface of Gaussian mixture algorithm
1317 
1318    "An improved adaptive background mixture model for real-time tracking with shadow detection"
1319    P. KadewTraKuPong and R. Bowden,
1320    Proc. 2nd European Workshop on Advanced Video-Based Surveillance Systems, 2001."
1321    http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
1322 */
1323 
1324 /* Note:  "MOG" == "Mixture Of Gaussians": */
1325 
1326 #define CV_BGFG_MOG_MAX_NGAUSSIANS 500
1327 
1328 /* Default parameters of the Gaussian background detection algorithm: */
1329 #define CV_BGFG_MOG_BACKGROUND_THRESHOLD     0.7     /* threshold sum of weights for background test */
1330 #define CV_BGFG_MOG_STD_THRESHOLD            2.5     /* lambda=2.5: ~99% of a Gaussian lies within 2.5 std devs */
1331 #define CV_BGFG_MOG_WINDOW_SIZE              200     /* Learning rate; alpha = 1/CV_BGFG_MOG_WINDOW_SIZE */
1332 #define CV_BGFG_MOG_NGAUSSIANS               5       /* = K = number of Gaussians in mixture */
1333 #define CV_BGFG_MOG_WEIGHT_INIT              0.05
1334 #define CV_BGFG_MOG_SIGMA_INIT               30
1335 #define CV_BGFG_MOG_MINAREA                  15.f
1336 
1337 
1338 #define CV_BGFG_MOG_NCOLORS                  3
1339 
1340 typedef struct CvGaussBGStatModelParams
1341 {
1342     int     win_size;               /* = 1/alpha */
1343     int     n_gauss;
1344     double  bg_threshold, std_threshold, minArea;
1345     double  weight_init, variance_init;
1346 }CvGaussBGStatModelParams;
1347 
1348 typedef struct CvGaussBGValues
1349 {
1350     int         match_sum;
1351     double      weight;
1352     double      variance[CV_BGFG_MOG_NCOLORS];
1353     double      mean[CV_BGFG_MOG_NCOLORS];
1354 }
1355 CvGaussBGValues;
1356 
1357 typedef struct CvGaussBGPoint
1358 {
1359     CvGaussBGValues* g_values;
1360 }
1361 CvGaussBGPoint;
1362 
1363 
1364 typedef struct CvGaussBGModel
1365 {
1366     CV_BG_STAT_MODEL_FIELDS();
1367     CvGaussBGStatModelParams   params;
1368     CvGaussBGPoint*            g_point;
1369     int                        countFrames;
1370 }
1371 CvGaussBGModel;
1372 
1373 
1374 /* Creates Gaussian mixture background model */
1375 CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame,
1376                 CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL));
1377 
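/* Illustrative sketch: creating the mixture-of-Gaussians model with explicit parameters
   (passing NULL uses the defaults).  Initializing the variance from the squared sigma
   default is an assumption of this example, as is the `first_frame` placeholder:

       CvGaussBGStatModelParams params;
       params.win_size      = CV_BGFG_MOG_WINDOW_SIZE;
       params.n_gauss       = CV_BGFG_MOG_NGAUSSIANS;
       params.bg_threshold  = CV_BGFG_MOG_BACKGROUND_THRESHOLD;
       params.std_threshold = CV_BGFG_MOG_STD_THRESHOLD;
       params.minArea       = CV_BGFG_MOG_MINAREA;
       params.weight_init   = CV_BGFG_MOG_WEIGHT_INIT;
       params.variance_init = CV_BGFG_MOG_SIGMA_INIT * CV_BGFG_MOG_SIGMA_INIT;

       CvBGStatModel* bg_model = cvCreateGaussianBGModel( first_frame, &params );
*/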
1378 
1379 typedef struct CvBGCodeBookElem
1380 {
1381     struct CvBGCodeBookElem* next;
1382     int tLastUpdate;
1383     int stale;
1384     uchar boxMin[3];
1385     uchar boxMax[3];
1386     uchar learnMin[3];
1387     uchar learnMax[3];
1388 }
1389 CvBGCodeBookElem;
1390 
1391 typedef struct CvBGCodeBookModel
1392 {
1393     CvSize size;
1394     int t;
1395     uchar cbBounds[3];
1396     uchar modMin[3];
1397     uchar modMax[3];
1398     CvBGCodeBookElem** cbmap;
1399     CvMemStorage* storage;
1400     CvBGCodeBookElem* freeList;
1401 }
1402 CvBGCodeBookModel;
1403 
1404 CVAPI(CvBGCodeBookModel*) cvCreateBGCodeBookModel( void );
1405 CVAPI(void) cvReleaseBGCodeBookModel( CvBGCodeBookModel** model );
1406 
1407 CVAPI(void) cvBGCodeBookUpdate( CvBGCodeBookModel* model, const CvArr* image,
1408                                 CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
1409                                 const CvArr* mask CV_DEFAULT(0) );
1410 
1411 CVAPI(int) cvBGCodeBookDiff( const CvBGCodeBookModel* model, const CvArr* image,
1412                              CvArr* fgmask, CvRect roi CV_DEFAULT(cvRect(0,0,0,0)) );
1413 
1414 CVAPI(void) cvBGCodeBookClearStale( CvBGCodeBookModel* model, int staleThresh,
1415                                     CvRect roi CV_DEFAULT(cvRect(0,0,0,0)),
1416                                     const CvArr* mask CV_DEFAULT(0) );
1417 
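/* Usage sketch (illustrative): learn a codebook over an initial set of frames, prune
   stale entries, then segment subsequent frames.  `frame`, `fgmask` and the learning
   length N are placeholders, and the staleThresh of model->t/2 is an assumption of
   this example:

       CvBGCodeBookModel* model = cvCreateBGCodeBookModel();
       for( i = 0; i < N; i++ )                                    // learning phase
           cvBGCodeBookUpdate( model, frame, cvRect(0,0,0,0), 0 );
       cvBGCodeBookClearStale( model, model->t/2, cvRect(0,0,0,0), 0 );

       cvBGCodeBookDiff( model, frame, fgmask, cvRect(0,0,0,0) );  // per subsequent frame

       cvReleaseBGCodeBookModel( &model );
*/
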
1418 CVAPI(CvSeq*) cvSegmentFGMask( CvArr *fgmask, int poly1Hull0 CV_DEFAULT(1),
1419                                float perimScale CV_DEFAULT(4.f),
1420                                CvMemStorage* storage CV_DEFAULT(0),
1421                                CvPoint offset CV_DEFAULT(cvPoint(0,0)));
1422 
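/* Illustrative follow-up to the codebook sketch above: clean a raw foreground mask
   and retrieve the resulting component contours.  `storage` is assumed to be a
   CvMemStorage created with cvCreateMemStorage(0):

       CvSeq* components = cvSegmentFGMask( fgmask, 1, 4.f, storage, cvPoint(0,0) );
*/
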
1423 #ifdef __cplusplus
1424 }
1425 #endif
1426 
1427 #ifdef __cplusplus
1428 
1429 /****************************************************************************************\
1430 *                                   Calibration engine                                   *
1431 \****************************************************************************************/
1432 
1433 typedef enum CvCalibEtalonType
1434 {
1435     CV_CALIB_ETALON_USER = -1,
1436     CV_CALIB_ETALON_CHESSBOARD = 0,
1437     CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD
1438 }
1439 CvCalibEtalonType;
1440 
1441 class CV_EXPORTS CvCalibFilter
1442 {
1443 public:
1444     /* Constructor & destructor */
1445     CvCalibFilter();
1446     virtual ~CvCalibFilter();
1447 
1448     /* Sets etalon type - one for all cameras.
1449        etalonParams is used in case of pre-defined etalons (such as chessboard).
1450        Number of elements in etalonParams is determined by etalonType.
1451        E.g., if the etalon type is CV_CALIB_ETALON_CHESSBOARD then:
1452          etalonParams[0] is number of squares per one side of etalon
1453          etalonParams[1] is number of squares per another side of etalon
1454          etalonParams[2] is linear size of squares in the board in arbitrary units.
1455        pointCount & points are used in case of
1456        CV_CALIB_ETALON_USER (user-defined) etalon. */
1457     virtual bool
1458         SetEtalon( CvCalibEtalonType etalonType, double* etalonParams,
1459                    int pointCount = 0, CvPoint2D32f* points = 0 );
1460 
1461     /* Retrieves etalon parameters and/or points */
1462     virtual CvCalibEtalonType
1463         GetEtalon( int* paramCount = 0, const double** etalonParams = 0,
1464                    int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const;
1465 
1466     /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */
1467     virtual void SetCameraCount( int cameraCount );
1468 
1469     /* Retrieves number of cameras */
1470     int GetCameraCount() const { return cameraCount; }
1471 
1472     /* Starts camera calibration */
1473     virtual bool SetFrames( int totalFrames );
1474 
1475     /* Stops camera calibration */
1476     virtual void Stop( bool calibrate = false );
1477 
1478     /* Returns true once calibration has been completed */
1479     bool IsCalibrated() const { return isCalibrated; }
1480 
1481     /* Feeds another series of snapshots (one per camera) to the filter.
1482        Etalon points on these images are found automatically.
1483        If the function can't locate points, it returns false */
1484     virtual bool FindEtalon( IplImage** imgs );
1485 
1486     /* The same but takes matrices */
1487     virtual bool FindEtalon( CvMat** imgs );
1488 
1489     /* Lower-level function for feeding filter with already found etalon points.
1490        Array of point arrays for each camera is passed. */
1491     virtual bool Push( const CvPoint2D32f** points = 0 );
1492 
1493     /* Returns total number of accepted frames and, optionally,
1494        total number of frames to collect */
1495     virtual int GetFrameCount( int* framesTotal = 0 ) const;
1496 
1497     /* Retrieves camera parameters for specified camera.
1498        If camera is not calibrated the function returns 0 */
1499     virtual const CvCamera* GetCameraParams( int idx = 0 ) const;
1500 
1501     virtual const CvStereoCamera* GetStereoParams() const;
1502 
1503     /* Sets camera parameters for all cameras */
1504     virtual bool SetCameraParams( CvCamera* params );
1505 
1506     /* Saves all camera parameters to file */
1507     virtual bool SaveCameraParams( const char* filename );
1508 
1509     /* Loads all camera parameters from file */
1510     virtual bool LoadCameraParams( const char* filename );
1511 
1512     /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
1513     virtual bool Undistort( IplImage** src, IplImage** dst );
1514 
1515     /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
1516     virtual bool Undistort( CvMat** src, CvMat** dst );
1517 
1518     /* Returns the array of etalon points detected/partially detected
1519        on the latest frame for idx-th camera */
1520     virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts,
1521                                                   int* count, bool* found );
1522 
1523     /* Draw the latest detected/partially detected etalon */
1524     virtual void DrawPoints( IplImage** dst );
1525 
1526     /* Draw the latest detected/partially detected etalon */
1527     virtual void DrawPoints( CvMat** dst );
1528 
1529     virtual bool Rectify( IplImage** srcarr, IplImage** dstarr );
1530     virtual bool Rectify( CvMat** srcarr, CvMat** dstarr );
1531 
1532 protected:
1533 
1534     enum { MAX_CAMERAS = 3 };
1535 
1536     /* etalon data */
1537     CvCalibEtalonType  etalonType;
1538     int     etalonParamCount;
1539     double* etalonParams;
1540     int     etalonPointCount;
1541     CvPoint2D32f* etalonPoints;
1542     CvSize  imgSize;
1543     CvMat*  grayImg;
1544     CvMat*  tempImg;
1545     CvMemStorage* storage;
1546 
1547     /* camera data */
1548     int     cameraCount;
1549     CvCamera cameraParams[MAX_CAMERAS];
1550     CvStereoCamera stereo;
1551     CvPoint2D32f* points[MAX_CAMERAS];
1552     CvMat*  undistMap[MAX_CAMERAS][2];
1553     CvMat*  undistImg;
1554     int     latestCounts[MAX_CAMERAS];
1555     CvPoint2D32f* latestPoints[MAX_CAMERAS];
1556     CvMat*  rectMap[MAX_CAMERAS][2];
1557 
1558     /* Added by Valery */
1559     //CvStereoCamera stereoParams;
1560 
1561     int     maxPoints;
1562     int     framesTotal;
1563     int     framesAccepted;
1564     bool    isCalibrated;
1565 };
1566 
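/* Usage sketch (illustrative, C++ only): single-camera calibration with a chessboard
   etalon.  The square counts/size, the number of frames, `grab_next_frame()` and the
   output filename are placeholders; Push() with no argument is assumed to accept the
   most recently found points:

       CvCalibFilter calib;
       double etalon_params[] = { 8, 6, 25 };      // squares per side, square size (arbitrary units)
       calib.SetEtalon( CV_CALIB_ETALON_CHESSBOARD, etalon_params );
       calib.SetCameraCount( 1 );
       calib.SetFrames( 15 );                      // frames to collect
       while( !calib.IsCalibrated() )
       {
           IplImage* imgs[1] = { grab_next_frame() };   // hypothetical frame source
           if( calib.FindEtalon( imgs ) )
               calib.Push();
           calib.DrawPoints( imgs );                    // optional visual feedback
       }
       calib.SaveCameraParams( "cameras.txt" );
*/
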
1567 #include "cvaux.hpp"
1568 #include "cvvidsurv.hpp"
1569 /*#include "cvmat.hpp"*/
1570 #endif
1571 
1572 #endif
1573 
1574 /* End of file. */
1575