1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
/* Segments the source image: edges are located with the Canny detector
   (canny_threshold) and regions are then flood-filled (ffill_threshold).
   The painted result is written to `dstarr`; the returned sequence of
   components is allocated in `storage`.
   NOTE(review): semantics inferred from the declaration only — confirm
   against the implementation. */
CVAPI(CvSeq*) cvSegmentImage( const CvArr* srcarr, CvArr* dstarr,
                              double canny_threshold,
                              double ffill_threshold,
                              CvMemStorage* storage );
56 /****************************************************************************************\
58 \****************************************************************************************/
/* User-supplied callback used by the eigen-object functions to read (input)
   or write (output) object data block by block; `index` is the number of the
   first object placed in `buffer`. Returns a status code.
   NOTE(review): exact return convention not visible here — check callers. */
typedef int (CV_CDECL * CvCallback)(int index, void* buffer, void* user_data);

/* ioFlags values: whether `input`/`output` below are plain arrays of images
   or CvCallback functions. */
#define CV_EIGOBJ_NO_CALLBACK 0
#define CV_EIGOBJ_INPUT_CALLBACK 1
#define CV_EIGOBJ_OUTPUT_CALLBACK 2
#define CV_EIGOBJ_BOTH_CALLBACK 3

/* Calculates covariation matrix of a set of arrays */
CVAPI(void) cvCalcCovarMatrixEx( int nObjects, void* input, int ioFlags,
                                 int ioBufSize, uchar* buffer, void* userData,
                                 IplImage* avg, float* covarMatrix );
78 /* Calculates eigen values and vectors of covariation matrix of a set of
80 CVAPI(void) cvCalcEigenObjects( int nObjects, void* input, void* output,
81 int ioFlags, int ioBufSize, void* userData,
82 CvTermCriteria* calcLimit, IplImage* avg,
/* Calculates dot product (obj - avg) * eigObj (i.e. projects image to eigen vector) */
CVAPI(double) cvCalcDecompCoeff( IplImage* obj, IplImage* eigObj, IplImage* avg );

/* Projects image to eigen space (finds all decomposition coefficients) */
CVAPI(void) cvEigenDecomposite( IplImage* obj, int nEigObjs, void* eigInput,
                                int ioFlags, void* userData, IplImage* avg,
93 /* Projects original objects used to calculate eigen space basis to that space */
94 CVAPI(void) cvEigenProjection( void* eigInput, int nEigObjs, int ioFlags,
95 void* userData, float* coeffs, IplImage* avg,
98 /****************************************************************************************\
100 \****************************************************************************************/
typedef struct CvImgObsInfo
    float* obs; /* consecutive observation vectors, stored back to back */
    int* state; /* array of pairs superstate/state to which each observation belongs */
    int* mix;   /* number of the mixture to which each observation belongs */
CvImgObsInfo; /* struct for one image */
115 typedef CvImgObsInfo Cv1DObsInfo;
typedef struct CvEHMMState
    int num_mix;        /* number of mixtures in this state */
    float* mu;          /* mean vectors corresponding to each mixture */
    float* inv_var;     /* square root of inversed variances corresp. to each mixture */
    float* log_var_val; /* sum of 0.5*(LN2PI + ln(variance[i])) for i=1..n */
    float* weight;      /* array of mixture weights; the sum of all weights in a state is 1 */
typedef struct CvEHMM
    int level;       /* 0 - lowest (i.e. its states are real states), ... */
    int num_states;  /* number of HMM states */
    float* transP;   /* transition probability matrices for states */
    float** obsProb; /* if level == 0 - array of prob matrices corresponding to hmm,
                        if level == 1 - matrix of matrices */
    CvEHMMState* state; /* if level == 0 points to real states array,
                           if not - points to embedded hmms */
    struct CvEHMM* ehmm; /* pointer to an embedded model or NULL, if it is a leaf */
145 /*CVAPI(int) icvCreate1DHMM( CvEHMM** this_hmm,
146 int state_number, int* num_mix, int obs_size );
148 CVAPI(int) icvRelease1DHMM( CvEHMM** phmm );
150 CVAPI(int) icvUniform1DSegm( Cv1DObsInfo* obs_info, CvEHMM* hmm );
152 CVAPI(int) icvInit1DMixSegm( Cv1DObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
154 CVAPI(int) icvEstimate1DHMMStateParams( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm);
156 CVAPI(int) icvEstimate1DObsProb( CvImgObsInfo* obs_info, CvEHMM* hmm );
158 CVAPI(int) icvEstimate1DTransProb( Cv1DObsInfo** obs_info_array,
162 CVAPI(float) icvViterbi( Cv1DObsInfo* obs_info, CvEHMM* hmm);
164 CVAPI(int) icv1DMixSegmL2( CvImgObsInfo** obs_info_array, int num_img, CvEHMM* hmm );*/
166 /*********************************** Embedded HMMs *************************************/
169 CVAPI(CvEHMM*) cvCreate2DHMM( int* stateNumber, int* numMix, int obsSize );
172 CVAPI(void) cvRelease2DHMM( CvEHMM** hmm );
174 #define CV_COUNT_OBS(roi, win, delta, numObs ) \
176 (numObs)->width =((roi)->width -(win)->width +(delta)->width)/(delta)->width; \
177 (numObs)->height =((roi)->height -(win)->height +(delta)->height)/(delta)->height;\
/* Creates storage for `numObs.width * numObs.height` observation vectors of
   `obsSize` floats each; release with cvReleaseObsInfo */
CVAPI(CvImgObsInfo*) cvCreateObsInfo( CvSize numObs, int obsSize );

/* Releases storage for observation vectors and clears *obs_info */
CVAPI(void) cvReleaseObsInfo( CvImgObsInfo** obs_info );
/* The function takes an image on input and returns the sequence of observations
   to be used with an embedded HMM; each observation is the top-left block of the
   DCT coefficient matrix */
CVAPI(void) cvImgToObs_DCT( const CvArr* arr, float* obs, CvSize dctSize,
                            CvSize obsSize, CvSize delta );
/* Uniformly segments all observation vectors extracted from an image
   across the states of the embedded HMM (initial segmentation step) */
CVAPI(void) cvUniformImgSegm( CvImgObsInfo* obs_info, CvEHMM* ehmm );
/* Does mixture segmentation of the states of the embedded HMM
   over `num_img` images' observations */
CVAPI(void) cvInitMixSegm( CvImgObsInfo** obs_info_array,
                           int num_img, CvEHMM* hmm );
201 /* Function calculates means, variances, weights of every Gaussian mixture
202 of every low-level state of embedded HMM */
203 CVAPI(void) cvEstimateHMMStateParams( CvImgObsInfo** obs_info_array,
204 int num_img, CvEHMM* hmm );
206 /* Function computes transition probability matrices of embedded HMM
207 given observations segmentation */
208 CVAPI(void) cvEstimateTransProb( CvImgObsInfo** obs_info_array,
209 int num_img, CvEHMM* hmm );
211 /* Function computes probabilities of appearing observations at any state
212 (i.e. computes P(obs|state) for every pair(obs,state)) */
213 CVAPI(void) cvEstimateObsProb( CvImgObsInfo* obs_info,
/* Runs the Viterbi algorithm for the embedded HMM and returns the score
   of the best state path.
   NOTE(review): return-value semantics (likelihood vs. log-likelihood)
   inferred — confirm in the implementation. */
CVAPI(float) cvEViterbi( CvImgObsInfo* obs_info, CvEHMM* hmm );
220 /* Function clusters observation vectors from several images
221 given observations segmentation.
222 Euclidean distance used for clustering vectors.
223 Centers of clusters are given means of every mixture */
224 CVAPI(void) cvMixSegmL2( CvImgObsInfo** obs_info_array,
225 int num_img, CvEHMM* hmm );
227 /****************************************************************************************\
* A few functions from old stereo gesture recognition demos                             *
229 \****************************************************************************************/
231 /* Creates hand mask image given several points on the hand */
232 CVAPI(void) cvCreateHandMask( CvSeq* hand_points,
233 IplImage *img_mask, CvRect *roi);
235 /* Finds hand region in range image data */
236 CVAPI(void) cvFindHandRegion (CvPoint3D32f* points, int count,
238 float* line, CvSize2D32f size, int flag,
239 CvPoint3D32f* center,
240 CvMemStorage* storage, CvSeq **numbers);
242 /* Finds hand region in range image data (advanced version) */
243 CVAPI(void) cvFindHandRegionA( CvPoint3D32f* points, int count,
245 float* line, CvSize2D32f size, int jc,
246 CvPoint3D32f* center,
247 CvMemStorage* storage, CvSeq **numbers);
249 /****************************************************************************************\
250 * Additional operations on Subdivisions *
251 \****************************************************************************************/
// paints a voronoi diagram from `subdiv` over `src` into `dst`: just a demo function
CVAPI(void) icvDrawMosaic( CvSubdiv2D* subdiv, IplImage* src, IplImage* dst );

// checks a planar subdivision for correctness. It is not an absolute check,
// but it verifies some relations between quad-edges; returns nonzero if the
// checks pass (NOTE(review): return convention inferred — confirm in source)
CVAPI(int) icvSubdiv2DCheck( CvSubdiv2D* subdiv );
// returns squared distance between two 2D points with floating-point coordinates.
CV_INLINE double icvSqDist2D32f( CvPoint2D32f pt1, CvPoint2D32f pt2 )
    /* differences are taken in double precision to avoid float round-off */
    double dx = pt1.x - pt2.x;
    double dy = pt1.y - pt2.y;
    return dx*dx + dy*dy;
270 /****************************************************************************************\
271 * More operations on sequences *
272 \****************************************************************************************/
274 /*****************************************************************************************/
276 #define CV_CURRENT_INT( reader ) (*((int *)(reader).ptr))
277 #define CV_PREV_INT( reader ) (*((int *)(reader).prev_elem))
279 #define CV_GRAPH_WEIGHTED_VERTEX_FIELDS() CV_GRAPH_VERTEX_FIELDS()\
282 #define CV_GRAPH_WEIGHTED_EDGE_FIELDS() CV_GRAPH_EDGE_FIELDS()
284 typedef struct CvGraphWeightedVtx
286 CV_GRAPH_WEIGHTED_VERTEX_FIELDS()
290 typedef struct CvGraphWeightedEdge
292 CV_GRAPH_WEIGHTED_EDGE_FIELDS()
296 typedef enum CvGraphWeightType
305 /*****************************************************************************************/
308 /*******************************Stereo correspondence*************************************/
310 typedef struct CvCliqueFinder
316 // stacks, counters etc/
323 int* fixp; //node with minimal disconnections
325 int* s; //for selected candidate
332 float* vertex_weights;
338 #define CLIQUE_TIME_OFF 2
339 #define CLIQUE_FOUND 1
342 /*CVAPI(void) cvStartFindCliques( CvGraph* graph, CvCliqueFinder* finder, int reverse,
343 int weighted CV_DEFAULT(0), int weighted_edges CV_DEFAULT(0));
344 CVAPI(int) cvFindNextMaximalClique( CvCliqueFinder* finder, int* clock_rest CV_DEFAULT(0) );
345 CVAPI(void) cvEndFindCliques( CvCliqueFinder* finder );
347 CVAPI(void) cvBronKerbosch( CvGraph* graph );*/
350 /*F///////////////////////////////////////////////////////////////////////////////////////
352 // Name: cvSubgraphWeight
353 // Purpose: finds weight of subgraph in a graph
356 // graph - input graph.
357 // subgraph - sequence of pairwise different ints. These are indices of vertices of subgraph.
358 // weight_type - describes the way we measure weight.
359 // one of the following:
360 // CV_NOT_WEIGHTED - weight of a clique is simply its size
361 // CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
362 // CV_WEIGHTED_EDGE - the same but edges
363 // CV_WEIGHTED_ALL - the same but both edges and vertices
364 // weight_vtx - optional vector of floats, with size = graph->total.
365 // If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
366 // weights of vertices must be provided. If weight_vtx not zero
367 // these weights considered to be here, otherwise function assumes
368 // that vertices of graph are inherited from CvGraphWeightedVtx.
369 // weight_edge - optional matrix of floats, of width and height = graph->total.
370 // If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
371 // weights of edges ought to be supplied. If weight_edge is not zero
372 // function finds them here, otherwise function expects
373 // edges of graph to be inherited from CvGraphWeightedEdge.
374 // If this parameter is not zero structure of the graph is determined from matrix
375 // rather than from CvGraphEdge's. In particular, elements corresponding to
376 // absent edges should be zero.
378 // weight of subgraph.
381 /*CVAPI(float) cvSubgraphWeight( CvGraph *graph, CvSeq *subgraph,
382 CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
383 CvVect32f weight_vtx CV_DEFAULT(0),
384 CvMatr32f weight_edge CV_DEFAULT(0) );*/
387 /*F///////////////////////////////////////////////////////////////////////////////////////
389 // Name: cvFindCliqueEx
390 // Purpose: tries to find clique with maximum possible weight in a graph
393 // graph - input graph.
394 // storage - memory storage to be used by the result.
395 // is_complementary - optional flag showing whether function should seek for clique
396 // in complementary graph.
397 // weight_type - describes our notion about weight.
398 // one of the following:
399 // CV_NOT_WEIGHTED - weight of a clique is simply its size
400 // CV_WEIGHTED_VTX - weight of a clique is the sum of weights of its vertices
401 // CV_WEIGHTED_EDGE - the same but edges
402 // CV_WEIGHTED_ALL - the same but both edges and vertices
403 // weight_vtx - optional vector of floats, with size = graph->total.
404 // If weight_type is either CV_WEIGHTED_VTX or CV_WEIGHTED_ALL
405 // weights of vertices must be provided. If weight_vtx not zero
406 // these weights considered to be here, otherwise function assumes
407 // that vertices of graph are inherited from CvGraphWeightedVtx.
408 // weight_edge - optional matrix of floats, of width and height = graph->total.
409 // If weight_type is either CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
410 // weights of edges ought to be supplied. If weight_edge is not zero
411 // function finds them here, otherwise function expects
412 // edges of graph to be inherited from CvGraphWeightedEdge.
413 // Note that in case of CV_WEIGHTED_EDGE or CV_WEIGHTED_ALL
414 // nonzero is_complementary implies nonzero weight_edge.
415 // start_clique - optional sequence of pairwise different ints. They are indices of
416 // vertices that shall be present in the output clique.
417 // subgraph_of_ban - optional sequence of (maybe equal) ints. They are indices of
418 // vertices that shall not be present in the output clique.
419 // clique_weight_ptr - optional output parameter. Weight of found clique stored here.
420 // num_generations - optional number of generations in evolutionary part of algorithm,
421 // zero forces to return first found clique.
422 // quality - optional parameter determining degree of required quality/speed tradeoff.
423 // Must be in the range from 0 to 9.
424 // 0 is fast and dirty, 9 is slow but hopefully yields good clique.
426 // sequence of pairwise different ints.
427 // These are indices of vertices that form found clique.
429 // in cases of CV_WEIGHTED_EDGE and CV_WEIGHTED_ALL weights should be nonnegative.
430 // start_clique has a priority over subgraph_of_ban.
432 /*CVAPI(CvSeq*) cvFindCliqueEx( CvGraph *graph, CvMemStorage *storage,
433 int is_complementary CV_DEFAULT(0),
434 CvGraphWeightType weight_type CV_DEFAULT(CV_NOT_WEIGHTED),
435 CvVect32f weight_vtx CV_DEFAULT(0),
436 CvMatr32f weight_edge CV_DEFAULT(0),
437 CvSeq *start_clique CV_DEFAULT(0),
438 CvSeq *subgraph_of_ban CV_DEFAULT(0),
439 float *clique_weight_ptr CV_DEFAULT(0),
440 int num_generations CV_DEFAULT(3),
441 int quality CV_DEFAULT(2) );*/
444 #define CV_UNDEF_SC_PARAM 12345 //default value of parameters
446 #define CV_IDP_BIRCHFIELD_PARAM1 25
447 #define CV_IDP_BIRCHFIELD_PARAM2 5
448 #define CV_IDP_BIRCHFIELD_PARAM3 12
449 #define CV_IDP_BIRCHFIELD_PARAM4 15
450 #define CV_IDP_BIRCHFIELD_PARAM5 25
453 #define CV_DISPARITY_BIRCHFIELD 0
456 /*F///////////////////////////////////////////////////////////////////////////
458 // Name: cvFindStereoCorrespondence
459 // Purpose: find stereo correspondence on stereo-pair
462 // leftImage - left image of stereo-pair (format 8uC1).
463 // rightImage - right image of stereo-pair (format 8uC1).
464 // mode - mode of correspondence retrieval (now CV_DISPARITY_BIRCHFIELD only)
465 // dispImage - destination disparity image
466 // maxDisparity - maximal disparity
467 // param1, param2, param3, param4, param5 - parameters of algorithm
470 // Images must be rectified.
471 // All images must have format 8uC1.
474 cvFindStereoCorrespondence(
475 const CvArr* leftImage, const CvArr* rightImage,
479 double param1 CV_DEFAULT(CV_UNDEF_SC_PARAM),
480 double param2 CV_DEFAULT(CV_UNDEF_SC_PARAM),
481 double param3 CV_DEFAULT(CV_UNDEF_SC_PARAM),
482 double param4 CV_DEFAULT(CV_UNDEF_SC_PARAM),
483 double param5 CV_DEFAULT(CV_UNDEF_SC_PARAM) );
485 /*****************************************************************************************/
486 /************ Epiline functions *******************/
490 typedef struct CvStereoLineCoeff
509 typedef struct CvCamera
511 float imgSize[2]; /* size of the camera view, used during calibration */
512 float matrix[9]; /* intinsic camera parameters: [ fx 0 cx; 0 fy cy; 0 0 1 ] */
513 float distortion[4]; /* distortion coefficients - two coefficients for radial distortion
514 and another two for tangential: [ k1 k2 p1 p2 ] */
516 float transVect[3]; /* rotation matrix and transition vector relatively
517 to some reference point in the space. */
521 typedef struct CvStereoCamera
523 CvCamera* camera[2]; /* two individual camera parameters */
524 float fundMatr[9]; /* fundamental matrix */
526 /* New part for stereo */
527 CvPoint3D32f epipole[2];
528 CvPoint2D32f quad[2][4]; /* coordinates of destination quadrangle after
529 epipolar geometry rectification */
530 double coeffs[2][3][3];/* coefficients for transformation */
531 CvPoint2D32f border[2][4];
533 CvStereoLineCoeff* lineCoeffs;
534 int needSwapCameras;/* flag set to 1 if need to swap cameras for good reconstruction */
536 float transVector[3];
541 typedef struct CvContourOrientation
546 float max, min; // minimum and maximum projections
548 } CvContourOrientation;
550 #define CV_CAMERA_TO_WARP 1
551 #define CV_WARP_TO_CAMERA 2
553 CVAPI(int) icvConvertWarpCoordinates(double coeffs[3][3],
554 CvPoint2D32f* cameraPoint,
555 CvPoint2D32f* warpPoint,
558 CVAPI(int) icvGetSymPoint3D( CvPoint3D64f pointCorner,
561 CvPoint3D64f *pointSym2);
563 CVAPI(void) icvGetPieceLength3D(CvPoint3D64f point1,CvPoint3D64f point2,double* dist);
565 CVAPI(int) icvCompute3DPoint( double alpha,double betta,
566 CvStereoLineCoeff* coeffs,
567 CvPoint3D64f* point);
569 CVAPI(int) icvCreateConvertMatrVect( CvMatr64d rotMatr1,
570 CvMatr64d transVect1,
572 CvMatr64d transVect2,
573 CvMatr64d convRotMatr,
574 CvMatr64d convTransVect);
576 CVAPI(int) icvConvertPointSystem(CvPoint3D64f M2,
582 CVAPI(int) icvComputeCoeffForStereo( CvStereoCamera* stereoCamera);
584 CVAPI(int) icvGetCrossPieceVector(CvPoint2D32f p1_start,CvPoint2D32f p1_end,CvPoint2D32f v2_start,CvPoint2D32f v2_end,CvPoint2D32f *cross);
585 CVAPI(int) icvGetCrossLineDirect(CvPoint2D32f p1,CvPoint2D32f p2,float a,float b,float c,CvPoint2D32f* cross);
586 CVAPI(float) icvDefinePointPosition(CvPoint2D32f point1,CvPoint2D32f point2,CvPoint2D32f point);
587 CVAPI(int) icvStereoCalibration( int numImages,
590 CvPoint2D32f* imagePoints1,
591 CvPoint2D32f* imagePoints2,
592 CvPoint3D32f* objectPoints,
593 CvStereoCamera* stereoparams
597 CVAPI(int) icvComputeRestStereoParams(CvStereoCamera *stereoparams);
599 CVAPI(void) cvComputePerspectiveMap( const double coeffs[3][3], CvArr* rectMapX, CvArr* rectMapY );
601 CVAPI(int) icvComCoeffForLine( CvPoint2D64f point1,
607 CvMatr64d transVect1,
610 CvMatr64d transVect2,
611 CvStereoLineCoeff* coeffs,
612 int* needSwapCameras);
614 CVAPI(int) icvGetDirectionForPoint( CvPoint2D64f point,
616 CvPoint3D64f* direct);
618 CVAPI(int) icvGetCrossLines(CvPoint3D64f point11,CvPoint3D64f point12,
619 CvPoint3D64f point21,CvPoint3D64f point22,
620 CvPoint3D64f* midPoint);
622 CVAPI(int) icvComputeStereoLineCoeffs( CvPoint3D64f pointA,
624 CvPoint3D64f pointCam1,
626 CvStereoLineCoeff* coeffs);
628 /*CVAPI(int) icvComputeFundMatrEpipoles ( CvMatr64d camMatr1,
630 CvVect64d transVect1,
633 CvVect64d transVect2,
634 CvPoint2D64f* epipole1,
635 CvPoint2D64f* epipole2,
636 CvMatr64d fundMatr);*/
638 CVAPI(int) icvGetAngleLine( CvPoint2D64f startPoint, CvSize imageSize,CvPoint2D64f *point1,CvPoint2D64f *point2);
640 CVAPI(void) icvGetCoefForPiece( CvPoint2D64f p_start,CvPoint2D64f p_end,
641 double *a,double *b,double *c,
644 /*CVAPI(void) icvGetCommonArea( CvSize imageSize,
645 CvPoint2D64f epipole1,CvPoint2D64f epipole2,
647 CvVect64d coeff11,CvVect64d coeff12,
648 CvVect64d coeff21,CvVect64d coeff22,
651 CVAPI(void) icvComputeeInfiniteProject1(CvMatr64d rotMatr,
655 CvPoint2D32f *point2);
657 CVAPI(void) icvComputeeInfiniteProject2(CvMatr64d rotMatr,
660 CvPoint2D32f* point1,
661 CvPoint2D32f point2);
663 CVAPI(void) icvGetCrossDirectDirect( CvVect64d direct1,CvVect64d direct2,
664 CvPoint2D64f *cross,int* result);
666 CVAPI(void) icvGetCrossPieceDirect( CvPoint2D64f p_start,CvPoint2D64f p_end,
667 double a,double b,double c,
668 CvPoint2D64f *cross,int* result);
670 CVAPI(void) icvGetCrossPiecePiece( CvPoint2D64f p1_start,CvPoint2D64f p1_end,
671 CvPoint2D64f p2_start,CvPoint2D64f p2_end,
675 CVAPI(void) icvGetPieceLength(CvPoint2D64f point1,CvPoint2D64f point2,double* dist);
677 CVAPI(void) icvGetCrossRectDirect( CvSize imageSize,
678 double a,double b,double c,
679 CvPoint2D64f *start,CvPoint2D64f *end,
682 CVAPI(void) icvProjectPointToImage( CvPoint3D64f point,
683 CvMatr64d camMatr,CvMatr64d rotMatr,CvVect64d transVect,
684 CvPoint2D64f* projPoint);
686 CVAPI(void) icvGetQuadsTransform( CvSize imageSize,
689 CvVect64d transVect1,
692 CvVect64d transVect2,
697 CvPoint3D64f* epipole1,
698 CvPoint3D64f* epipole2
701 CVAPI(void) icvGetQuadsTransformStruct( CvStereoCamera* stereoCamera);
703 CVAPI(void) icvComputeStereoParamsForCameras(CvStereoCamera* stereoCamera);
705 CVAPI(void) icvGetCutPiece( CvVect64d areaLineCoef1,CvVect64d areaLineCoef2,
706 CvPoint2D64f epipole,
708 CvPoint2D64f* point11,CvPoint2D64f* point12,
709 CvPoint2D64f* point21,CvPoint2D64f* point22,
712 CVAPI(void) icvGetMiddleAnglePoint( CvPoint2D64f basePoint,
713 CvPoint2D64f point1,CvPoint2D64f point2,
714 CvPoint2D64f* midPoint);
716 CVAPI(void) icvGetNormalDirect(CvVect64d direct,CvPoint2D64f point,CvVect64d normDirect);
718 CVAPI(double) icvGetVect(CvPoint2D64f basePoint,CvPoint2D64f point1,CvPoint2D64f point2);
720 CVAPI(void) icvProjectPointToDirect( CvPoint2D64f point,CvVect64d lineCoeff,
721 CvPoint2D64f* projectPoint);
723 CVAPI(void) icvGetDistanceFromPointToDirect( CvPoint2D64f point,CvVect64d lineCoef,double*dist);
/* Returns an image matching `src` in size with the desired depth and channel
   count, reusing `dst` when suitable.
   NOTE(review): reuse/allocation behavior inferred from the name — confirm
   against the implementation. */
CVAPI(IplImage*) icvCreateIsometricImage( IplImage* src, IplImage* dst,
                                          int desired_depth, int desired_num_channels );

/* Splits an interlaced frame into its even-line and odd-line fields */
CVAPI(void) cvDeInterlace( const CvArr* frame, CvArr* fieldEven, CvArr* fieldOdd );
730 /*CVAPI(int) icvSelectBestRt( int numImages,
733 CvPoint2D32f* imagePoints1,
734 CvPoint2D32f* imagePoints2,
735 CvPoint3D32f* objectPoints,
737 CvMatr32f cameraMatrix1,
738 CvVect32f distortion1,
740 CvVect32f transVects1,
742 CvMatr32f cameraMatrix2,
743 CvVect32f distortion2,
745 CvVect32f transVects2,
747 CvMatr32f bestRotMatr,
748 CvVect32f bestTransVect
751 /****************************************************************************************\
753 \****************************************************************************************/
/* finds correspondence between two contours; the result sequence is
   allocated in `storage` */
CvSeq* cvCalcContoursCorrespondence( const CvSeq* contour1,
                                     const CvSeq* contour2,
                                     CvMemStorage* storage);

/* morphs contours using the pre-calculated correspondence `corr`
   (from cvCalcContoursCorrespondence):
   alpha=0 ~ contour1, alpha=1 ~ contour2 */
CvSeq* cvMorphContours( const CvSeq* contour1, const CvSeq* contour2,
                        CvSeq* corr, double alpha,
                        CvMemStorage* storage );
766 /****************************************************************************************\
767 * Texture Descriptors *
768 \****************************************************************************************/
770 #define CV_GLCM_OPTIMIZATION_NONE -2
771 #define CV_GLCM_OPTIMIZATION_LUT -1
772 #define CV_GLCM_OPTIMIZATION_HISTOGRAM 0
774 #define CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST 10
775 #define CV_GLCMDESC_OPTIMIZATION_ALLOWTRIPLENEST 11
776 #define CV_GLCMDESC_OPTIMIZATION_HISTOGRAM 4
778 #define CV_GLCMDESC_ENTROPY 0
779 #define CV_GLCMDESC_ENERGY 1
780 #define CV_GLCMDESC_HOMOGENITY 2
781 #define CV_GLCMDESC_CONTRAST 3
782 #define CV_GLCMDESC_CLUSTERTENDENCY 4
783 #define CV_GLCMDESC_CLUSTERSHADE 5
784 #define CV_GLCMDESC_CORRELATION 6
785 #define CV_GLCMDESC_CORRELATIONINFO1 7
786 #define CV_GLCMDESC_CORRELATIONINFO2 8
787 #define CV_GLCMDESC_MAXIMUMPROBABILITY 9
789 #define CV_GLCM_ALL 0
790 #define CV_GLCM_GLCM 1
791 #define CV_GLCM_DESC 2
793 typedef struct CvGLCM CvGLCM;
795 CVAPI(CvGLCM*) cvCreateGLCM( const IplImage* srcImage,
797 const int* stepDirections CV_DEFAULT(0),
798 int numStepDirections CV_DEFAULT(0),
799 int optimizationType CV_DEFAULT(CV_GLCM_OPTIMIZATION_NONE));
801 CVAPI(void) cvReleaseGLCM( CvGLCM** GLCM, int flag CV_DEFAULT(CV_GLCM_ALL));
803 CVAPI(void) cvCreateGLCMDescriptors( CvGLCM* destGLCM,
804 int descriptorOptimizationType
805 CV_DEFAULT(CV_GLCMDESC_OPTIMIZATION_ALLOWDOUBLENEST));
807 CVAPI(double) cvGetGLCMDescriptor( CvGLCM* GLCM, int step, int descriptor );
809 CVAPI(void) cvGetGLCMDescriptorStatistics( CvGLCM* GLCM, int descriptor,
810 double* average, double* standardDeviation );
812 CVAPI(IplImage*) cvCreateGLCMImage( CvGLCM* GLCM, int step );
814 /****************************************************************************************\
815 * Face eyes&mouth tracking *
816 \****************************************************************************************/
819 typedef struct CvFaceTracker CvFaceTracker;
821 #define CV_NUM_FACE_ELEMENTS 3
822 enum CV_FACE_ELEMENTS
825 CV_FACE_LEFT_EYE = 1,
826 CV_FACE_RIGHT_EYE = 2
/* Initializes (or re-initializes, when pFaceTracking is non-NULL) the face
   tracker from a grayscale image and the initial element rectangles */
CVAPI(CvFaceTracker*) cvInitFaceTracker(CvFaceTracker* pFaceTracking, const IplImage* imgGray,
                                        CvRect* pRects, int nRects);

/* Tracks the face elements on the next grayscale frame, updating pRects and
   reporting the rotation point/angle through ptRotate/dbAngleRotate.
   NOTE(review): return-value convention inferred — confirm in the implementation. */
CVAPI(int) cvTrackFace( CvFaceTracker* pFaceTracker, IplImage* imgGray,
                        CvRect* pRects, int nRects,
                        CvPoint* ptRotate, double* dbAngleRotate);

/* Releases the face tracker created by cvInitFaceTracker */
CVAPI(void) cvReleaseFaceTracker(CvFaceTracker** ppFaceTracker);
837 typedef struct CvFace
844 CvSeq * cvFindFace(IplImage * Image,CvMemStorage* storage);
845 CvSeq * cvPostBoostingFindFace(IplImage * Image,CvMemStorage* storage);
848 /****************************************************************************************\
850 \****************************************************************************************/
/* boolean type used by the 3d tracker API (0 = false, nonzero = true) */
typedef unsigned char CvBool;

    CvPoint2D32f p; // pgruebele: so we do not lose precision, this needs to be float
} Cv3dTracker2dTrackedObject;
860 CV_INLINE Cv3dTracker2dTrackedObject cv3dTracker2dTrackedObject(int id, CvPoint2D32f p)
862 Cv3dTracker2dTrackedObject r;
871 CvPoint3D32f p; // location of the tracked object
872 } Cv3dTrackerTrackedObject;
874 CV_INLINE Cv3dTrackerTrackedObject cv3dTrackerTrackedObject(int id, CvPoint3D32f p)
876 Cv3dTrackerTrackedObject r;
885 float mat[4][4]; /* maps camera coordinates to world coordinates */
886 CvPoint2D32f principal_point; /* copied from intrinsics so this structure */
887 /* has all the info we need */
888 } Cv3dTrackerCameraInfo;
892 CvPoint2D32f principal_point;
893 float focal_length[2];
895 } Cv3dTrackerCameraIntrinsics;
897 CVAPI(CvBool) cv3dTrackerCalibrateCameras(int num_cameras,
898 const Cv3dTrackerCameraIntrinsics camera_intrinsics[], /* size is num_cameras */
901 IplImage *samples[], /* size is num_cameras */
902 Cv3dTrackerCameraInfo camera_info[]); /* size is num_cameras */
904 CVAPI(int) cv3dTrackerLocateObjects(int num_cameras, int num_objects,
905 const Cv3dTrackerCameraInfo camera_info[], /* size is num_cameras */
906 const Cv3dTracker2dTrackedObject tracking_info[], /* size is num_objects*num_cameras */
907 Cv3dTrackerTrackedObject tracked_objects[]); /* size is num_objects */
908 /****************************************************************************************
909 tracking_info is a rectangular array; one row per camera, num_objects elements per row.
910 The id field of any unused slots must be -1. Ids need not be ordered or consecutive. On
911 completion, the return value is the number of objects located; i.e., the number of objects
912 visible by more than one camera. The id field of any unused slots in tracked objects is
914 ****************************************************************************************/
917 /****************************************************************************************\
918 * Skeletons and Linear-Contour Models *
919 \****************************************************************************************/
921 typedef enum CvLeeParameters
932 #define CV_NEXT_VORONOISITE2D( SITE ) ((SITE)->edge[0]->site[((SITE)->edge[0]->site[0] == (SITE))])
933 #define CV_PREV_VORONOISITE2D( SITE ) ((SITE)->edge[1]->site[((SITE)->edge[1]->site[0] == (SITE))])
934 #define CV_FIRST_VORONOIEDGE2D( SITE ) ((SITE)->edge[0])
935 #define CV_LAST_VORONOIEDGE2D( SITE ) ((SITE)->edge[1])
936 #define CV_NEXT_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[(EDGE)->site[0] != (SITE)])
937 #define CV_PREV_VORONOIEDGE2D( EDGE, SITE ) ((EDGE)->next[2 + ((EDGE)->site[0] != (SITE))])
938 #define CV_VORONOIEDGE2D_BEGINNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] != (SITE))])
939 #define CV_VORONOIEDGE2D_ENDNODE( EDGE, SITE ) ((EDGE)->node[((EDGE)->site[0] == (SITE))])
940 #define CV_TWIN_VORONOISITE2D( SITE, EDGE ) ( (EDGE)->site[((EDGE)->site[0] == (SITE))])
942 #define CV_VORONOISITE2D_FIELDS() \
943 struct CvVoronoiNode2D *node[2]; \
944 struct CvVoronoiEdge2D *edge[2];
946 typedef struct CvVoronoiSite2D
948 CV_VORONOISITE2D_FIELDS()
949 struct CvVoronoiSite2D *next[2];
952 #define CV_VORONOIEDGE2D_FIELDS() \
953 struct CvVoronoiNode2D *node[2]; \
954 struct CvVoronoiSite2D *site[2]; \
955 struct CvVoronoiEdge2D *next[4];
957 typedef struct CvVoronoiEdge2D
959 CV_VORONOIEDGE2D_FIELDS()
962 #define CV_VORONOINODE2D_FIELDS() \
963 CV_SET_ELEM_FIELDS(CvVoronoiNode2D) \
967 typedef struct CvVoronoiNode2D
969 CV_VORONOINODE2D_FIELDS()
972 #define CV_VORONOIDIAGRAM2D_FIELDS() \
976 typedef struct CvVoronoiDiagram2D
978 CV_VORONOIDIAGRAM2D_FIELDS()
979 } CvVoronoiDiagram2D;
/* Computes Voronoi Diagram for given polygons with holes.
   The diagram is allocated from VoronoiStorage and returned via
   *VoronoiDiagram. */
CVAPI(int) cvVoronoiDiagramFromContour(CvSeq* ContourSeq,
                                       CvVoronoiDiagram2D** VoronoiDiagram,
                                       CvMemStorage* VoronoiStorage,
                                       CvLeeParameters contour_type CV_DEFAULT(CV_LEE_INT),
                                       int contour_orientation CV_DEFAULT(-1),
                                       int attempt_number CV_DEFAULT(10));

/* Computes Voronoi Diagram for domains in given image */
CVAPI(int) cvVoronoiDiagramFromImage(IplImage* pImage,
                                     CvVoronoiDiagram2D** VoronoiDiagram,
                                     CvMemStorage* VoronoiStorage,
                                     CvLeeParameters regularization_method CV_DEFAULT(CV_LEE_NON),
                                     float approx_precision CV_DEFAULT(CV_LEE_AUTO));

/* Deallocates the storage that holds the Voronoi diagram */
CVAPI(void) cvReleaseVoronoiStorage(CvVoronoiDiagram2D* VoronoiDiagram,
                                    CvMemStorage** pVoronoiStorage);
/*********************** Linear-Contour Model ****************************/

/* An edge of the linear-contour (hybrid) model graph.
   NOTE(review): the body of this typedef beyond the common graph-edge fields
   (and its braces) is truncated in this copy of the header. */
typedef struct CvLCMEdge
    CV_GRAPH_EDGE_FIELDS()

/* A node of the linear-contour model graph.
   NOTE(review): body truncated in this copy, as above. */
typedef struct CvLCMNode
    CV_GRAPH_VERTEX_FIELDS()

/* Computes hybrid model from Voronoi Diagram.
   ("Contor" is a historical misspelling of "Contour", kept in the exported
   symbol for compatibility.)
   NOTE(review): the tail of this parameter list is truncated in this copy. */
CVAPI(CvGraph*) cvLinearContorModelFromVoronoiDiagram(CvVoronoiDiagram2D* VoronoiDiagram,

/* Releases hybrid model storage */
CVAPI(int) cvReleaseLinearContorModelStorage(CvGraph** Graph);
/* two stereo-related functions */

/* Computes a 3x3 perspective transform matrix from a quadrangle given by
   four vertices.
   NOTE(review): the tail of this parameter list is truncated in this copy of
   the header; recover it from a pristine header. */
CVAPI(void) cvInitPerspectiveTransform( CvSize size, const CvPoint2D32f vertex[4], double matrix[3][3],

/*CVAPI(void) cvInitStereoRectification( CvStereoCamera* params,
    CvArr* rectMap1, CvArr* rectMap2,
    int do_undistortion );*/
/*************************** View Morphing Functions ************************/

/* The order of the functions corresponds to the order in which they should
   appear in the view morphing pipeline.
   NOTE(review): several prototypes below are truncated in this copy of the
   header (parameter lists end mid-signature); recover the full declarations
   from a pristine header before compiling. */

/* Finds ending points of scanlines on left and right images of stereo-pair */
CVAPI(void) cvMakeScanlines( const CvMatrix3* matrix, CvSize img_size,
                             int* scanlines1, int* scanlines2,
                             int* lengths1, int* lengths2,

/* Grab pixel values from scanlines and stores them sequentially
   (some sort of perspective image transform) */
CVAPI(void) cvPreWarpImage( int line_count,

/* Approximate each grabbed scanline by a sequence of runs
   (lossy run-length compression) */
CVAPI(void) cvFindRuns( int line_count,

/* Compares two sets of compressed scanlines */
CVAPI(void) cvDynamicCorrespondMulti( int line_count,

/* Finds scanline ending coordinates for some intermediate "virtual" camera position */
CVAPI(void) cvMakeAlphaScanlines( int* scanlines1,

/* Blends data of the left and right image scanlines to get
   pixel values of "virtual" image scanlines */
CVAPI(void) cvMorphEpilinesMulti( int line_count,

/* Does reverse warping of the morphing result to make
   it fill the destination image rectangle */
CVAPI(void) cvPostWarpImage( int line_count,

/* Deletes Moire (missed pixels that appear due to discretization) */
CVAPI(void) cvDeleteMoire( IplImage* img );
1116 /****************************************************************************************\
1117 * Background/foreground segmentation *
1118 \****************************************************************************************/
1120 /* We discriminate between foreground and background pixels
1121 * by building and maintaining a model of the background.
 * Any pixel which does not fit this model is then deemed to be foreground.
1125 * At present we support two core background models,
1126 * one of which has two variations:
1128 * o CV_BG_MODEL_FGD: latest and greatest algorithm, described in
1130 * Foreground Object Detection from Videos Containing Complex Background.
1131 * Liyuan Li, Weimin Huang, Irene Y.H. Gu, and Qi Tian.
1133 * http://muq.org/~cynbe/bib/foreground-object-detection-from-videos-containing-complex-background.pdf
1135 * o CV_BG_MODEL_FGD_SIMPLE:
1136 * A code comment describes this as a simplified version of the above,
1137 * but the code is in fact currently identical. (Cynbe 2008-05-25)
1139 * o CV_BG_MODEL_MOG: "Mixture of Gaussians", older algorithm, described in
1141 * Moving target classification and tracking from real-time video.
 *      A. Lipton, H. Fujiyoshi, R. Patil
1143 * Proceedings IEEE Workshop on Application of Computer Vision pp 8-14 1998
1144 * http://www.vision.cs.chubu.ac.jp/04/pdf/VSAM02.pdf
1146 * Learning patterns of activity using real-time tracking
1147 * C Stauffer and W Grimson August 2000
1148 * IEEE Transactions on Pattern Analysis and Machine Intelligence 22(8):747-757
1149 * http://people.csail.mit.edu/people/stauffer/Home/_papers/vsam-pami-tracking.ps
1151 * Additional background may be found on the Wiki page
1153 * http://opencvlibrary.sourceforge.net/VideoSurveillance
1155 * which in particular recommends the Intel semi-popular overview article
1157 * Computer Vision Workload Analysis: Case Study of Video Surveillance Systems
1158 * Chen et al, Intel Technology Journal V09:02 , 2005 12p
1159 * http://developer.intel.com/technology/itj/2005/volume09issue02/art02_computer_vision/vol09_art02.pdf
1161 * which has both a good overview of the blobtracker software in particular,
1162 * and also many references to introductory (and advanced) papers on computer vision.
/* Background-model type selectors (stored in CvBGStatModel::type): */
#define CV_BG_MODEL_FGD         0   /* Li et al. foreground detection (see references above) */
#define CV_BG_MODEL_MOG         1   /* "Mixture of Gaussians". */
#define CV_BG_MODEL_FGD_SIMPLE  2   /* per the note above, currently identical to CV_BG_MODEL_FGD */

struct CvBGStatModel;

/* Destructor callback: releases the model and everything it owns. */
typedef void (CV_CDECL * CvReleaseBGStatModel)( struct CvBGStatModel** bg_model );
/* Update callback: feeds the next frame to the model and returns the number
   of found foreground regions (see cvUpdateBGStatModel below). */
typedef int (CV_CDECL * CvUpdateBGStatModel)( IplImage* curr_frame, struct CvBGStatModel* bg_model );
/* Fields shared by every background statistical model.  Concrete models
   (CvFGDStatModel, CvGaussBGModel below) begin with this macro so a pointer
   to them can be used as a generic CvBGStatModel*.
   (The quotes around foreground_regions were mis-encoded Windows-1252
   characters in this copy; replaced with plain ASCII quotes.) */
#define CV_BG_STAT_MODEL_FIELDS()                                                   \
    int             type; /*type of BG model*/                                      \
    CvReleaseBGStatModel release;                                                   \
    CvUpdateBGStatModel update;                                                     \
    IplImage*       background;   /*8UC3 reference background image*/               \
    IplImage*       foreground;   /*8UC1 foreground image*/                         \
    IplImage**      layers;       /*8UC3 reference background image, can be null */ \
    int             layer_count;  /* can be zero */                                 \
    CvMemStorage*   storage;      /*storage for "foreground_regions"*/              \
    CvSeq*          foreground_regions /*foreground object contours*/
1187 typedef struct CvBGStatModel
1189 CV_BG_STAT_MODEL_FIELDS();
1195 // Releases memory used by BGStatModel
1196 CV_INLINE void cvReleaseBGStatModel( CvBGStatModel** bg_model )
1198 if( bg_model && *bg_model && (*bg_model)->release )
1199 (*bg_model)->release( bg_model );
1202 // Updates statistical model and returns number of found foreground regions
1203 CV_INLINE int cvUpdateBGStatModel( IplImage* current_frame, CvBGStatModel* bg_model )
1205 return bg_model && bg_model->update ? bg_model->update( current_frame, bg_model ) : 0;
// Performs FG post-processing using segmentation
// (all pixels of a region will be classified as foreground if majority of pixels of the region are FG).
//
// parameters:
//      segments - pointer to result of segmentation (for example MeanShiftSegmentation)
//      bg_model - pointer to CvBGStatModel structure
CVAPI(void) cvRefineForegroundMaskBySegm( CvSeq* segments, CvBGStatModel* bg_model );

/* Common use change detection function: compares two frames and fills the
   single-channel change mask */
CVAPI(int) cvChangeDetection( IplImage* prev_frame,
                              IplImage* curr_frame,
                              IplImage* change_mask );
/*
   Interface of ACM MM2003 algorithm
   (comment delimiters were missing around this line in this copy; restored)
*/

/* Default parameters of foreground detection algorithm
   (see CvFGDStatModelParams below for the meaning of each value): */
#define  CV_BGFG_FGD_LC     128
#define  CV_BGFG_FGD_N1C    15
#define  CV_BGFG_FGD_N2C    25

#define  CV_BGFG_FGD_LCC    64
#define  CV_BGFG_FGD_N1CC   25
#define  CV_BGFG_FGD_N2CC   40

/* Background reference image update parameter: */
#define  CV_BGFG_FGD_ALPHA_1  0.1f

/* stat model update parameter
 * 0.002f ~ 1K frame(~45sec), 0.005 ~ 18sec (if 25fps and absolutely static BG)
 */
#define  CV_BGFG_FGD_ALPHA_2  0.005f

/* start value for alpha parameter (to fast initiate statistic model) */
#define  CV_BGFG_FGD_ALPHA_3  0.1f

#define  CV_BGFG_FGD_DELTA    2

#define  CV_BGFG_FGD_T        0.9f

#define  CV_BGFG_FGD_MINAREA  15.f

#define  CV_BGFG_FGD_BG_UPDATE_TRESH  0.5f
1252 /* See the above-referenced Li/Huang/Gu/Tian paper
1253 * for a full description of these background-model
1254 * tuning parameters.
1256 * Nomenclature: 'c' == "color", a three-component red/green/blue vector.
1257 * We use histograms of these to model the range of
1258 * colors we've seen at a given background pixel.
1260 * 'cc' == "color co-occurrence", a six-component vector giving
1261 * RGB color for both this frame and preceding frame.
1262 * We use histograms of these to model the range of
1263 * color CHANGES we've seen at a given background pixel.
/* Tuning parameters of the FGD (Li et al.) background model; defaults are
   the CV_BGFG_FGD_* constants above.
   NOTE(review): the opening/closing braces of this typedef were missing in
   this copy of the header; restored so the declaration parses. */
typedef struct CvFGDStatModelParams
{
    int    Lc;    /* Quantized levels per 'color' component.  Power of two, typically 32, 64 or 128. */
    int    N1c;   /* Number of color vectors used to model normal background color variation at a given pixel. */
    int    N2c;   /* Number of color vectors retained at given pixel.  Must be > N1c, typically ~ 5/3 of N1c. */
                  /* Used to allow the first N1c vectors to adapt over time to changing background. */

    int    Lcc;   /* Quantized levels per 'color co-occurrence' component.  Power of two, typically 16, 32 or 64. */
    int    N1cc;  /* Number of color co-occurrence vectors used to model normal background color variation at a given pixel. */
    int    N2cc;  /* Number of color co-occurrence vectors retained at given pixel.  Must be > N1cc, typically ~ 5/3 of N1cc. */
                  /* Used to allow the first N1cc vectors to adapt over time to changing background. */

    int    is_obj_without_holes; /* If TRUE we ignore holes within foreground blobs. Defaults to TRUE. */
    int    perform_morphing;     /* Number of erode-dilate-erode foreground-blob cleanup iterations. */
                                 /* These erase one-pixel junk blobs and merge almost-touching blobs. Default value is 1. */

    float  alpha1;  /* How quickly we forget old background pixel values seen.  Typically set to 0.1 */
    float  alpha2;  /* "Controls speed of feature learning".  Depends on T.  Typical value circa 0.005. */
    float  alpha3;  /* Alternate to alpha2, used (e.g.) for quicker initial convergence.  Typical value 0.1. */

    float  delta;   /* Affects color and color co-occurrence quantization, typically set to 2. */
    float  T;       /* "A percentage value which determines when new features can be recognized as new background." (Typically 0.9).*/
    float  minArea; /* Discard foreground blobs whose bounding box is smaller than this threshold. */
}
CvFGDStatModelParams;
/* Per-pixel 'color' histogram entry for the FGD model.
   NOTE(review): the member lists and braces of these two typedefs are
   truncated in this copy of the header; recover them from a pristine header
   before compiling. */
typedef struct CvBGPixelCStatTable
CvBGPixelCStatTable;

/* Per-pixel 'color co-occurrence' histogram entry (truncated here too). */
typedef struct CvBGPixelCCStatTable
CvBGPixelCCStatTable;
/* Aggregate per-pixel statistics of the FGD model: the color and color
   co-occurrence tables plus training flags.
   NOTE(review): the opening brace, some leading members, and the closing of
   this typedef are truncated in this copy of the header. */
typedef struct CvBGPixelStat
    CvBGPixelCStatTable* ctable;    /* per-pixel 'color' stat table */
    CvBGPixelCCStatTable* cctable;  /* per-pixel 'color co-occurrence' stat table */
    uchar is_trained_st_model;
    uchar is_trained_dyn_model;
/* Full state of the FGD background model.  Begins with the common
   CV_BG_STAT_MODEL_FIELDS() so it can be used through a CvBGStatModel*.
   NOTE(review): the braces and some members of this typedef are truncated in
   this copy of the header. */
typedef struct CvFGDStatModel
    CV_BG_STAT_MODEL_FIELDS();
    CvBGPixelStat* pixel_stat;   /* one entry per pixel */
    IplImage* prev_frame;        /* previously processed frame */
    CvFGDStatModelParams params;
/* Creates FGD model.  Passing parameters == NULL presumably selects the
   CV_BGFG_FGD_* defaults above -- TODO(review): confirm against the
   implementation. */
CVAPI(CvBGStatModel*) cvCreateFGDStatModel( IplImage* first_frame,
                    CvFGDStatModelParams* parameters CV_DEFAULT(NULL));
1333 Interface of Gaussian mixture algorithm
1335 "An improved adaptive background mixture model for real-time tracking with shadow detection"
1336 P. KadewTraKuPong and R. Bowden,
1337 Proc. 2nd European Workshp on Advanced Video-Based Surveillance Systems, 2001."
1338 http://personal.ee.surrey.ac.uk/Personal/R.Bowden/publications/avbs01/avbs01.pdf
/* Note: "MOG" == "Mixture Of Gaussians": */

#define CV_BGFG_MOG_MAX_NGAUSSIANS 500

/* default parameters of gaussian background detection algorithm */
#define CV_BGFG_MOG_BACKGROUND_THRESHOLD    0.7   /* threshold sum of weights for background test */
#define CV_BGFG_MOG_STD_THRESHOLD           2.5   /* lambda=2.5 is 99% */
#define CV_BGFG_MOG_WINDOW_SIZE             200   /* Learning rate; alpha = 1/CV_GBG_WINDOW_SIZE */
#define CV_BGFG_MOG_NGAUSSIANS              5     /* = K = number of Gaussians in mixture */
#define CV_BGFG_MOG_WEIGHT_INIT             0.05
#define CV_BGFG_MOG_SIGMA_INIT              30
#define CV_BGFG_MOG_MINAREA                 15.f

/* number of per-pixel color channels modelled (array size of mean/variance
   in CvGaussBGValues below) */
#define CV_BGFG_MOG_NCOLORS                 3
/* Parameters of the Gaussian-mixture background model; defaults are the
   CV_BGFG_MOG_* constants above.
   NOTE(review): the opening brace -- and apparently at least one member line
   between win_size and bg_threshold -- is truncated in this copy of the
   header; recover from a pristine header before compiling. */
typedef struct CvGaussBGStatModelParams
    int win_size; /* = 1/alpha */
    double bg_threshold, std_threshold, minArea;
    double weight_init, variance_init;
}CvGaussBGStatModelParams;
/* State of one Gaussian mixture component.
   NOTE(review): the braces and leading members of this typedef are truncated
   in this copy of the header. */
typedef struct CvGaussBGValues
    double variance[CV_BGFG_MOG_NCOLORS]; /* per-channel variance */
    double mean[CV_BGFG_MOG_NCOLORS];     /* per-channel mean */
1374 typedef struct CvGaussBGPoint
1376 CvGaussBGValues* g_values;
/* Full state of the Gaussian-mixture background model.  Begins with the
   common CV_BG_STAT_MODEL_FIELDS() so it can be used through a
   CvBGStatModel*.
   NOTE(review): the braces and trailing members of this typedef are
   truncated in this copy of the header. */
typedef struct CvGaussBGModel
    CV_BG_STAT_MODEL_FIELDS();
    CvGaussBGStatModelParams params;
    CvGaussBGPoint* g_point;  /* one entry per pixel */
/* Creates Gaussian mixture background model.  Passing parameters == NULL
   presumably selects the CV_BGFG_MOG_* defaults above -- TODO(review):
   confirm against the implementation. */
CVAPI(CvBGStatModel*) cvCreateGaussianBGModel( IplImage* first_frame,
                CvGaussBGStatModelParams* parameters CV_DEFAULT(NULL));
1401 /****************************************************************************************\
1402 * Calibration engine *
1403 \****************************************************************************************/
/* Supported calibration-target (etalon) types; see CvCalibFilter::SetEtalon.
   NOTE(review): the enum's braces/closing were missing in this copy of the
   header; restored so the declaration parses. */
typedef enum CvCalibEtalonType
{
    CV_CALIB_ETALON_USER = -1,   /* point set supplied by the caller */
    CV_CALIB_ETALON_CHESSBOARD = 0,
    CV_CALIB_ETALON_CHECKERBOARD = CV_CALIB_ETALON_CHESSBOARD /* alias */
}
CvCalibEtalonType;
/* Multi-camera (up to MAX_CAMERAS) calibration engine: collects etalon
   detections over a series of frames, then computes per-camera and stereo
   parameters, and can undistort/rectify images with the result.
   NOTE(review): this copy of the header is missing several structural lines
   of the class (opening brace, access specifiers, the constructor
   declaration, some data members, return types of a method, and the closing
   "};"); recover them from a pristine header before compiling. */
class CV_EXPORTS CvCalibFilter
    /* Constructor & destructor */
    virtual ~CvCalibFilter();

    /* Sets etalon type - one for all cameras.
       etalonParams is used in case of pre-defined etalons (such as chessboard).
       Number of elements in etalonParams is determined by etalonType.
       E.g., if etalon type is CV_ETALON_TYPE_CHESSBOARD then:
         etalonParams[0] is number of squares per one side of etalon
         etalonParams[1] is number of squares per another side of etalon
         etalonParams[2] is linear size of squares in the board in arbitrary units.
       pointCount & points are used in case of
       CV_CALIB_ETALON_USER (user-defined) etalon. */
        SetEtalon( CvCalibEtalonType etalonType, double* etalonParams,
                   int pointCount = 0, CvPoint2D32f* points = 0 );

    /* Retrieves etalon parameters and/or points */
    virtual CvCalibEtalonType
        GetEtalon( int* paramCount = 0, const double** etalonParams = 0,
                   int* pointCount = 0, const CvPoint2D32f** etalonPoints = 0 ) const;

    /* Sets number of cameras calibrated simultaneously. It is equal to 1 initially */
    virtual void SetCameraCount( int cameraCount );

    /* Retrieves number of cameras */
    int GetCameraCount() const { return cameraCount; }

    /* Starts cameras calibration */
    virtual bool SetFrames( int totalFrames );

    /* Stops cameras calibration */
    virtual void Stop( bool calibrate = false );

    /* Checks whether calibration has completed */
    bool IsCalibrated() const { return isCalibrated; }

    /* Feeds another series of snapshots (one per each camera) to the filter.
       Etalon points on these images are found automatically.
       If the function can't locate points, it returns false */
    virtual bool FindEtalon( IplImage** imgs );

    /* The same but takes matrices */
    virtual bool FindEtalon( CvMat** imgs );

    /* Lower-level function for feeding filter with already found etalon points.
       Array of point arrays for each camera is passed. */
    virtual bool Push( const CvPoint2D32f** points = 0 );

    /* Returns total number of accepted frames and, optionally,
       total number of frames to collect */
    virtual int GetFrameCount( int* framesTotal = 0 ) const;

    /* Retrieves camera parameters for specified camera.
       If camera is not calibrated the function returns 0 */
    virtual const CvCamera* GetCameraParams( int idx = 0 ) const;

    virtual const CvStereoCamera* GetStereoParams() const;

    /* Sets camera parameters for all cameras */
    virtual bool SetCameraParams( CvCamera* params );

    /* Saves all camera parameters to file */
    virtual bool SaveCameraParams( const char* filename );

    /* Loads all camera parameters from file */
    virtual bool LoadCameraParams( const char* filename );

    /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
    virtual bool Undistort( IplImage** src, IplImage** dst );

    /* Undistorts images using camera parameters. Some of src pointers can be NULL. */
    virtual bool Undistort( CvMat** src, CvMat** dst );

    /* Returns array of etalon points detected/partially detected
       on the latest frame for idx-th camera */
    virtual bool GetLatestPoints( int idx, CvPoint2D32f** pts,
                                  int* count, bool* found );

    /* Draw the latest detected/partially detected etalon */
    virtual void DrawPoints( IplImage** dst );

    /* Draw the latest detected/partially detected etalon */
    virtual void DrawPoints( CvMat** dst );

    virtual bool Rectify( IplImage** srcarr, IplImage** dstarr );
    virtual bool Rectify( CvMat** srcarr, CvMat** dstarr );

    enum { MAX_CAMERAS = 3 };

    /* etalon description */
    CvCalibEtalonType etalonType;
    int etalonParamCount;
    double* etalonParams;
    int etalonPointCount;
    CvPoint2D32f* etalonPoints;

    CvMemStorage* storage;

    /* per-camera calibration state */
    CvCamera cameraParams[MAX_CAMERAS];
    CvStereoCamera stereo;
    CvPoint2D32f* points[MAX_CAMERAS];
    CvMat* undistMap[MAX_CAMERAS][2];

    /* latest detection results, one slot per camera */
    int latestCounts[MAX_CAMERAS];
    CvPoint2D32f* latestPoints[MAX_CAMERAS];
    CvMat* rectMap[MAX_CAMERAS][2];

    /* Added by Valery */
    //CvStereoCamera stereoParams;
1539 #include "cvaux.hpp"
1540 #include "cvvidsurv.hpp"
1541 /*#include "cvmat.hpp"*/