1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
44 /****************************************************************************************\
46 \****************************************************************************************/
48 const char* cvTsGetTypeName( int type )
50 static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" };
51 return type_names[CV_MAT_DEPTH(type)];
55 int cvTsTypeByName( const char* name )
58 for( i = 0; i < CV_DEPTH_MAX; i++ )
59 if( strcmp(name, cvTsGetTypeName(i)) == 0 )
// Fills matrix `a` with uniformly-distributed random values in
// [param1, param0) per channel (param0 = upper bound, param1 = lower bound).
// NOTE(review): this listing is elided — brace, case-label and break lines
// are missing; comments describe only the visible logic.
65 void cvTsRandUni( CvRNG* rng, CvMat* a, CvScalar param0, CvScalar param1 )
67 int i, j, k, cn, ncols;
68 CvScalar scale = param0;
69 CvScalar delta = param1;
// 2^-32: converts a 32-bit random integer into a [0,1) fraction (64f case).
70 double C = 1./(65536.*65536.);
72 cn = CV_MAT_CN(a->type);
// Pre-scale per channel: s = range width; (elided code presumably folds the
// RNG's integer range into scale/delta — TODO confirm against full source).
75 for( k = 0; k < 4; k++ )
77 double s = scale.val[k] - delta.val[k];
82 delta.val[k] = scale.val[k];
// Per-row fill; one case per depth below (case labels elided).
88 for( i = 0; i < a->rows; i++ )
90 uchar* data = a->data.ptr + i*a->step;
92 switch( CV_MAT_DEPTH(a->type) )
95 for( j = 0; j < ncols; j += cn )
96 for( k = 0; k < cn; k++ )
98 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
99 ((uchar*)data)[j + k] = CV_CAST_8U(val);
103 for( j = 0; j < ncols; j += cn )
104 for( k = 0; k < cn; k++ )
106 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
107 ((schar*)data)[j + k] = CV_CAST_8S(val);
111 for( j = 0; j < ncols; j += cn )
112 for( k = 0; k < cn; k++ )
114 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
115 ((ushort*)data)[j + k] = CV_CAST_16U(val);
119 for( j = 0; j < ncols; j += cn )
120 for( k = 0; k < cn; k++ )
122 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
123 ((short*)data)[j + k] = CV_CAST_16S(val);
127 for( j = 0; j < ncols; j += cn )
128 for( k = 0; k < cn; k++ )
130 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
131 ((int*)data)[j + k] = val;
135 for( j = 0; j < ncols; j += cn )
136 for( k = 0; k < cn; k++ )
138 double val = cvTsRandInt(rng)*scale.val[k] + delta.val[k];
139 ((float*)data)[j + k] = (float)val;
// 64f: combine two 32-bit draws (second scaled by C = 2^-32) for extra
// mantissa precision before applying scale/delta.
143 for( j = 0; j < ncols; j += cn )
144 for( k = 0; k < cn; k++ )
146 double val = cvTsRandInt(rng);
147 val = (val + cvTsRandInt(rng)*C)*scale.val[k] + delta.val[k];
148 ((double*)data)[j + k] = val;
// Zeroes matrix `c`; with a mask, only pixels where the (elided) mask test
// passes are cleared. NOTE(review): the branch between the whole-row memset
// and the masked per-element path is elided from this listing.
159 void cvTsZero( CvMat* c, const CvMat* mask )
161 int i, j, elem_size = CV_ELEM_SIZE(c->type), width = c->cols;
163 for( i = 0; i < c->rows; i++ )
// No mask (presumably): clear the whole row in one memset.
166 memset( c->data.ptr + i*c->step, 0, width*elem_size );
// Masked path: walk the row element-by-element.
169 const uchar* mrow = mask->data.ptr + mask->step*i;
170 uchar* cptr = c->data.ptr + c->step*i;
171 for( j = 0; j < width; j++, cptr += elem_size )
173 memset( cptr, 0, elem_size );
179 // initializes scaled identity matrix
// Sets the main-diagonal elements of `c` to diag_value (all channels);
// off-diagonal elements are left untouched here (callers presumably zero
// the matrix first — TODO confirm).
180 void cvTsSetIdentity( CvMat* c, CvScalar diag_value )
// Diagonal length is min(rows, cols) for non-square matrices.
184 width = MIN(c->rows, c->cols);
185 for( i = 0; i < width; i++ )
186 cvSet2D( c, i, i, diag_value );
190 // copies selected region of one array to another array
// Copies `a` into `b`; if `mask` is given (8u/8s single-channel, same size),
// only elements where the (elided) mask test passes are copied.
// NOTE(review): brace and if/else lines are elided from this listing.
191 void cvTsCopy( const CvMat* a, CvMat* b, const CvMat* mask )
196 el_size = CV_ELEM_SIZE(a->type);
// Mask must be same size as the source and a single 8-bit channel.
200 assert( CV_ARE_SIZES_EQ(a,mask) &&
201 (CV_MAT_TYPE(mask->type) == CV_8UC1 ||
202 CV_MAT_TYPE(mask->type) == CV_8SC1 ));
205 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) );
210 for( i = 0; i < a->rows; i++ )
212 uchar* a_data = a->data.ptr + a->step*i;
213 uchar* b_data = b->data.ptr + b->step*i;
// Unmasked path (presumably): bulk row copy.
216 memcpy( b_data, a_data, ncols );
// Masked path: byte-wise copy of each selected element.
219 uchar* m_data = mask->data.ptr + mask->step*i;
221 for( j = 0; j < ncols; j++, b_data += el_size, a_data += el_size )
225 for( k = 0; k < el_size; k++ )
226 b_data[k] = a_data[k];
// Converts `a` to the depth of `b` (same size and channel count) by going
// through a per-row double buffer: read row -> double[], then write
// double[] -> destination depth with rounding/saturation.
// NOTE(review): case labels and break lines are elided from this listing.
234 void cvTsConvert( const CvMat* a, CvMat* b )
236 int i, j, ncols = b->cols*CV_MAT_CN(b->type);
239 assert( CV_ARE_SIZES_EQ(a,b) && CV_ARE_CNS_EQ(a,b) );
// One row of doubles as the intermediate representation.
240 buf = (double*)cvStackAlloc(ncols*sizeof(buf[0]));
242 for( i = 0; i < b->rows; i++ )
244 uchar* a_data = a->data.ptr + i*a->step;
245 uchar* b_data = b->data.ptr + i*b->step;
// Load: source depth -> double.
247 switch( CV_MAT_DEPTH(a->type) )
250 for( j = 0; j < ncols; j++ )
251 buf[j] = ((uchar*)a_data)[j];
254 for( j = 0; j < ncols; j++ )
255 buf[j] = ((schar*)a_data)[j];
258 for( j = 0; j < ncols; j++ )
259 buf[j] = ((ushort*)a_data)[j];
262 for( j = 0; j < ncols; j++ )
263 buf[j] = ((short*)a_data)[j];
266 for( j = 0; j < ncols; j++ )
267 buf[j] = ((int*)a_data)[j];
270 for( j = 0; j < ncols; j++ )
271 buf[j] = ((float*)a_data)[j];
274 for( j = 0; j < ncols; j++ )
275 buf[j] = ((double*)a_data)[j];
// Store: double -> destination depth, rounding then saturating for ints.
282 switch( CV_MAT_DEPTH(b->type) )
285 for( j = 0; j < ncols; j++ )
287 int val = cvRound(buf[j]);
288 ((uchar*)b_data)[j] = CV_CAST_8U(val);
292 for( j = 0; j < ncols; j++ )
294 int val = cvRound(buf[j]);
295 ((schar*)b_data)[j] = CV_CAST_8S(val);
299 for( j = 0; j < ncols; j++ )
301 int val = cvRound(buf[j]);
302 ((ushort*)b_data)[j] = CV_CAST_16U(val);
306 for( j = 0; j < ncols; j++ )
308 int val = cvRound(buf[j]);
309 ((short*)b_data)[j] = CV_CAST_16S(val);
313 for( j = 0; j < ncols; j++ )
315 int val = cvRound(buf[j]);
316 ((int*)b_data)[j] = CV_CAST_32S(val);
320 for( j = 0; j < ncols; j++ )
321 ((float*)b_data)[j] = CV_CAST_32F(buf[j]);
324 for( j = 0; j < ncols; j++ )
325 ((double*)b_data)[j] = CV_CAST_64F(buf[j]);
334 // extracts a single channel from a multi-channel array
// Copies channel `coi` (0-based) of multi-channel `a` into single-channel
// `b` of the same depth and size; copies el_size1 raw bytes per element.
335 void cvTsExtract( const CvMat* a, CvMat* b, int coi )
338 int el_size, el_size1, ncols;
// el_size = full source pixel size, el_size1 = one-channel size.
340 el_size = CV_ELEM_SIZE(a->type);
341 el_size1 = CV_ELEM_SIZE(b->type);
// The (unsigned) trick also rejects negative coi in one comparison.
344 assert( CV_ARE_DEPTHS_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) &&
345 (unsigned)coi < (unsigned)CV_MAT_CN(a->type) &&
346 CV_MAT_CN(b->type) == 1 );
348 for( i = 0; i < a->rows; i++ )
350 uchar* a_data = a->data.ptr + a->step*i;
351 uchar* b_data = b->data.ptr + b->step*i;
// Offset to the selected channel inside each source pixel.
352 a_data += el_size1*coi;
353 for( j = 0; j < ncols; j++, b_data += el_size1, a_data += el_size )
355 for( k = 0; k < el_size1; k++ )
356 b_data[k] = a_data[k];
361 // replaces a single channel in a multi-channel array
// Inverse of cvTsExtract: writes single-channel `a` into channel `coi`
// of multi-channel `b` (same depth and size), leaving other channels alone.
362 void cvTsInsert( const CvMat* a, CvMat* b, int coi )
365 int el_size, el_size1, ncols;
// el_size = full destination pixel size, el_size1 = one-channel size.
367 el_size = CV_ELEM_SIZE(b->type);
368 el_size1 = CV_ELEM_SIZE(a->type);
371 assert( CV_ARE_DEPTHS_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) &&
372 (unsigned)coi < (unsigned)CV_MAT_CN(b->type) &&
373 CV_MAT_CN(a->type) == 1 );
375 for( i = 0; i < a->rows; i++ )
377 uchar* a_data = a->data.ptr + a->step*i;
378 uchar* b_data = b->data.ptr + b->step*i;
// Offset to the target channel inside each destination pixel.
379 b_data += el_size1*coi;
380 for( j = 0; j < ncols; j++, b_data += el_size, a_data += el_size1 )
382 for( k = 0; k < el_size1; k++ )
383 b_data[k] = a_data[k];
389 // c = alpha*a + beta*b + gamma
// Weighted elementwise sum with optional absolute value: either operand may
// be NULL (its term is skipped); per-channel scalars are expanded into
// row-length buffers so the inner loops are flat over ncols.
// NOTE(review): many brace/case/break/cleanup lines are elided from this
// listing; comments describe only the visible logic.
390 void cvTsAdd( const CvMat* a, CvScalar alpha, const CvMat* b, CvScalar beta,
391 CvScalar gamma, CvMat* c, int calc_abs )
393 int i, j, k, cn, ncols;
395 double* alpha_buf = 0;
396 double* beta_buf = 0;
397 double* gamma_buf = 0;
405 cn = CV_MAT_CN(c->type);
// Working buffers allocated only for the operands actually present.
418 assert( CV_ARE_SIZES_EQ(a,c) && CV_MAT_CN(a->type) == cn );
419 buf = (double*)malloc( a->cols * cn * sizeof(buf[0]) );
420 alpha_buf = (double*)malloc( a->cols * cn * sizeof(alpha_buf[0]) );
425 assert( CV_ARE_SIZES_EQ(b,c) && CV_MAT_CN(b->type) == cn );
426 beta_buf = (double*)malloc( b->cols * cn * sizeof(beta_buf[0]) );
430 gamma_buf = (double*)malloc( ncols * sizeof(gamma_buf[0]) );
// Pure-scalar case: |gamma| can be taken once up front.
434 if( !a && !b && calc_abs )
436 for( k = 0; k < cn; k++ )
437 gamma.val[k] = fabs(gamma.val[k]);
// Replicate each scalar across a full row (gamma always; alpha/beta only
// when the corresponding operand exists).
440 for( i = 0; i < 1 + (a != 0) + (b != 0); i++ )
442 double* scalar_buf = i == 0 ? gamma_buf : i == 1 ? alpha_buf : beta_buf;
443 CvScalar scalar = i == 0 ? gamma : i == 1 ? alpha : beta;
444 for( j = 0; j < ncols; j += cn )
445 for( k = 0; k < cn; k++ )
446 scalar_buf[j + k] = scalar.val[k];
449 for( i = 0; i < c->rows; i++ )
451 uchar* c_data = c->data.ptr + i*c->step;
// Accumulate alpha*a + gamma into buf (per source depth).
455 uchar* a_data = a->data.ptr + i*a->step;
457 switch( CV_MAT_DEPTH(a->type) )
460 for( j = 0; j < ncols; j++ )
461 buf[j] = ((uchar*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
464 for( j = 0; j < ncols; j++ )
465 buf[j] = ((schar*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
468 for( j = 0; j < ncols; j++ )
469 buf[j] = ((ushort*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
472 for( j = 0; j < ncols; j++ )
473 buf[j] = ((short*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
476 for( j = 0; j < ncols; j++ )
477 buf[j] = ((int*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
480 for( j = 0; j < ncols; j++ )
481 buf[j] = ((float*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
484 for( j = 0; j < ncols; j++ )
485 buf[j] = ((double*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
// Add beta*b into buf (per source depth).
495 uchar* b_data = b->data.ptr + i*b->step;
497 switch( CV_MAT_DEPTH(b->type) )
500 for( j = 0; j < ncols; j++ )
501 buf[j] += ((uchar*)b_data)[j]*beta_buf[j];
504 for( j = 0; j < ncols; j++ )
505 buf[j] += ((schar*)b_data)[j]*beta_buf[j];
508 for( j = 0; j < ncols; j++ )
509 buf[j] += ((ushort*)b_data)[j]*beta_buf[j];
512 for( j = 0; j < ncols; j++ )
513 buf[j] += ((short*)b_data)[j]*beta_buf[j];
516 for( j = 0; j < ncols; j++ )
517 buf[j] += ((int*)b_data)[j]*beta_buf[j];
520 for( j = 0; j < ncols; j++ )
521 buf[j] += ((float*)b_data)[j]*beta_buf[j];
524 for( j = 0; j < ncols; j++ )
525 buf[j] += ((double*)b_data)[j]*beta_buf[j];
// Optional |result| before the store.
537 for( j = 0; j < ncols; j++ )
538 buf[j] = fabs(buf[j]);
// Constant-result shortcut (presumably when a==b==NULL): replicate the
// previous row instead of recomputing — TODO confirm against full source.
543 memcpy( c_data, c_data - c->step, c->cols*CV_ELEM_SIZE(c->type) );
// Store buf into c with rounding/saturation (per destination depth).
547 switch( CV_MAT_DEPTH(c->type) )
550 for( j = 0; j < ncols; j++ )
552 int val = cvRound(buf[j]);
553 ((uchar*)c_data)[j] = CV_CAST_8U(val);
557 for( j = 0; j < ncols; j++ )
559 int val = cvRound(buf[j]);
560 ((schar*)c_data)[j] = CV_CAST_8S(val);
564 for( j = 0; j < ncols; j++ )
566 int val = cvRound(buf[j]);
567 ((ushort*)c_data)[j] = CV_CAST_16U(val);
571 for( j = 0; j < ncols; j++ )
573 int val = cvRound(buf[j]);
574 ((short*)c_data)[j] = CV_CAST_16S(val);
578 for( j = 0; j < ncols; j++ )
580 int val = cvRound(buf[j]);
581 ((int*)c_data)[j] = CV_CAST_32S(val);
585 for( j = 0; j < ncols; j++ )
586 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
589 for( j = 0; j < ncols; j++ )
590 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
// Cleanup (remaining free() calls elided from this listing).
597 if( buf && buf != gamma_buf )
// c = alpha .* a .* b (elementwise product with a per-channel scale).
// Computed through a per-row double buffer, then stored into c with
// rounding/saturation. NOTE(review): case labels, breaks and the final
// free() calls are elided from this listing.
609 void cvTsMul( const CvMat* a, const CvMat* b, CvScalar alpha, CvMat* c )
611 int i, j, k, cn, ncols;
613 double* alpha_buf = 0;
621 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_SIZES_EQ(b,c) &&
622 CV_ARE_TYPES_EQ(a,b) && CV_ARE_CNS_EQ(a,c) );
624 cn = CV_MAT_CN(c->type);
625 ncols = c->cols * cn;
626 alpha_buf = (double*)malloc( ncols * sizeof(alpha_buf[0]) );
627 buf = (double*)malloc( ncols * sizeof(buf[0]) );
// Expand per-channel alpha across a whole row.
629 for( j = 0; j < ncols; j += cn )
630 for( k = 0; k < cn; k++ )
631 alpha_buf[j + k] = alpha.val[k];
633 for( i = 0; i < c->rows; i++ )
635 uchar* c_data = c->data.ptr + i*c->step;
636 uchar* a_data = a->data.ptr + i*a->step;
637 uchar* b_data = b->data.ptr + i*b->step;
// Compute alpha*a*b in doubles (per source depth).
639 switch( CV_MAT_DEPTH(a->type) )
642 for( j = 0; j < ncols; j++ )
643 buf[j] = (alpha_buf[j]*((uchar*)a_data)[j])*((uchar*)b_data)[j];
646 for( j = 0; j < ncols; j++ )
647 buf[j] = (alpha_buf[j]*((schar*)a_data)[j])*((schar*)b_data)[j];
650 for( j = 0; j < ncols; j++ )
651 buf[j] = (alpha_buf[j]*((ushort*)a_data)[j])*((ushort*)b_data)[j];
654 for( j = 0; j < ncols; j++ )
655 buf[j] = (alpha_buf[j]*((short*)a_data)[j])*((short*)b_data)[j];
658 for( j = 0; j < ncols; j++ )
659 buf[j] = (alpha_buf[j]*((int*)a_data)[j])*((int*)b_data)[j];
662 for( j = 0; j < ncols; j++ )
663 buf[j] = (alpha_buf[j]*((float*)a_data)[j])*((float*)b_data)[j];
666 for( j = 0; j < ncols; j++ )
667 buf[j] = (alpha_buf[j]*((double*)a_data)[j])*((double*)b_data)[j];
// Store with rounding/saturation (per destination depth).
674 switch( CV_MAT_DEPTH(c->type) )
677 for( j = 0; j < ncols; j++ )
679 int val = cvRound(buf[j]);
680 ((uchar*)c_data)[j] = CV_CAST_8U(val);
684 for( j = 0; j < ncols; j++ )
686 int val = cvRound(buf[j]);
687 ((schar*)c_data)[j] = CV_CAST_8S(val);
691 for( j = 0; j < ncols; j++ )
693 int val = cvRound(buf[j]);
694 ((ushort*)c_data)[j] = CV_CAST_16U(val);
698 for( j = 0; j < ncols; j++ )
700 int val = cvRound(buf[j]);
701 ((short*)c_data)[j] = CV_CAST_16S(val);
705 for( j = 0; j < ncols; j++ )
707 int val = cvRound(buf[j]);
708 ((int*)c_data)[j] = CV_CAST_32S(val);
712 for( j = 0; j < ncols; j++ )
713 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
716 for( j = 0; j < ncols; j++ )
717 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
// c = alpha .* a ./ b; with a == NULL it computes alpha ./ b (reciprocal).
// Division by zero produces 0 rather than a trap. Goes through a per-row
// double buffer, then stores with rounding/saturation.
// NOTE(review): case labels, breaks and cleanup lines are elided here.
733 void cvTsDiv( const CvMat* a, const CvMat* b, CvScalar alpha, CvMat* c )
735 int i, j, k, cn, ncols;
737 double* alpha_buf = 0;
747 assert( CV_ARE_SIZES_EQ(a,c) &&
748 CV_ARE_TYPES_EQ(a,b) && CV_ARE_CNS_EQ(a,c) );
751 assert( CV_ARE_SIZES_EQ(b,c) && CV_ARE_CNS_EQ(b,c) );
753 cn = CV_MAT_CN(c->type);
754 ncols = c->cols * cn;
755 alpha_buf = (double*)malloc( ncols * sizeof(alpha_buf[0]) );
756 buf = (double*)malloc( ncols * sizeof(buf[0]) );
// Expand per-channel alpha across a whole row.
758 for( j = 0; j < ncols; j += cn )
759 for( k = 0; k < cn; k++ )
760 alpha_buf[j + k] = alpha.val[k];
762 for( i = 0; i < c->rows; i++ )
764 uchar* c_data = c->data.ptr + i*c->step;
// a may be NULL: numerator then defaults to 1 (reciprocal mode).
765 uchar* a_data = a ? a->data.ptr + i*a->step : 0;
766 uchar* b_data = b->data.ptr + i*b->step;
// Compute alpha*num/denom in doubles; zero denominator -> 0.
768 switch( CV_MAT_DEPTH(b->type) )
771 for( j = 0; j < ncols; j++ )
773 int denom = ((uchar*)b_data)[j];
774 int num = a_data ? ((uchar*)a_data)[j] : 1;
775 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
779 for( j = 0; j < ncols; j++ )
781 int denom = ((schar*)b_data)[j];
782 int num = a_data ? ((schar*)a_data)[j] : 1;
783 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
787 for( j = 0; j < ncols; j++ )
789 int denom = ((ushort*)b_data)[j];
790 int num = a_data ? ((ushort*)a_data)[j] : 1;
791 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
795 for( j = 0; j < ncols; j++ )
797 int denom = ((short*)b_data)[j];
798 int num = a_data ? ((short*)a_data)[j] : 1;
799 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
803 for( j = 0; j < ncols; j++ )
805 int denom = ((int*)b_data)[j];
806 int num = a_data ? ((int*)a_data)[j] : 1;
807 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
811 for( j = 0; j < ncols; j++ )
813 double denom = ((float*)b_data)[j];
814 double num = a_data ? ((float*)a_data)[j] : 1;
815 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
819 for( j = 0; j < ncols; j++ )
821 double denom = ((double*)b_data)[j];
822 double num = a_data ? ((double*)a_data)[j] : 1;
823 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
// Store with rounding/saturation (per destination depth).
831 switch( CV_MAT_DEPTH(c->type) )
834 for( j = 0; j < ncols; j++ )
836 int val = cvRound(buf[j]);
837 ((uchar*)c_data)[j] = CV_CAST_8U(val);
841 for( j = 0; j < ncols; j++ )
843 int val = cvRound(buf[j]);
844 ((schar*)c_data)[j] = CV_CAST_8S(val);
848 for( j = 0; j < ncols; j++ )
850 int val = cvRound(buf[j]);
851 ((ushort*)c_data)[j] = CV_CAST_16U(val);
855 for( j = 0; j < ncols; j++ )
857 int val = cvRound(buf[j]);
858 ((short*)c_data)[j] = CV_CAST_16S(val);
862 for( j = 0; j < ncols; j++ )
864 int val = cvRound(buf[j]);
865 ((int*)c_data)[j] = CV_CAST_32S(val);
869 for( j = 0; j < ncols; j++ )
870 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
873 for( j = 0; j < ncols; j++ )
874 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
889 // c = min(a,b) or c = max(a,b)
// Elementwise min/max of two same-type single-channel arrays, selected by
// op_type (CV_TS_MAX -> max, otherwise min).
// NOTE(review): case labels and breaks are elided from this listing.
890 void cvTsMinMax( const CvMat* a, const CvMat* b, CvMat* c, int op_type )
893 int calc_max = op_type == CV_TS_MAX;
901 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_TYPES_EQ(a,c) &&
902 CV_ARE_SIZES_EQ(b,c) && CV_ARE_TYPES_EQ(b,c) &&
903 CV_MAT_CN(a->type) == 1 );
906 for( i = 0; i < c->rows; i++ )
908 uchar* c_data = c->data.ptr + i*c->step;
909 uchar* a_data = a->data.ptr + i*a->step;
910 uchar* b_data = b->data.ptr + i*b->step;
// One case per depth; small ints are widened to int for the comparison.
912 switch( CV_MAT_DEPTH(a->type) )
915 for( j = 0; j < ncols; j++ )
917 int aj = ((uchar*)a_data)[j];
918 int bj = ((uchar*)b_data)[j];
919 ((uchar*)c_data)[j] = (uchar)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
923 for( j = 0; j < ncols; j++ )
925 int aj = ((schar*)a_data)[j];
926 int bj = ((schar*)b_data)[j];
927 ((schar*)c_data)[j] = (schar)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
931 for( j = 0; j < ncols; j++ )
933 int aj = ((ushort*)a_data)[j];
934 int bj = ((ushort*)b_data)[j];
935 ((ushort*)c_data)[j] = (ushort)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
939 for( j = 0; j < ncols; j++ )
941 int aj = ((short*)a_data)[j];
942 int bj = ((short*)b_data)[j];
943 ((short*)c_data)[j] = (short)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
947 for( j = 0; j < ncols; j++ )
949 int aj = ((int*)a_data)[j];
950 int bj = ((int*)b_data)[j];
951 ((int*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
955 for( j = 0; j < ncols; j++ )
957 float aj = ((float*)a_data)[j];
958 float bj = ((float*)b_data)[j];
959 ((float*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
963 for( j = 0; j < ncols; j++ )
965 double aj = ((double*)a_data)[j];
966 double bj = ((double*)b_data)[j];
967 ((double*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
977 // c = min(a,b) or c = max(a,b)
// Elementwise min/max of array `a` against scalar `s`; op_type selects
// max (CV_TS_MAX) or min. The scalar is pre-saturated to the array depth
// so integer comparisons are exact.
978 void cvTsMinMaxS( const CvMat* a, double s, CvMat* c, int op_type )
981 int calc_max = op_type == CV_TS_MAX;
991 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_TYPES_EQ(a,c) &&
992 CV_MAT_CN(a->type) == 1 );
// Saturate the integer form of `s` to the array depth (8u case and the
// surrounding case labels are elided from this listing).
995 switch( CV_MAT_DEPTH(a->type) )
1001 is = CV_CAST_8S(is);
1004 is = CV_CAST_16U(is);
1007 is = CV_CAST_16S(is);
1013 for( i = 0; i < c->rows; i++ )
1015 uchar* c_data = c->data.ptr + i*c->step;
1016 uchar* a_data = a->data.ptr + i*a->step;
1018 switch( CV_MAT_DEPTH(a->type) )
1021 for( j = 0; j < ncols; j++ )
1023 int aj = ((uchar*)a_data)[j];
1024 ((uchar*)c_data)[j] = (uchar)(calc_max ? MAX(aj, is) : MIN(aj, is));
1028 for( j = 0; j < ncols; j++ )
1030 int aj = ((schar*)a_data)[j];
1031 ((schar*)c_data)[j] = (schar)(calc_max ? MAX(aj, is) : MIN(aj, is));
1035 for( j = 0; j < ncols; j++ )
1037 int aj = ((ushort*)a_data)[j];
1038 ((ushort*)c_data)[j] = (ushort)(calc_max ? MAX(aj, is) : MIN(aj, is));
1042 for( j = 0; j < ncols; j++ )
1044 int aj = ((short*)a_data)[j];
1045 ((short*)c_data)[j] = (short)(calc_max ? MAX(aj, is) : MIN(aj, is));
1049 for( j = 0; j < ncols; j++ )
1051 int aj = ((int*)a_data)[j];
1052 ((int*)c_data)[j] = calc_max ? MAX(aj, is) : MIN(aj, is);
// Float depths compare against the float (fs) / double (s) scalar directly.
1056 for( j = 0; j < ncols; j++ )
1058 float aj = ((float*)a_data)[j];
1059 ((float*)c_data)[j] = calc_max ? MAX(aj, fs) : MIN(aj, fs);
1063 for( j = 0; j < ncols; j++ )
1065 double aj = ((double*)a_data)[j];
1066 ((double*)c_data)[j] = calc_max ? MAX(aj, s) : MIN(aj, s);
1076 // checks that the array does not have NaNs and/or Infs and all the elements are
1077 // within [min_val,max_val). idx is the index of the first "bad" element.
// Returns a pass/fail code (return statements are elided from this listing);
// for float/double depths NaN/Inf also count as "bad".
1078 int cvTsCheck( const CvMat* a, double min_val, double max_val, CvPoint* idx )
1082 int imin = 0, imax = 0;
1083 cn = CV_MAT_CN(a->type);
// For integer depths, pre-round the bounds once so the inner loops compare
// ints: imin = ceil(min_val), imax = floor(max_val).
1086 if( CV_MAT_DEPTH(a->type) <= CV_32S )
1088 imin = cvCeil(min_val);
1089 imax = cvFloor(max_val);
1092 for( i = 0; i < a->rows; i++ )
1094 uchar* data = a->data.ptr + a->step*i;
// One case per depth; the "record idx and bail out" lines are elided.
1096 switch( CV_MAT_DEPTH(a->type) )
1099 for( j = 0; j < ncols; j++ )
1101 int val = ((uchar*)data)[j];
1102 if( val < imin || imax < val )
1107 for( j = 0; j < ncols; j++ )
1109 int val = ((schar*)data)[j];
1110 if( val < imin || imax < val )
1115 for( j = 0; j < ncols; j++ )
1117 int val = ((ushort*)data)[j];
1118 if( val < imin || imax < val )
1123 for( j = 0; j < ncols; j++ )
1125 int val = ((short*)data)[j];
1126 if( val < imin || imax < val )
1131 for( j = 0; j < ncols; j++ )
1133 int val = ((int*)data)[j];
1134 if( val < imin || imax < val )
1139 for( j = 0; j < ncols; j++ )
1141 double val = ((float*)data)[j];
1142 if( cvIsNaN(val) || cvIsInf(val) || val < min_val || max_val < val )
1147 for( j = 0; j < ncols; j++ )
1149 double val = ((double*)data)[j];
1150 if( cvIsNaN(val) || cvIsInf(val) || val < min_val || max_val < val )
1168 // compares two arrays. max_diff is the maximum actual difference,
1169 // success_err_level is maximum allowed difference, idx is the index of the first
1170 // element for which difference is >success_err_level
1171 // (or index of element with the maximum difference)
// Returns a status code (OK / bad accuracy / invalid output — return lines
// are elided from this listing). Integer depths compare absolute
// differences; float depths compare relative error, either elementwise
// (|b|+1 as the denominator) or against the etalon's C-norm.
1172 int cvTsCmpEps( const CvMat* check_arr, const CvMat* etalon, double* _max_diff,
1173 double success_err_level, CvPoint* idx, bool element_wise_relative_error )
1183 cn = CV_MAT_CN(check_arr->type);
1184 ncols = check_arr->cols*cn;
1186 *idx = cvPoint(0,0);
1188 assert( CV_ARE_TYPES_EQ(check_arr,etalon) && CV_ARE_SIZES_EQ(check_arr,etalon) );
// Integer threshold for depths below 32S (compared as ints).
1190 if( CV_MAT_DEPTH(check_arr->type) < CV_32S )
1191 ilevel = cvFloor(success_err_level);
// Whole-array relative error: normalize by max(|etalon|_C, 1).
1193 if( CV_MAT_DEPTH(check_arr->type) >= CV_32F && !element_wise_relative_error )
1195 double maxval0 = 1.;
1196 maxval = cvTsNorm( etalon, 0, CV_C, 0 );
1197 maxval = MAX(maxval, maxval0);
1200 for( i = 0; i < check_arr->rows; i++ )
1202 uchar* a_data = check_arr->data.ptr + check_arr->step*i;
1203 uchar* b_data = etalon->data.ptr + etalon->step*i;
1205 switch( CV_MAT_DEPTH(check_arr->type) )
1208 for( j = 0; j < ncols; j++ )
1210 int val = abs(((uchar*)a_data)[j] - ((uchar*)b_data)[j]);
// Track the worst difference and where it occurred.
1211 if( val > imaxdiff )
1214 *idx = cvPoint(j,i);
1221 for( j = 0; j < ncols; j++ )
1223 int val = abs(((schar*)a_data)[j] - ((schar*)b_data)[j]);
1224 if( val > imaxdiff )
1227 *idx = cvPoint(j,i);
1234 for( j = 0; j < ncols; j++ )
1236 int val = abs(((ushort*)a_data)[j] - ((ushort*)b_data)[j]);
1237 if( val > imaxdiff )
1240 *idx = cvPoint(j,i);
1247 for( j = 0; j < ncols; j++ )
1249 int val = abs(((short*)a_data)[j] - ((short*)b_data)[j]);
1250 if( val > imaxdiff )
1253 *idx = cvPoint(j,i);
// 32S: diff computed in double to avoid int-subtraction overflow.
1260 for( j = 0; j < ncols; j++ )
1262 double val = fabs((double)((int*)a_data)[j] - (double)((int*)b_data)[j]);
1266 *idx = cvPoint(j,i);
1267 if( val > success_err_level )
// 32F: NaN/Inf in either array is an immediate invalid-output failure.
1273 for( j = 0; j < ncols; j++ )
1275 double a_val = ((float*)a_data)[j];
1276 double b_val = ((float*)b_data)[j];
1278 if( cvIsNaN(a_val) || cvIsInf(a_val) )
1281 *idx = cvPoint(j,i);
1284 if( cvIsNaN(b_val) || cvIsInf(b_val) )
1287 *idx = cvPoint(j,i);
1290 a_val = fabs(a_val - b_val);
1291 threshold = element_wise_relative_error ? fabs(b_val) + 1 : maxval;
1292 if( a_val > threshold*success_err_level )
1294 maxdiff = a_val/threshold;
1295 *idx = cvPoint(j,i);
// 64F: same scheme as 32F at double precision.
1301 for( j = 0; j < ncols; j++ )
1303 double a_val = ((double*)a_data)[j];
1304 double b_val = ((double*)b_data)[j];
1306 if( cvIsNaN(a_val) || cvIsInf(a_val) )
1309 *idx = cvPoint(j,i);
1312 if( cvIsNaN(b_val) || cvIsInf(b_val) )
1315 *idx = cvPoint(j,i);
1318 a_val = fabs(a_val - b_val);
1319 threshold = element_wise_relative_error ? fabs(b_val)+1 : maxval;
1320 if( a_val > threshold*success_err_level )
1322 maxdiff = a_val/threshold;
1323 *idx = cvPoint(j,i);
1337 if( CV_MAT_DEPTH(check_arr->type) < CV_32S )
// Sentinel "huge" value (exp(1000) overflows to +Inf) marking NaN/Inf hits.
1341 maxdiff = exp(1000.);
1342 *_max_diff = maxdiff;
// Wrapper over cvTsCmpEps that accepts generic CvArr inputs, logs a
// human-readable failure message (with the offending element's position)
// through the test system, and returns a CvTS status code.
1347 int cvTsCmpEps2( CvTS* ts, const CvArr* _a, const CvArr* _b, double success_err_level,
1348 bool element_wise_relative_error, const char* desc )
1352 CvMat astub, bstub, *a, *b;
1353 CvPoint idx = {0,0};
// Normalize both inputs to CvMat headers.
1356 a = cvGetMat( _a, &astub );
1357 b = cvGetMat( _b, &bstub );
1358 code = cvTsCmpEps( a, b, &diff, success_err_level, &idx,
1359 element_wise_relative_error );
// Translate the comparison result into a message + CvTS failure code
// (the switch/case lines around these are elided from this listing).
1364 sprintf( msg, "%s: Too big difference (=%g)", desc, diff );
1365 code = CvTS::FAIL_BAD_ACCURACY;
1368 sprintf( msg, "%s: Invalid output", desc );
1369 code = CvTS::FAIL_INVALID_OUTPUT;
1372 sprintf( msg, "%s: Invalid reference output", desc );
1373 code = CvTS::FAIL_INVALID_OUTPUT;
// Report position: scalar (no index), vector (linear index), or matrix (x,y).
1381 if( a->rows == 1 && a->cols == 1 )
1383 assert( idx.x == 0 && idx.y == 0 );
1384 ts->printf( CvTS::LOG, "%s\n", msg );
1386 else if( a->rows == 1 || a->cols == 1 )
1388 assert( idx.x == 0 || idx.y == 0 );
1389 ts->printf( CvTS::LOG, "%s at element %d\n", msg, idx.x + idx.y );
1392 ts->printf( CvTS::LOG, "%s at (%d,%d)\n", msg, idx.x, idx.y );
// Convenience overload: compares two raw double arrays of length `len`
// by wrapping them in 1xlen CV_64F matrix headers (no copy) and delegating
// to cvTsCmpEps2 with elementwise relative error.
1399 int cvTsCmpEps2_64f( CvTS* ts, const double* val, const double* ref_val, int len,
1400 double eps, const char* param_name )
1402 CvMat _val = cvMat( 1, len, CV_64F, (void*)val );
1403 CvMat _ref_val = cvMat( 1, len, CV_64F, (void*)ref_val );
1405 return cvTsCmpEps2( ts, &_val, &_ref_val, eps, true, param_name );
1408 // compares two arrays. the result is 8s image that takes values -1, 0, 1
// Two passes: first store the three-way comparison sign CV_CMP(a,b) into
// `result`, then (second switch, on the elided cmp_op) collapse it into the
// mask convention -1 = condition true, 0 = condition false.
1409 void cvTsCmp( const CvMat* a, const CvMat* b, CvMat* result, int cmp_op )
1411 int i = 0, j = 0, ncols;
1414 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) && CV_MAT_CN(a->type) == 1 );
1415 assert( CV_ARE_SIZES_EQ(a,result) &&
1416 (CV_MAT_TYPE(result->type) == CV_8UC1 ||
1417 CV_MAT_TYPE(result->type) == CV_8SC1 ));
1419 for( i = 0; i < a->rows; i++ )
1421 uchar* a_data = a->data.ptr + a->step*i;
1422 uchar* b_data = b->data.ptr + b->step*i;
1423 schar* r_data = (schar*)(result->data.ptr + result->step*i);
// Pass 1: per-depth three-way comparison (case labels elided).
1425 switch( CV_MAT_DEPTH(a->type) )
1428 for( j = 0; j < ncols; j++ )
1430 int a_val = ((uchar*)a_data)[j];
1431 int b_val = ((uchar*)b_data)[j];
1432 r_data[j] = (schar)CV_CMP(a_val,b_val);
1436 for( j = 0; j < ncols; j++ )
1438 int a_val = ((schar*)a_data)[j];
1439 int b_val = ((schar*)b_data)[j];
1440 r_data[j] = (schar)CV_CMP(a_val,b_val);
1444 for( j = 0; j < ncols; j++ )
1446 int a_val = ((ushort*)a_data)[j];
1447 int b_val = ((ushort*)b_data)[j];
1448 r_data[j] = (schar)CV_CMP(a_val,b_val);
1452 for( j = 0; j < ncols; j++ )
1454 int a_val = ((short*)a_data)[j];
1455 int b_val = ((short*)b_data)[j];
1456 r_data[j] = (schar)CV_CMP(a_val,b_val);
1460 for( j = 0; j < ncols; j++ )
1462 int a_val = ((int*)a_data)[j];
1463 int b_val = ((int*)b_data)[j];
1464 r_data[j] = (schar)CV_CMP(a_val,b_val);
1468 for( j = 0; j < ncols; j++ )
1470 float a_val = ((float*)a_data)[j];
1471 float b_val = ((float*)b_data)[j];
1472 r_data[j] = (schar)CV_CMP(a_val,b_val);
1476 for( j = 0; j < ncols; j++ )
1478 double a_val = ((double*)a_data)[j];
1479 double b_val = ((double*)b_data)[j];
1480 r_data[j] = (schar)CV_CMP(a_val,b_val);
// Pass 2: map the sign to -1/0 per cmp_op (EQ/NE/LT/LE/GE/GT; the switch
// on cmp_op is elided from this listing).
1490 for( j = 0; j < ncols; j++ )
1491 r_data[j] = (schar)(r_data[j] == 0 ? -1 : 0);
1494 for( j = 0; j < ncols; j++ )
1495 r_data[j] = (schar)(r_data[j] != 0 ? -1 : 0);
1498 for( j = 0; j < ncols; j++ )
1499 r_data[j] = (schar)(r_data[j] < 0 ? -1 : 0);
1502 for( j = 0; j < ncols; j++ )
1503 r_data[j] = (schar)(r_data[j] <= 0 ? -1 : 0);
1506 for( j = 0; j < ncols; j++ )
1507 r_data[j] = (schar)(r_data[j] >= 0 ? -1 : 0);
1510 for( j = 0; j < ncols; j++ )
1511 r_data[j] = (schar)(r_data[j] > 0 ? -1 : 0);
1519 // compares two arrays. the result is 8s image that takes values -1, 0, 1
// Scalar variant of cvTsCmp: compares array `a` against `fval` (rounded to
// `ival` for integer depths), then collapses the sign into -1/0 per cmp_op.
1520 void cvTsCmpS( const CvMat* a, double fval, CvMat* result, int cmp_op )
1523 int ncols, ival = 0;
// Integer depths compare against the rounded scalar for exact results.
1526 if( CV_MAT_DEPTH(a->type) <= CV_32S )
1527 ival = cvRound(fval);
1529 assert( CV_MAT_CN(a->type) == 1 );
1530 assert( CV_ARE_SIZES_EQ(a,result) &&
1531 (CV_MAT_TYPE(result->type) == CV_8UC1 ||
1532 CV_MAT_TYPE(result->type) == CV_8SC1 ));
1534 for( i = 0; i < a->rows; i++ )
1536 uchar* a_data = a->data.ptr + a->step*i;
1537 schar* r_data = (schar*)(result->data.ptr + result->step*i);
// Pass 1: three-way comparison per depth (case labels elided).
1539 switch( CV_MAT_DEPTH(a->type) )
1542 for( j = 0; j < ncols; j++ )
1544 int a_val = ((uchar*)a_data)[j];
1545 r_data[j] = (schar)CV_CMP(a_val,ival);
1549 for( j = 0; j < ncols; j++ )
1551 int a_val = ((schar*)a_data)[j];
1552 r_data[j] = (schar)CV_CMP(a_val,ival);
1556 for( j = 0; j < ncols; j++ )
1558 int a_val = ((ushort*)a_data)[j];
1559 r_data[j] = (schar)CV_CMP(a_val,ival);
1563 for( j = 0; j < ncols; j++ )
1565 int a_val = ((short*)a_data)[j];
1566 r_data[j] = (schar)CV_CMP(a_val,ival);
1570 for( j = 0; j < ncols; j++ )
1572 int a_val = ((int*)a_data)[j];
1573 r_data[j] = (schar)CV_CMP(a_val,ival);
1577 for( j = 0; j < ncols; j++ )
1579 float a_val = ((float*)a_data)[j];
1580 r_data[j] = (schar)CV_CMP(a_val,fval);
1584 for( j = 0; j < ncols; j++ )
1586 double a_val = ((double*)a_data)[j];
1587 r_data[j] = (schar)CV_CMP(a_val,fval);
// Pass 2: map sign to -1 (true) / 0 (false) per cmp_op (switch elided).
1597 for( j = 0; j < ncols; j++ )
1598 r_data[j] = (schar)(r_data[j] == 0 ? -1 : 0);
1601 for( j = 0; j < ncols; j++ )
1602 r_data[j] = (schar)(r_data[j] != 0 ? -1 : 0);
1605 for( j = 0; j < ncols; j++ )
1606 r_data[j] = (schar)(r_data[j] < 0 ? -1 : 0);
1609 for( j = 0; j < ncols; j++ )
1610 r_data[j] = (schar)(r_data[j] <= 0 ? -1 : 0);
1613 for( j = 0; j < ncols; j++ )
1614 r_data[j] = (schar)(r_data[j] >= 0 ? -1 : 0);
1617 for( j = 0; j < ncols; j++ )
1618 r_data[j] = (schar)(r_data[j] > 0 ? -1 : 0);
1627 // calculates norm of a matrix
// Computes CV_C (max-abs), CV_L1 (sum-abs) or CV_L2 norm of `arr`,
// optionally restricted by a mask and/or to a single channel (coi, 1-based;
// 0 = all channels). Integer depths accumulate in int (inorm), float depths
// in double (fnorm); the final merge/return lines are elided here.
1628 double cvTsNorm( const CvMat* arr, const CvMat* mask, int norm_type, int coi )
1630 int i = 0, j = 0, k;
1631 int depth, cn0, cn, ncols, el_size1;
1637 cn0 = cn = CV_MAT_CN(arr->type);
1638 ncols = arr->cols*cn;
1639 depth = CV_MAT_DEPTH(arr->type);
1640 el_size1 = CV_ELEM_SIZE(depth);
// One zero-valued pixel used as the substitute for masked-out elements.
1641 zerobuf = (uchar*)cvStackAlloc(el_size1*cn);
1642 memset( zerobuf, 0, el_size1*cn);
1646 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
// Row-sized scratch: masked rows are rebuilt here with zeros substituted.
1647 buffer = cvStackAlloc( el_size1*ncols );
1653 for( i = 0; i < arr->rows; i++ )
// With coi != 0 the pointer is pre-offset to the selected channel.
1655 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
1659 const uchar* mdata = mask->data.ptr + mask->step*i;
// Masked copy, switched on element byte size (case labels elided):
// 1-byte, 2-byte, 4-byte and 8-byte element variants follow.
1664 for( j = 0; j < ncols; j += cn0 )
1666 const uchar* src = *mdata++ ? (uchar*)data + j : zerobuf;
1667 for( k = 0; k < cn0; k++ )
1668 ((uchar*)buffer)[j+k] = src[k];
1673 for( j = 0; j < ncols; j += cn0 )
1675 const short* src = *mdata++ ? (short*)data + j : (short*)zerobuf;
1676 for( k = 0; k < cn0; k++ )
1677 ((short*)buffer)[j+k] = src[k];
1682 for( j = 0; j < ncols; j += cn0 )
1684 const int* src = *mdata++ ? (int*)data + j : (int*)zerobuf;
1685 for( k = 0; k < cn0; k++ )
1686 ((int*)buffer)[j+k] = src[k];
1690 for( j = 0; j < ncols; j += cn0 )
1692 const double* src = *mdata++ ? (double*)data + j : (double*)zerobuf;
1693 for( k = 0; k < cn0; k++ )
1694 ((double*)buffer)[j+k] = src[k];
// From here on, read from the masked copy instead of the raw row.
1701 data = (const uchar*)buffer;
// Accumulation, per depth x norm type. Stride `cn` walks a single channel
// when coi != 0, all channels when cn == 1... NOTE(review): with coi == 0
// and cn > 1 the j += cn stride appears to skip channels — confirm against
// the full source (lines are elided here).
1707 if( norm_type == CV_C )
1709 for( j = 0; j < ncols; j += cn )
1711 int val = ((const uchar*)data)[j];
1712 inorm = MAX( inorm, val );
1715 else if( norm_type == CV_L1 )
1718 for( j = 0; j < ncols; j += cn )
1720 int val = ((const uchar*)data)[j];
1728 for( j = 0; j < ncols; j += cn )
1730 int val = ((const uchar*)data)[j];
1737 if( norm_type == CV_C )
1739 for( j = 0; j < ncols; j += cn )
1741 int val = abs(((const schar*)data)[j]);
1742 inorm = MAX( inorm, val );
1745 else if( norm_type == CV_L1 )
1748 for( j = 0; j < ncols; j += cn )
1750 int val = abs(((const schar*)data)[j]);
1758 for( j = 0; j < ncols; j += cn )
1760 int val = ((const schar*)data)[j];
1767 if( norm_type == CV_C )
1769 for( j = 0; j < ncols; j += cn )
1771 int val = ((const ushort*)data)[j];
1772 inorm = MAX( inorm, val );
1775 else if( norm_type == CV_L1 )
1778 for( j = 0; j < ncols; j += cn )
1780 int val = ((const ushort*)data)[j];
1787 for( j = 0; j < ncols; j += cn )
1789 double val = ((const ushort*)data)[j];
1795 if( norm_type == CV_C )
1797 for( j = 0; j < ncols; j += cn )
1799 int val = abs(((const short*)data)[j]);
1800 inorm = MAX( inorm, val );
1803 else if( norm_type == CV_L1 )
1806 for( j = 0; j < ncols; j += cn )
1808 int val = abs(((const short*)data)[j]);
1815 for( j = 0; j < ncols; j += cn )
1817 double val = ((const short*)data)[j];
1823 if( norm_type == CV_C )
1825 for( j = 0; j < ncols; j += cn )
1827 int val = abs(((const int*)data)[j]);
1828 inorm = MAX( inorm, val );
1831 else if( norm_type == CV_L1 )
// 32S L1/L2 accumulate in double to avoid integer overflow.
1833 for( j = 0; j < ncols; j += cn )
1835 double val = fabs((double)((const int*)data)[j]);
1841 for( j = 0; j < ncols; j += cn )
1843 double val = ((const int*)data)[j];
1849 if( norm_type == CV_C )
1851 for( j = 0; j < ncols; j += cn )
1853 double val = fabs((double)((const float*)data)[j]);
1854 fnorm = MAX( fnorm, val );
1857 else if( norm_type == CV_L1 )
1859 for( j = 0; j < ncols; j += cn )
1861 double val = fabs((double)((const float*)data)[j]);
1867 for( j = 0; j < ncols; j += cn )
1869 double val = ((const float*)data)[j];
1875 if( norm_type == CV_C )
1877 for( j = 0; j < ncols; j += cn )
1879 double val = fabs(((const double*)data)[j]);
1880 fnorm = MAX( fnorm, val );
1883 else if( norm_type == CV_L1 )
1885 for( j = 0; j < ncols; j += cn )
1887 double val = fabs(((const double*)data)[j]);
1893 for( j = 0; j < ncols; j += cn )
1895 double val = ((const double*)data)[j];
// Finalize: L2 takes the square root; small-int C-norms come from inorm.
1906 if( norm_type == CV_L2 )
1907 fnorm = sqrt( fnorm );
1908 else if( depth < CV_32F && norm_type == CV_C )
1915 // retrieves mean, standard deviation and the number of nonzero mask pixels
// Reference implementation used by the test suite (counterpart of cvAvgSdv):
//   arr     - input matrix, any depth, up to 4 channels (isum/isqsum are [4])
//   mask    - 8-bit mask; when absent a stack buffer of ones is substituted
//   _mean   - optional output: per-channel mean over nonzero-mask pixels
//   _stddev - optional output: per-channel standard deviation
//   coi     - channel of interest (0 = all channels)
// Returns the number of nonzero mask pixels (hedged: the return statement is
// on a line not visible in this excerpt — verify against the full source).
// NOTE(review): the embedded line numbering jumps (1917 -> 1919 etc.), so
// braces, switch headers and break statements between those numbers are not
// visible here; the code below is kept verbatim.
1916 int cvTsMeanStdDevNonZero( const CvMat* arr, const CvMat* mask,
1917 CvScalar* _mean, CvScalar* _stddev, int coi )
1919 int i = 0, j = 0, k;
1920 int depth, cn0, cn, cols, ncols, el_size1;
1921 CvScalar sum = cvScalar(0), sqsum = cvScalar(0);
// per-channel integer accumulators for the small integer depths (8u/8s/16u/16s)
1923 int isum[4], isqsum[4];
1927 cn0 = cn = CV_MAT_CN(arr->type);
1929 ncols = arr->cols*cn;
1930 depth = CV_MAT_DEPTH(arr->type);
1931 el_size1 = CV_ELEM_SIZE(depth);
1934 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
// no mask given: use an all-ones stack buffer so every pixel counts
1938 maskbuf = (uchar*)cvStackAlloc( cols );
1939 memset( maskbuf, 1, cols );
1940 nonzero = cols*arr->rows;
1946 for( i = 0; i < arr->rows; i++ )
// (coi - (coi != 0)) shifts the row origin to the selected channel when coi > 0
1948 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
1953 mdata = mask->data.ptr + mask->step*i;
1954 for( j = 0; j < cols; j++ )
1955 nonzero += mdata[j] != 0;
1962 // if only a number of pixels in the mask is needed, skip the rest of the loop body
1963 if( !_mean && !_stddev )
// 8u branch: accumulate per-row in int, then fold into the double accumulators
1969 for( k = 0; k < cn; k++ )
1970 isum[k] = isqsum[k] = 0;
1971 for( j = 0; j < ncols; j += cn0 )
1974 for( k = 0; k < cn; k++ )
1976 int val = ((const uchar*)data)[j+k];
1978 isqsum[k] += val*val;
1981 for( k = 0; k < cn; k++ )
1983 sum.val[k] += isum[k];
1984 sqsum.val[k] += isqsum[k];
// 8s branch
1988 for( k = 0; k < cn; k++ )
1989 isum[k] = isqsum[k] = 0;
1990 for( j = 0; j < ncols; j += cn0 )
1993 for( k = 0; k < cn; k++ )
1995 int val = ((const schar*)data)[j+k];
1997 isqsum[k] += val*val;
2000 for( k = 0; k < cn; k++ )
2002 sum.val[k] += isum[k];
2003 sqsum.val[k] += isqsum[k];
// 16u branch: val*val may exceed int range, so squares go straight to double
2007 for( k = 0; k < cn; k++ )
2009 for( j = 0; j < ncols; j += cn0 )
2012 for( k = 0; k < cn; k++ )
2014 int val = ((const ushort*)data)[j+k];
2016 sqsum.val[k] += ((double)val)*val;
2019 for( k = 0; k < cn; k++ )
2020 sum.val[k] += isum[k];
// 16s branch
2023 for( k = 0; k < cn; k++ )
2025 for( j = 0; j < ncols; j += cn0 )
2028 for( k = 0; k < cn; k++ )
2030 int val = ((const short*)data)[j+k];
2032 sqsum.val[k] += ((double)val)*val;
2035 for( k = 0; k < cn; k++ )
2036 sum.val[k] += isum[k];
// 32s branch: accumulate entirely in double
2039 for( j = 0; j < ncols; j += cn0 )
2042 for( k = 0; k < cn; k++ )
2044 double val = ((const int*)data)[j+k];
2046 sqsum.val[k] += val*val;
// 32f branch
2051 for( j = 0; j < ncols; j += cn0 )
2054 for( k = 0; k < cn; k++ )
2056 double val = ((const float*)data)[j+k];
2058 sqsum.val[k] += val*val;
// 64f branch
2063 for( j = 0; j < ncols; j += cn0 )
2066 for( k = 0; k < cn; k++ )
2068 double val = ((const double*)data)[j+k];
2070 sqsum.val[k] += val*val;
// finalize: mean = sum/area, stddev = sqrt(E[x^2] - (E[x])^2), clamped at 0
// to guard against tiny negative values from floating-point round-off
2080 inv_area = nonzero ? 1./nonzero : 0.;
2081 for( k = 0; k < cn; k++ )
2083 sum.val[k] *= inv_area;
2084 double t = sqsum.val[k]*inv_area - sum.val[k]*sum.val[k];
2085 sqsum.val[k] = sqrt(MAX(t, 0));
2094 // retrieves global extremums and their positions
// Reference implementation used by the test suite (counterpart of cvMinMaxLoc):
//   arr     - input matrix
//   mask    - optional 8-bit mask; when NULL an all-ones stack buffer is used
//   _minval/_maxval - optional outputs: global minimum / maximum value
//   _minidx/_maxidx - optional outputs: their (x,y) positions, (-1,-1) if none
//   coi     - channel of interest (0 = all; asserted single-channel-like below)
// Integer depths track extremums in int (iminval/imaxval); float depths in
// double (minval/maxval); the integer results are copied over at the end.
// NOTE(review): the embedded line numbering jumps, so braces, switch headers
// and break statements are not visible in this excerpt; code kept verbatim.
2095 void cvTsMinMaxLoc( const CvMat* arr, const CvMat* mask,
2096 double* _minval, double* _maxval,
2097 CvPoint* _minidx, CvPoint* _maxidx, int coi )
2100 int depth, cn, cols, ncols, el_size1;
// (-1,-1) marks "no extremum found" (e.g. empty mask)
2101 CvPoint minidx = {-1,-1}, maxidx = {-1,-1};
2103 int iminval = INT_MAX, imaxval = INT_MIN;
2104 double minval = DBL_MAX, maxval = -minval;
2106 cn = CV_MAT_CN(arr->type);
2108 ncols = arr->cols*cn;
2109 depth = CV_MAT_DEPTH(arr->type);
2110 el_size1 = CV_ELEM_SIZE(depth);
2114 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
// substitute an all-ones mask when none is supplied
2118 maskbuf = (uchar*)cvStackAlloc( cols );
2119 memset( maskbuf, 1, cols );
// multi-channel input requires an explicit channel of interest
2122 if( coi == 0 && cn > 1 )
2128 for( i = 0; i < arr->rows; i++ )
// (coi - (coi != 0)) shifts the row origin to the selected channel when coi > 0
2130 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
2131 const uchar* mdata = mask ? mask->data.ptr + mask->step*i : maskbuf;
// 8u branch; the j index stored in cvPoint(j,i) is the element column
// (note j advances by cn, so for coi>0 it is the interleaved column offset)
2136 for( j = 0; j < ncols; j += cn, mdata++ )
2138 int val = ((const uchar*)data)[j];
2139 if( val < iminval && *mdata )
2142 minidx = cvPoint(j,i);
2144 if( val > imaxval && *mdata )
2147 maxidx = cvPoint(j,i);
// 8s branch
2152 for( j = 0; j < ncols; j += cn, mdata++ )
2154 int val = ((const schar*)data)[j];
2155 if( val < iminval && *mdata )
2158 minidx = cvPoint(j,i);
2160 if( val > imaxval && *mdata )
2163 maxidx = cvPoint(j,i);
// 16u branch
2168 for( j = 0; j < ncols; j += cn, mdata++ )
2170 int val = ((const ushort*)data)[j];
2171 if( val < iminval && *mdata )
2174 minidx = cvPoint(j,i);
2176 if( val > imaxval && *mdata )
2179 maxidx = cvPoint(j,i);
// 16s branch
2184 for( j = 0; j < ncols; j += cn, mdata++ )
2186 int val = ((const short*)data)[j];
2187 if( val < iminval && *mdata )
2190 minidx = cvPoint(j,i);
2192 if( val > imaxval && *mdata )
2195 maxidx = cvPoint(j,i);
// 32s branch
2200 for( j = 0; j < ncols; j += cn, mdata++ )
2202 int val = ((const int*)data)[j];
2203 if( val < iminval && *mdata )
2206 minidx = cvPoint(j,i);
2208 if( val > imaxval && *mdata )
2211 maxidx = cvPoint(j,i);
// 32f branch: compare in floating point
2216 for( j = 0; j < ncols; j += cn, mdata++ )
2218 float val = ((const float*)data)[j];
2219 if( val < minval && *mdata )
2222 minidx = cvPoint(j,i);
2224 if( val > maxval && *mdata )
2227 maxidx = cvPoint(j,i);
// 64f branch
2232 for( j = 0; j < ncols; j += cn, mdata++ )
2234 double val = ((const double*)data)[j];
2235 if( val < minval && *mdata )
2238 minidx = cvPoint(j,i);
2240 if( val > maxval && *mdata )
2243 maxidx = cvPoint(j,i);
// no extremum found (empty mask): report zeros
2254 minval = maxval = 0;
// for integer depths, export the int-tracked extremums as doubles
2257 if( depth < CV_32F )
2258 minval = iminval, maxval = imaxval;
// Reference element-wise bitwise logic: c = a OP b, where OP (AND/OR/XOR) is
// selected by logic_op. Operates on raw bytes, so it is valid for any element
// type — the three inputs are only required to have equal types and sizes.
// NOTE(review): the embedded line numbering jumps (2295 -> 2297 etc.), so the
// switch header, break statements and closing braces are not visible in this
// excerpt; the code below is kept verbatim.
2277 void cvTsLogic( const CvMat* a, const CvMat* b, CvMat* c, int logic_op )
2279 int i = 0, j = 0, ncols;
// row width in BYTES, not elements: bitwise ops are type-agnostic
2280 ncols = a->cols*CV_ELEM_SIZE(a->type);
2282 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) );
2283 assert( CV_ARE_TYPES_EQ(a,c) && CV_ARE_SIZES_EQ(a,c) );
2285 for( i = 0; i < a->rows; i++ )
2287 uchar* a_data = a->data.ptr + a->step*i;
2288 uchar* b_data = b->data.ptr + b->step*i;
2289 uchar* c_data = c->data.ptr + c->step*i;
2293 case CV_TS_LOGIC_AND:
2294 for( j = 0; j < ncols; j++ )
2295 c_data[j] = (uchar)(a_data[j] & b_data[j]);
2297 case CV_TS_LOGIC_OR:
2298 for( j = 0; j < ncols; j++ )
2299 c_data[j] = (uchar)(a_data[j] | b_data[j]);
2301 case CV_TS_LOGIC_XOR:
2302 for( j = 0; j < ncols; j++ )
2303 c_data[j] = (uchar)(a_data[j] ^ b_data[j]);
// Reference bitwise logic with a scalar: c = a OP s. The scalar is first
// converted to the element type of `a` (one element packed into `buf`), then
// replicated across a whole malloc'ed row buffer `b_data`, after which the
// per-row loop is identical to the array-array case in cvTsLogic.
// CV_TS_LOGIC_NOT is implemented as XOR with an all-ones row.
// NOTE(review): the embedded line numbering jumps, so the `buf` declaration,
// switch headers/breaks and closing braces are not visible here; in
// particular the matching free(b_data) is presumably on a missing line —
// verify against the full source. Code kept verbatim.
2312 void cvTsLogicS( const CvMat* a, CvScalar s, CvMat* c, int logic_op )
2314 int i = 0, j = 0, k;
2315 int cn, ncols, elem_size;
2327 cn = CV_MAT_CN(a->type);
2328 elem_size = CV_ELEM_SIZE(a->type);
// row width in BYTES
2329 ncols = a->cols * elem_size;
2330 b_data = (uchar*)malloc( ncols );
2332 assert( CV_ARE_TYPES_EQ(a,c) && CV_ARE_SIZES_EQ(a,c) );
// NOT == XOR with 0xFF bytes
2334 if( logic_op == CV_TS_LOGIC_NOT )
2336 memset( b_data, -1, ncols );
2337 logic_op = CV_TS_LOGIC_XOR;
// pack the scalar into one element of the destination depth (saturating)
2341 switch( CV_MAT_DEPTH(a->type) )
2344 for( k = 0; k < cn; k++ )
2346 int val = cvRound(s.val[k]);
2347 buf.ptr[k] = CV_CAST_8U(val);
2351 for( k = 0; k < cn; k++ )
2353 int val = cvRound(s.val[k]);
2354 buf.c[k] = CV_CAST_8S(val);
2358 for( k = 0; k < cn; k++ )
2360 int val = cvRound(s.val[k]);
2361 buf.w[k] = CV_CAST_16U(val);
2365 for( k = 0; k < cn; k++ )
2367 int val = cvRound(s.val[k]);
2368 buf.s[k] = CV_CAST_16S(val);
2372 for( k = 0; k < cn; k++ )
2374 int val = cvRound(s.val[k]);
2375 buf.i[k] = CV_CAST_32S(val);
2379 for( k = 0; k < cn; k++ )
2381 double val = s.val[k];
2382 buf.f[k] = CV_CAST_32F(val);
2386 for( k = 0; k < cn; k++ )
2388 double val = s.val[k];
2389 buf.d[k] = CV_CAST_64F(val);
// tile the packed element across the whole row buffer
2397 for( j = 0; j < ncols; j += elem_size )
2398 memcpy( b_data + j, buf.ptr, elem_size );
2401 for( i = 0; i < a->rows; i++ )
2403 uchar* a_data = a->data.ptr + a->step*i;
2404 uchar* c_data = c->data.ptr + c->step*i;
2408 case CV_TS_LOGIC_AND:
2409 for( j = 0; j < ncols; j++ )
2410 c_data[j] = (uchar)(a_data[j] & b_data[j]);
2412 case CV_TS_LOGIC_OR:
2413 for( j = 0; j < ncols; j++ )
2414 c_data[j] = (uchar)(a_data[j] | b_data[j]);
2416 case CV_TS_LOGIC_XOR:
2417 for( j = 0; j < ncols; j++ )
2418 c_data[j] = (uchar)(a_data[j] ^ b_data[j]);
// Reference generalized matrix multiply (counterpart of cvGEMM):
//     d = alpha*op(a)*op(b) + beta*op(c)
// where op() is an optional transpose selected by CV_TS_GEMM_{A,B,C}_T flags.
// Supports 32F/64F matrices with 1 channel (real) or 2 channels (complex).
// Transposition is handled without copying: swapping (rows,cols) and
// (step,delta) turns row-major traversal into column-major traversal.
// NOTE(review): the embedded line numbering jumps, so some declarations
// (i, j, k, el_size, the double `s` accumulator), braces and a few statements
// (e.g. the real-part store next to line 2582) are not visible here; code
// kept verbatim.
2431 void cvTsGEMM( const CvMat* a, const CvMat* b, double alpha,
2432 const CvMat* c, double beta, CvMat* d, int flags )
2435 int a_rows, a_cols, b_rows, b_cols;
2436 int c_rows, c_cols, d_rows, d_cols;
// step = distance between rows, delta = distance between adjacent elements
// in a row (both measured in single-channel elements, not bytes)
2438 int a_step, a_delta, b_step, b_delta;
2439 int c_step, c_delta, d_step;
2441 a_rows = a->rows; a_cols = a->cols;
2442 cn = CV_MAT_CN(a->type);
// el_size of a single channel: strip the channel count from the type first
2443 el_size = CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
2444 a_step = a->step / el_size; a_delta = cn;
2445 d_rows = d->rows; d_cols = d->cols;
2446 b_rows = b->rows; b_cols = b->cols;
2447 b_step = b->step / el_size; b_delta = cn;
2448 c_rows = c ? c->rows : 0; c_cols = c ? c->cols : 0;
2449 c_step = c ? c->step / el_size : 0; c_delta = c ? cn : 0;
2450 d_step = d->step / el_size;
2452 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_TYPES_EQ(a,d) );
// cn == 1: real matrices, cn == 2: complex (re,im interleaved)
2453 assert( CV_MAT_CN(a->type) <= 2 );
// "transpose" a by exchanging its geometry and strides
2455 if( flags & CV_TS_GEMM_A_T )
2457 CV_SWAP( a_rows, a_cols, i );
2458 CV_SWAP( a_step, a_delta, i );
2461 if( flags & CV_TS_GEMM_B_T )
2463 CV_SWAP( b_rows, b_cols, i );
2464 CV_SWAP( b_step, b_delta, i );
2467 if( flags & CV_TS_GEMM_C_T )
2469 CV_SWAP( c_rows, c_cols, i );
2470 CV_SWAP( c_step, c_delta, i );
// shape compatibility and no in-place aliasing of the destination
2473 assert( a_rows == d_rows && a_cols == b_rows && b_cols == d_cols );
2474 assert( a->data.ptr != d->data.ptr && b->data.ptr != d->data.ptr );
2478 assert( CV_ARE_TYPES_EQ(a,c) && c_rows == d_rows && c_cols == d_cols );
// c may alias d only when it is not traversed transposed
2479 assert( c->data.ptr != d->data.ptr || (flags & CV_TS_GEMM_C_T) == 0 );
2482 if( CV_MAT_DEPTH(a->type) == CV_32F )
2484 float* a_data0 = a->data.fl;
2485 float* b_data0 = b->data.fl;
2486 float* c_data0 = c ? c->data.fl : 0;
2487 float* d_data = d->data.fl;
2489 for( i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2491 for( j = 0; j < d_cols; j++ )
2493 float* a_data = a_data0;
2494 float* b_data = b_data0 + j*b_delta;
2495 float* c_data = c_data0 + j*c_delta;
// real case: dot product accumulated in double for accuracy
2500 for( k = 0; k < a_cols; k++ )
2502 s += ((double)a_data[0])*b_data[0];
2506 d_data[j] = (float)(s*alpha + (c_data ? c_data[0]*beta : 0));
// complex case: (re,im) pairs, standard complex multiply-accumulate
2510 double s_re = 0, s_im = 0;
2512 for( k = 0; k < a_cols; k++ )
2514 s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
2515 s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0];
2525 s_re += c_data[0]*beta;
2526 s_im += c_data[1]*beta;
2529 d_data[j*2] = (float)s_re;
2530 d_data[j*2+1] = (float)s_im;
2535 else if( CV_MAT_DEPTH(a->type) == CV_64F )
2537 double* a_data0 = a->data.db;
2538 double* b_data0 = b->data.db;
2539 double* c_data0 = c ? c->data.db : 0;
2540 double* d_data = d->data.db;
2542 for( i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2544 for( j = 0; j < d_cols; j++ )
2546 double* a_data = a_data0;
2547 double* b_data = b_data0 + j*b_delta;
2548 double* c_data = c_data0 + j*c_delta;
// real case
2553 for( k = 0; k < a_cols; k++ )
2555 s += a_data[0]*b_data[0];
2559 d_data[j] = s*alpha + (c_data ? c_data[0]*beta : 0);
// complex case
2563 double s_re = 0, s_im = 0;
2565 for( k = 0; k < a_cols; k++ )
2567 s_re += a_data[0]*b_data[0] - a_data[1]*b_data[1];
2568 s_im += a_data[0]*b_data[1] + a_data[1]*b_data[0];
2577 s_re += c_data[0]*beta;
2578 s_im += c_data[1]*beta;
// the matching real-part store (d_data[j*2] = s_re;) is on a missing line
2582 d_data[j*2+1] = s_im;
// Builds a CvMat header `h` that references the sub-rectangle `rect` of `a`
// without copying data, stores it into *header and (presumably, on a line not
// visible in this excerpt) returns `header`. Single-row selections get step 0.
// NOTE(review): the embedded line numbering jumps (2606 -> 2612), so the
// final copy/return statements are not visible here; code kept verbatim.
2594 CvMat* cvTsSelect( const CvMat* a, CvMat* header, CvRect rect )
2599 h = cvMat( rect.height, rect.width, a->type );
2600 el_size = CV_ELEM_SIZE(a->type);
// point at the top-left element of the rectangle inside a's data
2602 h.data.ptr = a->data.ptr + rect.y*a->step + rect.x*el_size;
2603 h.step = rect.height > 1 ? a->step : 0;
// recompute the continuity flag: a sub-rect is continuous only if it spans
// full rows (or is a single row)
2604 h.type &= ~CV_MAT_CONT_FLAG;
2605 if( rect.height == 1 || h.step == h.cols*el_size )
2606 h.type |= CV_MAT_CONT_FLAG;
// Returns the minimum representable value for the given matrix depth
// (dispatching on CV_MAT_DEPTH(type)). The per-depth case bodies fall on
// lines not visible in this excerpt (numbering jumps 2614 -> 2635).
2612 double cvTsMinVal( int type )
2614 switch( CV_MAT_DEPTH(type) )
// Returns the maximum representable value for the given matrix depth
// (dispatching on CV_MAT_DEPTH(type)). The per-depth case bodies fall on
// lines not visible in this excerpt (numbering jumps 2637 -> 2658).
2635 double cvTsMaxVal( int type )
2637 switch( CV_MAT_DEPTH(type) )
// Copies `a` into the interior of the larger matrix `b` at offset `ofs` and
// fills the surrounding border according to border_mode:
//   CV_TS_BORDER_FILL      - constant fill_val
//   CV_TS_BORDER_REPLICATE - replicate the nearest edge row/column
//   CV_TS_BORDER_REFLECT   - reflect interior rows/columns outward
// The border is built in four passes: top rows, bottom rows, left columns,
// right columns — each pass selects 1-pixel-thick strips via cvTsSelect and
// copies/fills them with cvTsCopy / cvTsAdd.
// NOTE(review): the embedded line numbering jumps, so several declarations
// (temp, temp2, i, j, dir), braces and the j/dir bounce updates inside the
// REFLECT loops are not visible in this excerpt; code kept verbatim.
2658 void cvTsPrepareToFilter( const CvMat* a, CvMat* b, CvPoint ofs,
2659 int border_mode, CvScalar fill_val )
// the interior rectangle must fit inside b
2664 assert( 0 <= ofs.x && ofs.x <= b->cols - a->cols &&
2665 0 <= ofs.y && ofs.y <= b->rows - a->rows );
// copy the source into the interior of b
2667 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, a->rows ));
2668 cvTsCopy( a, &temp, 0 );
2670 assert( border_mode == CV_TS_BORDER_FILL ||
2671 border_mode == CV_TS_BORDER_REPLICATE ||
2672 border_mode == CV_TS_BORDER_REFLECT );
// ---- top border (rows above the interior) ----
2676 if( border_mode == CV_TS_BORDER_FILL )
2678 cvTsSelect( b, &temp, cvRect( ofs.x, 0, a->cols, ofs.y ));
// cvTsAdd with zero inputs writes the constant fill_val
2679 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
// single-row sources degrade REFLECT to REPLICATE
2681 else if( border_mode == CV_TS_BORDER_REPLICATE || a->rows == 1 )
2683 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, 1 ));
2684 for( i = ofs.y-1; i >= 0; i-- )
2686 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2687 cvTsCopy( &temp, &temp2, 0 );
2690 else if( border_mode == CV_TS_BORDER_REFLECT )
// j walks the interior rows, bouncing direction when it hits an edge
2693 for( i = ofs.y-1; i >= 0; i-- )
2695 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y+j, a->cols, 1 ));
2696 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2697 cvTsCopy( &temp, &temp2, 0 );
2698 if( (unsigned)(j + dir) >= (unsigned)a->rows )
// ---- bottom border (rows below the interior; ofs.y is reused as the first
// row past the interior here — set on a line not visible in this excerpt) ----
2706 if( ofs.y < b->rows )
2708 if( border_mode == CV_TS_BORDER_FILL )
2710 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, b->rows - ofs.y ));
2711 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2713 else if( border_mode == CV_TS_BORDER_REPLICATE || a->rows == 1 )
2715 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y - 1, a->cols, 1 ));
2716 for( i = ofs.y; i < b->rows; i++ )
2718 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2719 cvTsCopy( &temp, &temp2, 0 );
// reflect case: start from the next-to-last interior row going upward
2724 j = a->rows - 2; dir = -1;
2725 for( i = ofs.y; i < b->rows; i++ )
2727 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y-a->rows+j, a->cols, 1 ));
2728 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2729 cvTsCopy( &temp, &temp2, 0 );
2730 if( (unsigned)(j + dir) >= (unsigned)a->rows )
// ---- left border (columns to the left of the interior) ----
2739 if( border_mode == CV_TS_BORDER_FILL )
2741 cvTsSelect( b, &temp, cvRect( 0, 0, ofs.x, b->rows ));
2742 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2744 else if( border_mode == CV_TS_BORDER_REPLICATE || a->cols == 1 )
2746 cvTsSelect( b, &temp, cvRect( ofs.x, 0, 1, b->rows ));
2747 for( i = ofs.x-1; i >= 0; i-- )
2749 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2750 cvTsCopy( &temp, &temp2, 0 );
2753 else if( border_mode == CV_TS_BORDER_REFLECT )
2756 for( i = ofs.x-1; i >= 0; i-- )
2758 cvTsSelect( b, &temp, cvRect( ofs.x+j, 0, 1, b->rows ));
2759 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2760 cvTsCopy( &temp, &temp2, 0 );
2761 if( (unsigned)(j + dir) >= (unsigned)a->cols )
// ---- right border (columns to the right of the interior; ofs.x is reused
// as the first column past the interior, mirroring the bottom-border pass) ----
2769 if( ofs.x < b->cols )
2771 if( border_mode == CV_TS_BORDER_FILL )
2773 cvTsSelect( b, &temp, cvRect( ofs.x, 0, b->cols - ofs.x, b->rows ));
2774 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2776 else if( border_mode == CV_TS_BORDER_REPLICATE || a->cols == 1 )
2778 cvTsSelect( b, &temp, cvRect( ofs.x-1, 0, 1, b->rows ));
2779 for( i = ofs.x; i < b->cols; i++ )
2781 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2782 cvTsCopy( &temp, &temp2, 0 );
2785 else if( border_mode == CV_TS_BORDER_REFLECT )
2787 j = a->cols - 2; dir = -1;
2788 for( i = ofs.x; i < b->cols; i++ )
2790 cvTsSelect( b, &temp, cvRect( ofs.x-a->cols+j, 0, 1, b->rows ));
2791 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2792 cvTsCopy( &temp, &temp2, 0 );
2793 if( (unsigned)(j + dir) >= (unsigned)a->cols )
// Reference 2D linear filter (correlation with a CV_32FC1 kernel).
// `a` is the source ALREADY padded with borders (by cvTsPrepareToFilter):
// its size exceeds `b` by kernel_size - 1 in each dimension. For each output
// pixel the kernel taps are applied through a precomputed table of element
// offsets relative to the anchor, so the inner loop is a flat dot product.
// Results are saturated back to the source depth for integer types.
// NOTE(review): the embedded line numbering jumps, so some declarations
// (i, j, k, the `s` accumulators, the uniform-kernel test), case labels,
// breaks and the final free() of the two malloc'ed tables are not visible in
// this excerpt; code kept verbatim.
2802 void cvTsConvolve2D( const CvMat* a, CvMat* b, const CvMat* kernel, CvPoint anchor )
2805 int cn, ncols, a_step;
2806 int ker_size = kernel->rows*kernel->cols;
// offset[k] = element offset of the k-th kernel tap relative to the anchor
2807 int* offset = (int*)malloc( ker_size*sizeof(offset[0]));
2808 float* k_data = (float*)malloc( ker_size*sizeof(k_data[0]));
// `first` is used by the uniform-kernel fast path further below
2810 float first = kernel->data.fl[0];
2811 uchar *a_data, *b_data;
2813 cn = CV_MAT_CN(a->type);
// source step in single-channel elements (type with channels stripped)
2815 a_step = a->step / CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
2817 assert( a->cols == b->cols + kernel->cols - 1 &&
2818 a->rows == b->rows + kernel->rows - 1 && CV_ARE_TYPES_EQ( a, b ) );
2819 assert( CV_MAT_TYPE(kernel->type) == CV_32FC1 );
2820 assert( 0 <= anchor.x && anchor.x < kernel->cols &&
2821 0 <= anchor.y && anchor.y < kernel->rows );
// flatten the kernel and build the offset table
2823 for( i = 0, k = 0; i < kernel->rows; i++ )
2824 for( j = 0; j < kernel->cols; j++ )
2826 float f = ((float*)(kernel->data.ptr + kernel->step*i))[j];
2830 offset[k++] = (i - anchor.y)*a_step + (j - anchor.x)*cn;
// start at the anchor position inside the padded source
2837 a_data = a->data.ptr + a->step*anchor.y + CV_ELEM_SIZE(a->type)*anchor.x;
2838 b_data = b->data.ptr;
2840 for( i = 0; i < b->rows; i++, a_data += a->step, b_data += b->step )
2842 switch( CV_MAT_DEPTH(a->type) )
// 8u: accumulate, round, saturate
2845 for( j = 0; j < ncols; j++ )
2849 for( k = 0; k < ker_size; k++ )
2850 s += ((uchar*)a_data)[j+offset[k]]*k_data[k];
2852 ((uchar*)b_data)[j] = CV_CAST_8U(val);
// 8s
2856 for( j = 0; j < ncols; j++ )
2860 for( k = 0; k < ker_size; k++ )
2861 s += ((schar*)a_data)[j+offset[k]]*k_data[k];
2863 ((schar*)b_data)[j] = CV_CAST_8S(val);
// 16u
2867 for( j = 0; j < ncols; j++ )
2871 for( k = 0; k < ker_size; k++ )
2872 s += ((ushort*)a_data)[j+offset[k]]*k_data[k];
2874 ((ushort*)b_data)[j] = CV_CAST_16U(val);
// 16s
2878 for( j = 0; j < ncols; j++ )
2882 for( k = 0; k < ker_size; k++ )
2883 s += ((short*)a_data)[j+offset[k]]*k_data[k];
2885 ((short*)b_data)[j] = CV_CAST_16S(val);
// 32s: round without extra saturation
2889 for( j = 0; j < ncols; j++ )
2892 for( k = 0; k < ker_size; k++ )
2893 s += ((int*)a_data)[j+offset[k]]*k_data[k];
2894 ((int*)b_data)[j] = cvRound(s);
// 32f, general kernel
2900 for( j = 0; j < ncols; j++ )
2903 for( k = 0; k < ker_size; k++ )
2904 s += (double)((float*)a_data)[j+offset[k]]*k_data[k];
2905 ((float*)b_data)[j] = (float)s;
2910 // special branch to speedup feature selection and blur tests
// (uniform kernel: sum the samples once and scale by the common coefficient)
2911 for( j = 0; j < ncols; j++ )
2914 for( k = 0; k < ker_size; k++ )
2915 s += (double)((float*)a_data)[j+offset[k]];
2916 ((float*)b_data)[j] = (float)(s*first);
// 64f
2921 for( j = 0; j < ncols; j++ )
2924 for( k = 0; k < ker_size; k++ )
2925 s += ((double*)a_data)[j+offset[k]]*k_data[k];
2926 ((double*)b_data)[j] = (double)s;
// Reference morphological min/max filter (erode when op_type != CV_TS_MAX,
// dilate when op_type == CV_TS_MAX) over an IplConvKernel structuring
// element. `a` is the source already padded by (nCols-1, nRows-1); only
// offsets where kernel->values is nonzero (or all, for a NULL values array)
// participate. Per output pixel: running min or max over the offset table.
// NOTE(review): the embedded line numbering jumps, so some declarations
// (i, j, k), the calc_max branch structure inside the inner loops, case
// labels/breaks and the final free(offset) are not visible in this excerpt;
// code kept verbatim.
2939 void cvTsMinMaxFilter( const CvMat* a, CvMat* b, const IplConvKernel* kernel, int op_type )
2942 int cn, ncols, a_step;
2943 int ker_size = kernel->nRows*kernel->nCols;
2944 int* offset = (int*)malloc( ker_size*sizeof(offset[0]));
2945 int calc_max = op_type == CV_TS_MAX;
2946 uchar *a_data, *b_data;
2948 cn = CV_MAT_CN(a->type);
// source step in single-channel elements
2950 a_step = a->step / CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
2952 assert( a->cols == b->cols + kernel->nCols - 1 &&
2953 a->rows == b->rows + kernel->nRows - 1 && CV_ARE_TYPES_EQ( a, b ) );
2954 assert( 0 <= kernel->anchorX && kernel->anchorX < kernel->nCols &&
2955 0 <= kernel->anchorY && kernel->anchorY < kernel->nRows );
// build the offset table from the active structuring-element cells
2957 for( i = 0, k = 0; i < kernel->nRows; i++ )
2958 for( j = 0; j < kernel->nCols; j++ )
2960 if( !kernel->values || kernel->values[i*kernel->nCols + j] )
2961 offset[k++] = (i - kernel->anchorY)*a_step + (j - kernel->anchorX)*cn;
// start at the anchor position inside the padded source
2969 a_data = a->data.ptr + kernel->anchorY*a->step + kernel->anchorX*CV_ELEM_SIZE(a->type);
2970 b_data = b->data.ptr;
2972 for( i = 0; i < b->rows; i++, a_data += a->step, b_data += b->step )
2974 switch( CV_MAT_DEPTH(a->type) )
// 8u
2977 for( j = 0; j < ncols; j++ )
2979 int m = ((uchar*)a_data)[j+offset[0]];
2980 for( k = 1; k < ker_size; k++ )
2982 int v = ((uchar*)a_data)[j+offset[k]];
2991 ((uchar*)b_data)[j] = (uchar)m;
// 16u
2995 for( j = 0; j < ncols; j++ )
2997 int m = ((ushort*)a_data)[j+offset[0]];
2998 for( k = 1; k < ker_size; k++ )
3000 int v = ((ushort*)a_data)[j+offset[k]];
3009 ((ushort*)b_data)[j] = (ushort)m;
// 16s
3013 for( j = 0; j < ncols; j++ )
3015 int m = ((short*)a_data)[j+offset[0]];
3016 for( k = 1; k < ker_size; k++ )
3018 int v = ((short*)a_data)[j+offset[k]];
3027 ((short*)b_data)[j] = (short)m;
// 32s
3031 for( j = 0; j < ncols; j++ )
3033 int m = ((int*)a_data)[j+offset[0]];
3034 for( k = 1; k < ker_size; k++ )
3036 int v = ((int*)a_data)[j+offset[k]];
3045 ((int*)b_data)[j] = m;
// 32f
3049 for( j = 0; j < ncols; j++ )
3051 float m = ((float*)a_data)[j+offset[0]];
3052 for( k = 1; k < ker_size; k++ )
3054 float v = ((float*)a_data)[j+offset[k]];
3063 ((float*)b_data)[j] = (float)m;
// 64f
3067 for( j = 0; j < ncols; j++ )
3069 double m = ((double*)a_data)[j+offset[0]];
3070 for( k = 1; k < ker_size; k++ )
3072 double v = ((double*)a_data)[j+offset[k]];
3081 ((double*)b_data)[j] = (double)m;
// Reference cross-correlation: returns the sum over all elements of
// a[i]*b[i] (dot product of the two matrices viewed as flat vectors),
// accumulated in double. Inputs must have equal types and sizes.
// NOTE(review): the embedded line numbering jumps, so some declarations
// (i, j, cn, ncols, the `s` accumulator), the switch header, breaks and the
// return statement are not visible in this excerpt; code kept verbatim.
3093 double cvTsCrossCorr( const CvMat* a, const CvMat* b )
3099 cn = CV_MAT_CN(a->type);
3102 assert( CV_ARE_SIZES_EQ( a, b ) && CV_ARE_TYPES_EQ( a, b ) );
3103 for( i = 0; i < a->rows; i++ )
3105 uchar* a_data = a->data.ptr + a->step*i;
3106 uchar* b_data = b->data.ptr + b->step*i;
3108 switch( CV_MAT_DEPTH(a->type) )
// 8u
3111 for( j = 0; j < ncols; j++ )
3112 s += ((uchar*)a_data)[j]*((uchar*)b_data)[j];
// 8s
3115 for( j = 0; j < ncols; j++ )
3116 s += ((schar*)a_data)[j]*((schar*)b_data)[j];
// 16u: promote to double before multiplying to avoid int overflow
3119 for( j = 0; j < ncols; j++ )
3120 s += (double)((ushort*)a_data)[j]*((ushort*)b_data)[j];
// 16s
3123 for( j = 0; j < ncols; j++ )
3124 s += ((short*)a_data)[j]*((short*)b_data)[j];
// 32s: promote to double before multiplying
3127 for( j = 0; j < ncols; j++ )
3128 s += ((double)((int*)a_data)[j])*((int*)b_data)[j];
// 32f
3131 for( j = 0; j < ncols; j++ )
3132 s += ((double)((float*)a_data)[j])*((float*)b_data)[j];
// 64f
3135 for( j = 0; j < ncols; j++ )
3136 s += ((double*)a_data)[j]*((double*)b_data)[j];
// Reference per-pixel affine channel transform (counterpart of cvTransform):
// each cn-channel source pixel x is mapped to dst_cn-channel output
// M*x + t, where the cn x (cn+1) matrix [M|t] is assembled into `mat` from
// `transmat` (32F or 64F) and the optional `shift` vector.
// Pipeline per row: unpack source to a double buffer `buf`, apply the
// transform for the supported channel counts (1..4), then pack `dst_buf`
// back to the destination depth with rounding/saturation.
// NOTE(review): `mat[20]` fits at most a 4x5 matrix, consistent with the
// cn==1..4 cases below. The embedded line numbering jumps, so switch headers,
// case labels, breaks and some braces are not visible in this excerpt; code
// kept verbatim.
3148 void cvTsTransform( const CvMat* a, CvMat* b, const CvMat* transmat, const CvMat* shift )
3150 int i, j, k, cols, dst_cols;
3151 int cn, dst_cn, depth, mat_depth, shiftstep;
3152 double mat[20], *buf, *dst_buf;
3154 cn = CV_MAT_CN(a->type);
3155 dst_cn = CV_MAT_CN(b->type);
3156 depth = CV_MAT_DEPTH(a->type);
3157 mat_depth = CV_MAT_DEPTH(transmat->type);
3158 cols = transmat->cols;
3160 // prepare cn x (cn + 1) transform matrix
3161 if( mat_depth == CV_32F )
// shift may be a row or a column vector; step between its elements
3163 shiftstep = shift && shift->rows > 1 ? shift->step/sizeof(float) : 1;
3164 for( i = 0; i < transmat->rows; i++ )
// translation column defaults to 0, overwritten below when shift is given
3166 mat[i*(cn+1) + cn] = 0.;
3167 for( j = 0; j < cols; j++ )
3168 mat[i*(cn+1) + j] = ((float*)(transmat->data.ptr + transmat->step*i))[j];
3170 mat[i*(cn+1) + cn] = shift->data.fl[i*shiftstep];
3175 assert( mat_depth == CV_64F );
3177 shiftstep = shift && shift->rows > 1 ? shift->step/sizeof(double) : 1;
3178 for( i = 0; i < transmat->rows; i++ )
3180 mat[i*(cn+1) + cn] = 0.;
3181 for( j = 0; j < cols; j++ )
3182 mat[i*(cn+1) + j] = ((double*)(transmat->data.ptr + transmat->step*i))[j];
3184 mat[i*(cn+1) + cn] = shift->data.db[i*shiftstep];
// per-row working buffers (in interleaved element counts)
3189 cols = a->cols * cn;
3190 dst_cols = a->cols * dst_cn;
3191 buf = (double*)cvStackAlloc( cols * sizeof(double) );
3192 dst_buf = (double*)cvStackAlloc( dst_cols * sizeof(double) );
3194 for( i = 0; i < a->rows; i++ )
3196 uchar* src = a->data.ptr + i*a->step;
3197 uchar* dst = b->data.ptr + i*b->step;
3198 double* _dst = dst_buf;
// --- unpack source row into doubles, by depth ---
3203 for( j = 0; j < cols; j++ )
3204 buf[j] = ((uchar*)src)[j];
3207 for( j = 0; j < cols; j++ )
3208 buf[j] = ((ushort*)src)[j];
3211 for( j = 0; j < cols; j++ )
3212 buf[j] = ((short*)src)[j];
3215 for( j = 0; j < cols; j++ )
3216 buf[j] = ((int*)src)[j];
3219 for( j = 0; j < cols; j++ )
3220 buf[j] = ((float*)src)[j];
3223 for( j = 0; j < cols; j++ )
3224 buf[j] = ((double*)src)[j];
// --- apply the cn x (cn+1) transform, specialized per source channel count;
// mat is indexed with row stride (cn+1): 2 for cn==1, 3 for cn==2, etc. ---
3233 for( j = 0; j < cols; j++, _dst += dst_cn )
3234 for( k = 0; k < dst_cn; k++ )
3235 _dst[k] = buf[j]*mat[2*k] + mat[2*k+1];
3238 for( j = 0; j < cols; j += 2, _dst += dst_cn )
3239 for( k = 0; k < dst_cn; k++ )
3240 _dst[k] = buf[j]*mat[3*k] + buf[j+1]*mat[3*k+1] + mat[3*k+2];
3243 for( j = 0; j < cols; j += 3, _dst += dst_cn )
3244 for( k = 0; k < dst_cn; k++ )
3245 _dst[k] = buf[j]*mat[4*k] + buf[j+1]*mat[4*k+1] +
3246 buf[j+2]*mat[4*k+2] + mat[4*k+3];
3249 for( j = 0; j < cols; j += 4, _dst += dst_cn )
3250 for( k = 0; k < dst_cn; k++ )
3251 _dst[k] = buf[j]*mat[5*k] + buf[j+1]*mat[5*k+1] +
3252 buf[j+2]*mat[5*k+2] + buf[j+3]*mat[5*k+3] + mat[5*k+4];
// --- pack the double results back to the destination depth (saturating) ---
3261 for( j = 0; j < dst_cols; j++ )
3263 int val = cvRound(dst_buf[j]);
3264 ((uchar*)dst)[j] = CV_CAST_8U(val);
3268 for( j = 0; j < dst_cols; j++ )
3270 int val = cvRound(dst_buf[j]);
3271 ((ushort*)dst)[j] = CV_CAST_16U(val);
3275 for( j = 0; j < dst_cols; j++ )
3277 int val = cvRound(dst_buf[j]);
3278 ((short*)dst)[j] = CV_CAST_16S(val);
3282 for( j = 0; j < dst_cols; j++ )
3283 ((int*)dst)[j] = cvRound(dst_buf[j]);
3286 for( j = 0; j < dst_cols; j++ )
3287 ((float*)dst)[j] = (float)dst_buf[j];
3290 for( j = 0; j < dst_cols; j++ )
3291 ((double*)dst)[j] = dst_buf[j];
// Reference matrix transpose: b = a^T, copying elements byte by byte so any
// element size works. Two traversal strategies are visible: a triangular
// swap loop (apparently the in-place/square case, iterating j <= i) and a
// full rectangular copy loop. The branch condition selecting between them
// falls on lines not visible in this excerpt.
// NOTE(review): the embedded line numbering jumps, so a_step/b_step
// assignments, rows/cols assignments, the inner byte-copy statements and the
// return statement are not visible here; code kept verbatim.
3300 CvMat* cvTsTranspose( const CvMat* a, CvMat* b )
3302 int i, j, k, rows, cols, elem_size;
3303 uchar *a_data, *b_data;
3306 elem_size = CV_ELEM_SIZE(a->type);
3310 assert( a->rows == b->cols && a->cols == b->rows && CV_ARE_TYPES_EQ(a,b) );
3311 a_data = a->data.ptr;
3313 b_data = b->data.ptr;
// triangular traversal: visit each unordered pair (i,j) once, j <= i,
// exchanging the mirrored elements (handles a == b safely)
3318 for( i = 0; i < rows; i++ )
3320 for( j = 0; j <= i; j++ )
3322 uchar* a_ij = a_data + a_step*i + elem_size*j;
3323 uchar* a_ji = a_data + a_step*j + elem_size*i;
3324 uchar* b_ij = b_data + b_step*i + elem_size*j;
3325 uchar* b_ji = b_data + b_step*j + elem_size*i;
3326 for( k = 0; k < elem_size; k++ )
// rectangular traversal: b(i,j) = a(j,i) for distinct matrices
3338 for( i = 0; i < cols; i++ )
3340 for( j = 0; j < rows; j++ )
3342 uchar* a_ji = a_data + a_step*j + elem_size*i;
3343 uchar* b_ij = b_data + b_step*i + elem_size*j;
3344 for( k = 0; k < elem_size; k++ )
// Reference flip: b = a mirrored. flip_type <= 0 flips vertically (rows are
// read bottom-up: a_data starts at the last row; the per-iteration step
// update is on a line not visible in this excerpt), flip_type == 0 copies
// whole rows, any other value reverses elements within each row
// (horizontal flip). Element reversal is done byte-wise so it works for any
// element size. In-place operation is explicitly disallowed by the assert.
// NOTE(review): the embedded line numbering jumps, so a_step/b_step, rows
// assignments and some braces are not visible here; code kept verbatim.
3354 void cvTsFlip( const CvMat* a, CvMat* b, int flip_type )
3356 int i, j, k, rows, cols, elem_size;
3357 uchar *a_data, *b_data;
3360 elem_size = CV_ELEM_SIZE(a->type);
// row width in BYTES
3362 cols = a->cols*elem_size;
3364 assert( CV_ARE_SIZES_EQ(a,b) && CV_ARE_TYPES_EQ(a,b) && a->data.ptr != b->data.ptr );
3365 a_data = a->data.ptr;
3367 b_data = b->data.ptr;
// vertical component: start reading from the last source row
3370 if( flip_type <= 0 )
3372 a_data += a_step*(rows-1);
3376 for( i = 0; i < rows; i++ )
3378 if( flip_type == 0 )
3379 memcpy( b_data, a_data, cols );
// horizontal flip: copy elements from the mirrored column, byte by byte
3382 for( j = 0; j < cols; j += elem_size )
3383 for( k = 0; k < elem_size; k++ )
3384 b_data[j+k] = a_data[cols - elem_size - j + k];
3392 void cvTsPatchZeros( CvMat* mat, double level )
3394 int i, j, ncols = mat->cols * CV_MAT_CN(mat->type);
3396 for( i = 0; i < mat->rows; i++ )
3398 switch( CV_MAT_DEPTH(mat->type) )
3402 float* data = (float*)(mat->data.ptr + i*mat->step);
3403 for( j = 0; j < ncols; j++ )
3404 if( fabs(data[j]) < level )
3410 double* data = (double*)(mat->data.ptr + i*mat->step);
3411 for( j = 0; j < ncols; j++ )
3412 if( fabs(data[j]) < level )