1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
44 /****************************************************************************************\
46 \****************************************************************************************/
/* Optional external-BLAS hooks: these function pointers stay NULL unless an
   optimized BLAS plugin registers its GEMM implementations at runtime;
   cvGEMM below checks icvBLAS_GEMM_32f_p before taking the BLAS fast path. */
48 icvBLAS_GEMM_32f_t icvBLAS_GEMM_32f_p = 0;
49 icvBLAS_GEMM_64f_t icvBLAS_GEMM_64f_p = 0;
50 icvBLAS_GEMM_32fc_t icvBLAS_GEMM_32fc_p = 0;
51 icvBLAS_GEMM_64fc_t icvBLAS_GEMM_64fc_p = 0;
54 icvGEMM_CopyBlock( const uchar* src, int src_step,
55 uchar* dst, int dst_step,
56 CvSize size, int pix_size )
59 size.width = size.width * (pix_size / sizeof(int));
61 for( ; size.height--; src += src_step, dst += dst_step )
63 for( j = 0; j <= size.width - 4; j += 4 )
65 int t0 = ((const int*)src)[j];
66 int t1 = ((const int*)src)[j+1];
68 ((int*)dst)[j+1] = t1;
69 t0 = ((const int*)src)[j+2];
70 t1 = ((const int*)src)[j+3];
71 ((int*)dst)[j+2] = t0;
72 ((int*)dst)[j+3] = t1;
75 for( ; j < size.width; j++ )
76 ((int*)dst)[j] = ((const int*)src)[j];
82 icvGEMM_TransposeBlock( const uchar* src, int src_step,
83 uchar* dst, int dst_step,
84 CvSize size, int pix_size )
87 for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size )
89 const uchar* _src = src;
93 for( j = 0; j < size.height; j++, _src += src_step )
94 ((int*)dst)[j] = ((int*)_src)[0];
97 for( j = 0; j < size.height*2; j += 2, _src += src_step )
99 int t0 = ((int*)_src)[0];
100 int t1 = ((int*)_src)[1];
102 ((int*)dst)[j+1] = t1;
106 for( j = 0; j < size.height*4; j += 4, _src += src_step )
108 int t0 = ((int*)_src)[0];
109 int t1 = ((int*)_src)[1];
111 ((int*)dst)[j+1] = t1;
112 t0 = ((int*)_src)[2];
113 t1 = ((int*)_src)[3];
114 ((int*)dst)[j+2] = t0;
115 ((int*)dst)[j+3] = t1;
/* Generates icvGEMMSingleMul_<flavor>: a single-pass generalized matrix
   multiply kernel computing D = alpha*op(A)*op(B) + beta*op(C), where op() is
   optional transposition selected by CV_GEMM_A_T/B_T/C_T bits in `flags`.
   `arrtype` is the storage element type, `worktype` the accumulator type.
   Steps are received in bytes and converted to element units up front.
   Visible structure: a special-cased outer product path (n == 1), an A*Bt
   path (CV_GEMM_B_T), a narrow-destination path (row fits in ~1600 bytes)
   and a fallback that accumulates a full destination row in a stack buffer
   (d_buf) before scaling/storing.
   NOTE(review): this extraction has fused original line numbers at the start
   of each line and is missing many continuation lines (braces, else-branches,
   a_step0/a_step1 setup, etc.) — compare against upstream cxmatmul before
   relying on it; code left byte-identical here. */
125 #define ICV_DEF_GEMM_SINGLE_MUL( flavor, arrtype, worktype ) \
126 static CvStatus CV_STDCALL \
127 icvGEMMSingleMul_##flavor( const arrtype* a_data, size_t a_step, \
128 const arrtype* b_data, size_t b_step, \
129 const arrtype* c_data, size_t c_step, \
130 arrtype* d_data, size_t d_step, \
131 CvSize a_size, CvSize d_size, \
132 double alpha, double beta, int flags ) \
134 int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height; \
135 const arrtype *_a_data = a_data, *_b_data = b_data, *_c_data = c_data; \
136 arrtype* a_buf = 0; \
137 size_t a_step0, a_step1, c_step0, c_step1, t_step; \
139 a_step /= sizeof(a_data[0]); \
140 b_step /= sizeof(b_data[0]); \
141 c_step /= sizeof(c_data[0]); \
142 d_step /= sizeof(d_data[0]); \
147 c_step0 = c_step1 = 0; \
148 else if( !(flags & CV_GEMM_C_T) ) \
149 c_step0 = c_step, c_step1 = 1; \
151 c_step0 = 1, c_step1 = c_step; \
153 if( flags & CV_GEMM_A_T ) \
155 CV_SWAP( a_step0, a_step1, t_step ); \
157 if( a_step > 1 && n > 1 ) \
158 a_buf = (arrtype*)cvStackAlloc(n*sizeof(a_data[0])); \
161 if( n == 1 ) /* external product */ \
163 arrtype* b_buf = 0; \
167 a_buf = (arrtype*)cvStackAlloc(drows*sizeof(a_data[0])); \
168 for( k = 0; k < drows; k++ ) \
169 a_buf[k] = a_data[a_step*k]; \
175 b_buf = (arrtype*)cvStackAlloc(d_size.width*sizeof(b_buf[0]) ); \
176 for( j = 0; j < d_size.width; j++ ) \
177 b_buf[j] = b_data[j*b_step]; \
181 for( i = 0; i < drows; i++, _c_data += c_step0, \
184 worktype al = worktype(a_data[i])*alpha; \
186 for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 )\
188 worktype s0 = al*b_data[j]; \
189 worktype s1 = al*b_data[j+1]; \
192 d_data[j] = arrtype(s0); \
193 d_data[j+1] = arrtype(s1); \
197 d_data[j] = arrtype(s0 + c_data[0]*beta); \
198 d_data[j+1] = arrtype(s1 + c_data[c_step1]*beta); \
202 for( ; j < d_size.width; j++, c_data += c_step1 ) \
204 worktype s0 = al*b_data[j]; \
206 d_data[j] = arrtype(s0); \
208 d_data[j] = arrtype(s0 + c_data[0]*beta); \
212 else if( flags & CV_GEMM_B_T ) /* A * Bt */ \
214 for( i = 0; i < drows; i++, _a_data += a_step0, \
215 _c_data += c_step0, \
224 for( k = 0; k < n; k++ ) \
225 a_buf[k] = a_data[a_step1*k]; \
229 for( j = 0; j < d_size.width; j++, b_data += b_step, \
230 c_data += c_step1 ) \
232 worktype s0(0), s1(0), s2(0), s3(0); \
234 for( k = 0; k <= n - 4; k += 4 ) \
236 s0 += worktype(a_data[k])*b_data[k]; \
237 s1 += worktype(a_data[k+1])*b_data[k+1]; \
238 s2 += worktype(a_data[k+2])*b_data[k+2]; \
239 s3 += worktype(a_data[k+3])*b_data[k+3]; \
242 for( ; k < n; k++ ) \
243 s0 += worktype(a_data[k])*b_data[k]; \
244 s0 = (s0+s1+s2+s3)*alpha; \
247 d_data[j] = arrtype(s0); \
249 d_data[j] = arrtype(s0 + c_data[0]*beta); \
253 else if( d_size.width*sizeof(d_data[0]) <= 1600 ) \
255 for( i = 0; i < drows; i++, _a_data += a_step0, \
256 _c_data += c_step0, \
259 a_data = _a_data, c_data = _c_data; \
263 for( k = 0; k < n; k++ ) \
264 a_buf[k] = a_data[a_step1*k]; \
268 for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 ) \
270 const arrtype* b = _b_data + j; \
271 worktype s0(0), s1(0), s2(0), s3(0); \
273 for( k = 0; k < n; k++, b += b_step ) \
275 worktype a(a_data[k]); \
276 s0 += a * b[0]; s1 += a * b[1]; \
277 s2 += a * b[2]; s3 += a * b[3]; \
282 d_data[j] = arrtype(s0*alpha); \
283 d_data[j+1] = arrtype(s1*alpha); \
284 d_data[j+2] = arrtype(s2*alpha); \
285 d_data[j+3] = arrtype(s3*alpha); \
289 s0 = s0*alpha; s1 = s1*alpha; \
290 s2 = s2*alpha; s3 = s3*alpha; \
291 d_data[j] = arrtype(s0 + c_data[0]*beta); \
292 d_data[j+1] = arrtype(s1 + c_data[c_step1]*beta); \
293 d_data[j+2] = arrtype(s2 + c_data[c_step1*2]*beta); \
294 d_data[j+3] = arrtype(s3 + c_data[c_step1*3]*beta); \
298 for( ; j < m; j++, c_data += c_step1 ) \
300 const arrtype* b = _b_data + j; \
303 for( k = 0; k < n; k++, b += b_step ) \
304 s0 += worktype(a_data[k]) * b[0]; \
308 d_data[j] = arrtype(s0); \
310 d_data[j] = arrtype(s0 + c_data[0]*beta); \
316 worktype* d_buf = (worktype*)cvStackAlloc(m*sizeof(d_buf[0])); \
318 for( i = 0; i < drows; i++, _a_data += a_step0, \
319 _c_data += c_step0, \
328 for( k = 0; k < n; k++ ) \
329 a_buf[k] = _a_data[a_step1*k]; \
333 for( j = 0; j < m; j++ ) \
334 d_buf[j] = worktype(0); \
336 for( k = 0; k < n; k++, b_data += b_step ) \
338 worktype al(a_data[k]); \
340 for( j = 0; j <= m - 4; j += 4 ) \
342 worktype t0 = d_buf[j] + b_data[j]*al; \
343 worktype t1 = d_buf[j+1] + b_data[j+1]*al; \
346 t0 = d_buf[j+2] + b_data[j+2]*al; \
347 t1 = d_buf[j+3] + b_data[j+3]*al; \
352 for( ; j < m; j++ ) \
353 d_buf[j] += b_data[j]*al; \
357 for( j = 0; j < m; j++ ) \
358 d_data[j] = arrtype(d_buf[j]*alpha); \
360 for( j = 0; j < m; j++, c_data += c_step1 ) \
362 worktype t = d_buf[j]*alpha; \
363 d_data[j] = arrtype(t + c_data[0]*beta); \
/* Generates icvGEMMBlockMul_<flavor>: multiplies one cache-sized block,
   D(block) = op(A block) * op(B block), accumulating into the worktype
   destination buffer when bit 16 of `flags` (do_acc) is set — used by the
   blocked path of cvGEMM, which later folds in alpha/beta via the store
   kernel. Two paths are visible: B transposed (dot products down rows of B)
   and plain A*B with a 4-column unrolled inner loop.
   NOTE(review): extraction artifacts — fused leading line numbers and
   missing continuation lines (braces, a_step0/a_step1 setup, some stores);
   code left byte-identical, compare against upstream cxmatmul. */
371 #define ICV_DEF_GEMM_BLOCK_MUL( flavor, arrtype, worktype ) \
372 static CvStatus CV_STDCALL \
373 icvGEMMBlockMul_##flavor( const arrtype* a_data, size_t a_step, \
374 const arrtype* b_data, size_t b_step, \
375 worktype* d_data, size_t d_step, \
376 CvSize a_size, CvSize d_size, int flags ) \
378 int i, j, k, n = a_size.width, m = d_size.width; \
379 const arrtype *_a_data = a_data, *_b_data = b_data; \
380 arrtype* a_buf = 0; \
381 size_t a_step0, a_step1, t_step; \
382 int do_acc = flags & 16; \
384 a_step /= sizeof(a_data[0]); \
385 b_step /= sizeof(b_data[0]); \
386 d_step /= sizeof(d_data[0]); \
391 if( flags & CV_GEMM_A_T ) \
393 CV_SWAP( a_step0, a_step1, t_step ); \
395 a_buf = (arrtype*)cvStackAlloc(n*sizeof(a_data[0])); \
398 if( flags & CV_GEMM_B_T ) \
400 /* second operand is transposed */ \
401 for( i = 0; i < d_size.height; i++, _a_data += a_step0, \
404 a_data = _a_data; b_data = _b_data; \
408 for( k = 0; k < n; k++ ) \
409 a_buf[k] = a_data[a_step1*k]; \
413 for( j = 0; j < d_size.width; j++, b_data += b_step ) \
415 worktype s0 = do_acc ? d_data[j]:worktype(0), s1(0);\
416 for( k = 0; k <= n - 2; k += 2 ) \
418 s0 += worktype(a_data[k])*b_data[k]; \
419 s1 += worktype(a_data[k+1])*b_data[k+1]; \
422 for( ; k < n; k++ ) \
423 s0 += worktype(a_data[k])*b_data[k]; \
425 d_data[j] = s0 + s1; \
431 for( i = 0; i < d_size.height; i++, _a_data += a_step0, \
434 a_data = _a_data, b_data = _b_data; \
438 for( k = 0; k < n; k++ ) \
439 a_buf[k] = a_data[a_step1*k]; \
443 for( j = 0; j <= m - 4; j += 4 ) \
445 worktype s0, s1, s2, s3; \
446 const arrtype* b = b_data + j; \
450 s0 = d_data[j]; s1 = d_data[j+1]; \
451 s2 = d_data[j+2]; s3 = d_data[j+3]; \
454 s0 = s1 = s2 = s3 = worktype(0); \
456 for( k = 0; k < n; k++, b += b_step ) \
458 worktype a(a_data[k]); \
459 s0 += a * b[0]; s1 += a * b[1]; \
460 s2 += a * b[2]; s3 += a * b[3]; \
463 d_data[j] = s0; d_data[j+1] = s1; \
464 d_data[j+2] = s2; d_data[j+3] = s3; \
467 for( ; j < m; j++ ) \
469 const arrtype* b = b_data + j; \
470 worktype s0 = do_acc ? d_data[j] : worktype(0); \
472 for( k = 0; k < n; k++, b += b_step ) \
473 s0 += worktype(a_data[k]) * b[0]; \
/* Generates icvGEMMStore_<flavor>: final stage of the blocked GEMM path.
   Converts the accumulated worktype buffer d_buf into the destination
   arrtype matrix, applying D = alpha*d_buf + beta*op(C). Two visible paths:
   with a C term (4-way unrolled alpha/beta combine) and without (plain
   alpha scaling) — the branch selecting between them was lost in extraction.
   NOTE(review): fused leading line numbers and missing lines (braces,
   the c_data reset per row, the if( c_data ) test) — code left
   byte-identical; compare against upstream cxmatmul. */
484 #define ICV_DEF_GEMM_STORE( flavor, arrtype, worktype ) \
485 static CvStatus CV_STDCALL \
486 icvGEMMStore_##flavor( const arrtype* c_data, size_t c_step, \
487 const worktype* d_buf, size_t d_buf_step, \
488 arrtype* d_data, size_t d_step, CvSize d_size,\
489 double alpha, double beta, int flags ) \
491 const arrtype* _c_data = c_data; \
493 size_t c_step0, c_step1; \
495 c_step /= sizeof(c_data[0]); \
496 d_buf_step /= sizeof(d_buf[0]); \
497 d_step /= sizeof(d_data[0]); \
500 c_step0 = c_step1 = 0; \
501 else if( !(flags & CV_GEMM_C_T) ) \
502 c_step0 = c_step, c_step1 = 1; \
504 c_step0 = 1, c_step1 = c_step; \
506 for( ; d_size.height--; _c_data += c_step0, \
507 d_buf += d_buf_step, \
513 for( j = 0; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )\
515 worktype t0 = alpha*d_buf[j]; \
516 worktype t1 = alpha*d_buf[j+1]; \
517 t0 += beta*worktype(c_data[0]); \
518 t1 += beta*worktype(c_data[c_step1]); \
519 d_data[j] = arrtype(t0); \
520 d_data[j+1] = arrtype(t1); \
521 t0 = alpha*d_buf[j+2]; \
522 t1 = alpha*d_buf[j+3]; \
523 t0 += beta*worktype(c_data[c_step1*2]); \
524 t1 += beta*worktype(c_data[c_step1*3]); \
525 d_data[j+2] = arrtype(t0); \
526 d_data[j+3] = arrtype(t1); \
528 for( ; j < d_size.width; j++, c_data += c_step1 ) \
530 worktype t0 = alpha*d_buf[j]; \
531 d_data[j] = arrtype(t0 + beta*c_data[0]); \
536 for( j = 0; j <= d_size.width - 4; j += 4 ) \
538 worktype t0 = alpha*d_buf[j]; \
539 worktype t1 = alpha*d_buf[j+1]; \
540 d_data[j] = arrtype(t0); \
541 d_data[j+1] = arrtype(t1); \
542 t0 = alpha*d_buf[j+2]; \
543 t1 = alpha*d_buf[j+3]; \
544 d_data[j+2] = arrtype(t0); \
545 d_data[j+3] = arrtype(t1); \
547 for( ; j < d_size.width; j++ ) \
548 d_data[j] = arrtype(alpha*d_buf[j]); \
/* Instantiate the three GEMM kernels (single-pass, block multiply, store)
   for each supported matrix type: real float/double (C1R) and complex
   float/double (C2R, via CvComplex32f/64f with CvComplex64f accumulators). */
555 ICV_DEF_GEMM_SINGLE_MUL( 32f_C1R, float, double)
556 ICV_DEF_GEMM_BLOCK_MUL( 32f_C1R, float, double)
557 ICV_DEF_GEMM_STORE( 32f_C1R, float, double)
559 ICV_DEF_GEMM_SINGLE_MUL( 64f_C1R, double, double)
560 ICV_DEF_GEMM_BLOCK_MUL( 64f_C1R, double, double)
561 ICV_DEF_GEMM_STORE( 64f_C1R, double, double)
563 ICV_DEF_GEMM_SINGLE_MUL( 32f_C2R, CvComplex32f, CvComplex64f)
564 ICV_DEF_GEMM_BLOCK_MUL( 32f_C2R, CvComplex32f, CvComplex64f)
565 ICV_DEF_GEMM_STORE( 32f_C2R, CvComplex32f, CvComplex64f)
567 ICV_DEF_GEMM_SINGLE_MUL( 64f_C2R, CvComplex64f, CvComplex64f)
568 ICV_DEF_GEMM_BLOCK_MUL( 64f_C2R, CvComplex64f, CvComplex64f)
569 ICV_DEF_GEMM_STORE( 64f_C2R, CvComplex64f, CvComplex64f)
/* Type-erased signatures used by the per-type dispatch tables below;
   the concrete icvGEMM*_<flavor> kernels are cast to these for storage
   in CvBigFuncTable and called through them from cvGEMM. */
571 typedef CvStatus (CV_STDCALL *CvGEMMSingleMulFunc)( const void* src1, size_t step1,
572 const void* src2, size_t step2, const void* src3, size_t step3,
573 void* dst, size_t dststep, CvSize srcsize, CvSize dstsize,
574 double alpha, double beta, int flags );
576 typedef CvStatus (CV_STDCALL *CvGEMMBlockMulFunc)( const void* src1, size_t step1,
577 const void* src2, size_t step2, void* dst, size_t dststep,
578 CvSize srcsize, CvSize dstsize, int flags );
580 typedef CvStatus (CV_STDCALL *CvGEMMStoreFunc)( const void* src1, size_t step1,
581 const void* src2, size_t step2, void* dst, size_t dststep,
582 CvSize dstsize, double alpha, double beta, int flags );
585 static void icvInitGEMMTable( CvBigFuncTable* single_mul_tab,
586 CvBigFuncTable* block_mul_tab,
587 CvBigFuncTable* store_tab )
589 single_mul_tab->fn_2d[CV_32FC1] = (void*)icvGEMMSingleMul_32f_C1R;
590 single_mul_tab->fn_2d[CV_64FC1] = (void*)icvGEMMSingleMul_64f_C1R;
591 single_mul_tab->fn_2d[CV_32FC2] = (void*)icvGEMMSingleMul_32f_C2R;
592 single_mul_tab->fn_2d[CV_64FC2] = (void*)icvGEMMSingleMul_64f_C2R;
593 block_mul_tab->fn_2d[CV_32FC1] = (void*)icvGEMMBlockMul_32f_C1R;
594 block_mul_tab->fn_2d[CV_64FC1] = (void*)icvGEMMBlockMul_64f_C1R;
595 block_mul_tab->fn_2d[CV_32FC2] = (void*)icvGEMMBlockMul_32f_C2R;
596 block_mul_tab->fn_2d[CV_64FC2] = (void*)icvGEMMBlockMul_64f_C2R;
597 store_tab->fn_2d[CV_32FC1] = (void*)icvGEMMStore_32f_C1R;
598 store_tab->fn_2d[CV_64FC1] = (void*)icvGEMMStore_64f_C1R;
599 store_tab->fn_2d[CV_32FC2] = (void*)icvGEMMStore_32f_C2R;
600 store_tab->fn_2d[CV_64FC2] = (void*)icvGEMMStore_64f_C2R;
/* cvGEMM: D = alpha*op(A)*op(B) + beta*op(C), op() selected per operand by
   CV_GEMM_A_T / CV_GEMM_B_T / CV_GEMM_C_T bits in `flags`.
   Visible structure of the implementation:
     1) argument unwrapping via cvGetMat, COI rejection, type/size checks
        (the switch on flags validates the four transpose combinations);
     2) a hand-unrolled fast path for small square-ish cases (flags == 0,
        2 <= len <= 4) with separate double and float variants;
     3) general path: lazy icvInitGEMMTable init, temp destination when D
        aliases A or B, optional external BLAS dispatch via the
        icvBLAS_GEMM_*_p hooks (Fortran-style column-major call with
        transa/transb strings), a single-pass kernel for small/thin
        problems, and a cache-blocked loop (block_lin_size = 128) that uses
        icvGEMM_TransposeBlock / icvGEMM_CopyBlock plus the block-mul and
        store kernels;
     4) copy-back and cleanup of the heap buffers.
   NOTE(review): this extraction has fused original line numbers at the
   start of each line and is missing many lines (CV_IMPL header, __BEGIN__/
   __END__, local declarations such as `type`, `len`, `coi`, `buffer`,
   `local_alloc`, `tmat`, `d_buf`, `b_buf`, `a_buf`, `dm0/dn0/dk0`,
   braces and else-branches). Code left byte-identical; restore from
   upstream cxmatmul before compiling. */
605 cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha,
606 const CvArr* Carr, double beta, CvArr* Darr, int flags )
608 const int block_lin_size = 128;
609 const int block_size = block_lin_size * block_lin_size;
611 static CvBigFuncTable single_mul_tab, block_mul_tab, store_tab;
612 static int inittab = 0;
613 static double zero[] = {0,0,0,0};
614 static float zerof[] = {0,0,0,0};
618 uchar* block_buffer = 0;
620 CV_FUNCNAME( "cvGEMM" );
624 CvMat *A = (CvMat*)Aarr;
625 CvMat *B = (CvMat*)Barr;
626 CvMat *C = (CvMat*)Carr;
627 CvMat *D = (CvMat*)Darr;
630 CvMat stub, stub1, stub2, stub3;
631 CvSize a_size, d_size;
637 CV_CALL( A = cvGetMat( A, &stub1, &coi ));
640 CV_ERROR( CV_BadCOI, "" );
646 CV_CALL( B = cvGetMat( B, &stub2, &coi ));
649 CV_ERROR( CV_BadCOI, "" );
655 CV_CALL( D = cvGetMat( D, &stub, &coi ));
658 CV_ERROR( CV_BadCOI, "" );
669 CV_CALL( C = cvGetMat( C, &stub3, &coi ));
672 CV_ERROR( CV_BadCOI, "" );
675 if( !CV_ARE_TYPES_EQ( C, D ))
676 CV_ERROR( CV_StsUnmatchedFormats, "" );
678 if( (flags&CV_GEMM_C_T) == 0 && (C->cols != D->cols || C->rows != D->rows) ||
679 (flags&CV_GEMM_C_T) != 0 && (C->rows != D->cols || C->cols != D->rows))
680 CV_ERROR( CV_StsUnmatchedSizes, "" );
682 if( (flags & CV_GEMM_C_T) != 0 && C->data.ptr == D->data.ptr )
686 flags &= ~CV_GEMM_C_T;
694 C->type = CV_MAT_CONT_FLAG;
697 type = CV_MAT_TYPE(A->type);
698 if( !CV_ARE_TYPES_EQ( A, B ) || !CV_ARE_TYPES_EQ( A, D ) )
699 CV_ERROR( CV_StsUnmatchedFormats, "" );
701 a_size.width = A->cols;
702 a_size.height = A->rows;
703 d_size.width = D->cols;
704 d_size.height = D->rows;
706 switch( flags & (CV_GEMM_A_T|CV_GEMM_B_T) )
710 if( a_size.width != len ||
711 B->cols != d_size.width ||
712 a_size.height != d_size.height )
713 CV_ERROR( CV_StsUnmatchedSizes, "" );
717 if( a_size.height != len ||
718 B->cols != d_size.width ||
719 a_size.width != d_size.height )
720 CV_ERROR( CV_StsUnmatchedSizes, "" );
724 if( a_size.width != len ||
725 B->rows != d_size.width ||
726 a_size.height != d_size.height )
727 CV_ERROR( CV_StsUnmatchedSizes, "" );
731 if( a_size.height != len ||
732 B->rows != d_size.width ||
733 a_size.width != d_size.height )
734 CV_ERROR( CV_StsUnmatchedSizes, "" );
738 if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) )
743 double* d = D->data.db;
744 const double *a = A->data.db, *b = B->data.db, *c = C->data.db;
745 size_t d_step = D->step/sizeof(d[0]),
746 a_step = A->step/sizeof(a[0]),
747 b_step = B->step/sizeof(b[0]),
748 c_step = C->step/sizeof(c[0]);
756 if( len == d_size.width && b != d )
758 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
760 double t0 = a[0]*b[0] + a[1]*b[b_step];
761 double t1 = a[0]*b[1] + a[1]*b[b_step+1];
762 d[0] = t0*alpha + c[0]*beta;
763 d[1] = t1*alpha + c[1]*beta;
775 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
777 double t0 = a[0]*b[0] + a[1]*b[b_step];
778 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
779 d[0] = t0*alpha + c[0]*beta;
780 d[d_step] = t1*alpha + c[c_step]*beta;
787 if( len == d_size.width && b != d )
789 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
791 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
792 double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
793 double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
794 d[0] = t0*alpha + c[0]*beta;
795 d[1] = t1*alpha + c[1]*beta;
796 d[2] = t2*alpha + c[2]*beta;
808 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
810 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
811 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
812 double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
814 d[0] = t0*alpha + c[0]*beta;
815 d[d_step] = t1*alpha + c[c_step]*beta;
816 d[d_step*2] = t2*alpha + c[c_step*2]*beta;
823 if( len == d_size.width && b != d )
825 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
827 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
828 double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
829 double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
830 double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
831 d[0] = t0*alpha + c[0]*beta;
832 d[1] = t1*alpha + c[1]*beta;
833 d[2] = t2*alpha + c[2]*beta;
834 d[3] = t3*alpha + c[3]*beta;
837 else if( d_size.width <= 16 && a != d )
846 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
848 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
849 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
850 a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
851 double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
852 a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
853 double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
854 a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
855 d[0] = t0*alpha + c[0]*beta;
856 d[d_step] = t1*alpha + c[c_step]*beta;
857 d[d_step*2] = t2*alpha + c[c_step*2]*beta;
858 d[d_step*3] = t3*alpha + c[c_step*3]*beta;
869 float* d = D->data.fl;
870 const float *a = A->data.fl, *b = B->data.fl, *c = C->data.fl;
871 size_t d_step = D->step/sizeof(d[0]),
872 a_step = A->step/sizeof(a[0]),
873 b_step = B->step/sizeof(b[0]),
874 c_step = C->step/sizeof(c[0]);
882 if( len == d_size.width && b != d )
884 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
886 float t0 = a[0]*b[0] + a[1]*b[b_step];
887 float t1 = a[0]*b[1] + a[1]*b[b_step+1];
888 d[0] = (float)(t0*alpha + c[0]*beta);
889 d[1] = (float)(t1*alpha + c[1]*beta);
901 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
903 float t0 = a[0]*b[0] + a[1]*b[b_step];
904 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
905 d[0] = (float)(t0*alpha + c[0]*beta);
906 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
913 if( len == d_size.width && b != d )
915 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
917 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
918 float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
919 float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
920 d[0] = (float)(t0*alpha + c[0]*beta);
921 d[1] = (float)(t1*alpha + c[1]*beta);
922 d[2] = (float)(t2*alpha + c[2]*beta);
934 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
936 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
937 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
938 float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
940 d[0] = (float)(t0*alpha + c[0]*beta);
941 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
942 d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
949 if( len == d_size.width && b != d )
951 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
953 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
954 float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
955 float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
956 float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
957 d[0] = (float)(t0*alpha + c[0]*beta);
958 d[1] = (float)(t1*alpha + c[1]*beta);
959 d[2] = (float)(t2*alpha + c[2]*beta);
960 d[3] = (float)(t3*alpha + c[3]*beta);
963 else if( len <= 16 && a != d )
972 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
974 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
975 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
976 a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
977 float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
978 a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
979 float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
980 a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
981 d[0] = (float)(t0*alpha + c[0]*beta);
982 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
983 d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
984 d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta);
995 int b_step = B->step;
996 CvGEMMSingleMulFunc single_mul_func;
998 icvBLAS_GEMM_32f_t blas_func = 0;
1002 icvInitGEMMTable( &single_mul_tab, &block_mul_tab, &store_tab );
1006 single_mul_func = (CvGEMMSingleMulFunc)single_mul_tab.fn_2d[type];
1007 if( !single_mul_func )
1008 CV_ERROR( CV_StsUnsupportedFormat, "" );
1010 if( D->data.ptr == A->data.ptr || D->data.ptr == B->data.ptr )
1012 int buf_size = d_size.width*d_size.height*CV_ELEM_SIZE(type);
1013 if( d_size.width <= CV_MAX_LOCAL_MAT_SIZE )
1015 buffer = (uchar*)cvStackAlloc( buf_size );
1019 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
1021 tmat = cvMat( d_size.height, d_size.width, type, buffer );
1025 if( (d_size.width == 1 || len == 1) && !(flags & CV_GEMM_B_T) && CV_IS_MAT_CONT(B->type) )
1027 b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type);
1028 flags |= CV_GEMM_B_T;
1031 if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 )
1033 blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p :
1034 type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p :
1035 type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p :
1036 type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0;
1041 const char* transa = flags & CV_GEMM_A_T ? "t" : "n";
1042 const char* transb = flags & CV_GEMM_B_T ? "t" : "n";
1047 if( C->data.ptr != D->data.ptr )
1049 if( !(flags & CV_GEMM_C_T) )
1052 cvTranspose( C, D );
1056 if( CV_MAT_DEPTH(type) == CV_32F )
1058 CvComplex32f _alpha, _beta;
1060 lda = A->step/sizeof(float);
1061 ldb = b_step/sizeof(float);
1062 ldd = D->step/sizeof(float);
1063 _alpha.re = (float)alpha;
1065 _beta.re = C->data.ptr ? (float)beta : 0;
1067 if( CV_MAT_CN(type) == 2 )
1068 lda /= 2, ldb /= 2, ldd /= 2;
1070 blas_func( transb, transa, &d_size.width, &d_size.height, &len,
1071 &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
1072 &_beta, D->data.ptr, &ldd );
1076 CvComplex64f _alpha, _beta;
1078 lda = A->step/sizeof(double);
1079 ldb = b_step/sizeof(double);
1080 ldd = D->step/sizeof(double);
1083 _beta.re = C->data.ptr ? beta : 0;
1085 if( CV_MAT_CN(type) == 2 )
1086 lda /= 2, ldb /= 2, ldd /= 2;
1088 blas_func( transb, transa, &d_size.width, &d_size.height, &len,
1089 &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
1090 &_beta, D->data.ptr, &ldd );
1093 else if( (d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) &&
1094 len <= 10000 || len <= 10 ||
1095 d_size.width <= block_lin_size && d_size.height <= block_lin_size && len <= block_lin_size )
1097 single_mul_func( A->data.ptr, A->step, B->data.ptr, b_step,
1098 C->data.ptr, C->step, D->data.ptr, D->step,
1099 a_size, d_size, alpha, beta, flags );
1103 int is_a_t = flags & CV_GEMM_A_T;
1104 int is_b_t = flags & CV_GEMM_B_T;
1105 int elem_size = CV_ELEM_SIZE(type);
1107 int a_buf_size = 0, b_buf_size, d_buf_size;
1111 int i, j, k, di = 0, dj = 0, dk = 0;
1113 int a_step0, a_step1, b_step0, b_step1, c_step0, c_step1;
1114 int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 1 : 0);
1115 CvGEMMBlockMulFunc block_mul_func = (CvGEMMBlockMulFunc)block_mul_tab.fn_2d[type];
1116 CvGEMMStoreFunc store_func = (CvGEMMStoreFunc)store_tab.fn_2d[type];
1118 assert( block_mul_func && store_func );
1121 a_step0 = A->step, a_step1 = elem_size;
1123 a_step0 = elem_size, a_step1 = A->step;
1126 b_step0 = b_step, b_step1 = elem_size;
1128 b_step0 = elem_size, b_step1 = b_step;
1132 c_step0 = c_step1 = 0;
1133 flags &= ~CV_GEMM_C_T;
1135 else if( !(flags & CV_GEMM_C_T) )
1136 c_step0 = C->step, c_step1 = elem_size;
1138 c_step0 = elem_size, c_step1 = C->step;
1140 dm0 = MIN( block_lin_size, d_size.height );
1141 dn0 = MIN( block_lin_size, d_size.width );
1142 dk0_1 = block_size / dm0;
1143 dk0_2 = block_size / dn0;
1144 dk0 = MAX( dk0_1, dk0_2 );
1145 dk0 = MIN( dk0, len );
1146 if( dk0*dm0 > block_size )
1147 dm0 = block_size / dk0;
1148 if( dk0*dn0 > block_size )
1149 dn0 = block_size / dk0;
1151 dk0_1 = (dn0+dn0/8+2) & -2;
1152 b_buf_size = (dk0+dk0/8+1)*dk0_1*elem_size;
1153 d_buf_size = (dk0+dk0/8+1)*dk0_1*work_elem_size;
1157 a_buf_size = (dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size;
1158 flags &= ~CV_GEMM_A_T;
1161 CV_CALL( block_buffer = (uchar*)cvAlloc(a_buf_size + b_buf_size + d_buf_size));
1162 d_buf = block_buffer;
1163 b_buf = d_buf + d_buf_size;
1166 a_buf = b_buf + b_buf_size;
1168 for( i = 0; i < d_size.height; i += di )
1171 if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height )
1172 di = d_size.height - i;
1174 for( j = 0; j < d_size.width; j += dj )
1176 uchar* _d = D->data.ptr + i*D->step + j*elem_size;
1177 const uchar* _c = C->data.ptr + i*c_step0 + j*c_step1;
1178 int _d_step = D->step;
1181 if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width )
1182 dj = d_size.width - j;
1188 _d_step = dj*work_elem_size;
1191 for( k = 0; k < len; k += dk )
1193 const uchar* _a = A->data.ptr + i*a_step0 + k*a_step1;
1194 int _a_step = A->step;
1195 const uchar* _b = B->data.ptr + k*b_step0 + j*b_step1;
1196 int _b_step = b_step;
1200 if( k + dk >= len || 8*(k + dk) + dk > 8*len )
1204 a_bl_size.width = dk, a_bl_size.height = di;
1206 a_bl_size.width = di, a_bl_size.height = dk;
1208 if( a_buf && is_a_t )
1211 _a_step = dk*elem_size;
1212 icvGEMM_TransposeBlock( _a, A->step, a_buf, _a_step, a_bl_size, elem_size );
1213 CV_SWAP( a_bl_size.width, a_bl_size.height, t );
1217 if( dj < d_size.width )
1221 b_size.width = dj, b_size.height = dk;
1223 b_size.width = dk, b_size.height = dj;
1225 _b_step = b_size.width*elem_size;
1226 icvGEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size );
1231 block_mul_func( _a, _a_step, _b, _b_step, _d, _d_step,
1232 a_bl_size, cvSize(dj,di), flags );
1234 single_mul_func( _a, _a_step, _b, _b_step, _c, C->step, _d, _d_step,
1235 a_bl_size, cvSize(dj,di), alpha, beta, flags );
1240 store_func( _c, C->step, _d, _d_step, D->data.ptr + i*D->step + j*elem_size,
1241 D->step, cvSize(dj,di), alpha, beta, flags );
1247 CV_CALL( cvCopy( D, D0 ));
1252 if( buffer && !local_alloc )
1255 cvFree( &block_buffer );
1259 /****************************************************************************************\
1261 \****************************************************************************************/
/* Per-row body for cvTransform on 1-channel input: each source scalar is
   mapped to dst_cn output channels via a dst_cn x 2 affine matrix `mat`
   (dst[k] = mat[2k]*src + mat[2k+1]). _ld_ loads/converts an element,
   _cast_macro1_/2_ round and saturate to temptype/arrtype.
   NOTE(review): fused line numbers and missing enclosing braces from
   extraction — code left byte-identical. */
1263 #define ICV_DEF_TRANSFORM_CASE_C1( arrtype, temptype, _ld_, \
1264 _cast_macro1_, _cast_macro2_ ) \
1266 for( i = 0; i < size.width; i++, dst += dst_cn ) \
1268 const double* _mat = mat; \
1269 double v0 = _ld_(src[i]); \
1270 for( k = 0; k < dst_cn; k++, _mat += 2 ) \
1272 temptype t0 = _cast_macro1_(_mat[0]*v0 + _mat[1]); \
1273 dst[k] = _cast_macro2_(t0); \
1276 src += size.width; \
/* Diagonal-matrix fast path for 1-channel transform:
   dst[i] = mat[0]*src[i] + mat[1] (scale + offset only). */
1280 #define ICV_DEF_DIAG_TRANSFORM_CASE_C1( arrtype, temptype, _ld_, \
1281 _cast_macro1_, _cast_macro2_ ) \
1282 for( i = 0; i < size.width; i++ ) \
1286 ft0 = mat[0]*_ld_(src[i]) + mat[1]; \
1287 t0 = _cast_macro1_(ft0); \
1288 dst[i] = _cast_macro2_(t0); \
/* Per-row body for cvTransform on 2-channel input. First (visible) branch:
   2->2 channel case with a 2x3 matrix applied in place per pixel pair;
   second branch: general dst_cn output using rows of 3 coefficients.
   NOTE(review): in the general branch `v1 = src[1]` is loaded WITHOUT the
   _ld_ conversion macro, unlike v0 — looks like a dropped _ld_; confirm
   against upstream before fixing. Fused line numbers and missing braces
   from extraction — code left byte-identical. */
1292 #define ICV_DEF_TRANSFORM_CASE_C2( arrtype, temptype, _ld_, \
1293 _cast_macro1_, _cast_macro2_ ) \
1296 for( i = 0; i < size.width*2; i += 2 ) \
1300 ft0 = mat[0]*_ld_(src[i]) + mat[1]*_ld_(src[i+1]) + mat[2]; \
1301 ft1 = mat[3]*_ld_(src[i]) + mat[4]*_ld_(src[i+1]) + mat[5]; \
1302 t0 = _cast_macro1_(ft0); \
1303 t1 = _cast_macro1_(ft1); \
1304 dst[i] = _cast_macro2_(t0); \
1305 dst[i+1] = _cast_macro2_(t1); \
1307 src += size.width*2; dst += size.width*2; \
1310 for( i = 0; i < size.width; i++, src += 2, dst += dst_cn ) \
1312 const double* _mat = mat; \
1313 double v0 = _ld_(src[0]), v1 = src[1]; \
1314 for( k = 0; k < dst_cn; k++, _mat += 3 ) \
1317 _cast_macro1_(_mat[0]*v0 + _mat[1]*v1 + _mat[2]); \
1318 dst[k] = _cast_macro2_(t0); \
/* Diagonal fast path for 2-channel transform: each channel gets its own
   scale + offset from the 2x3 matrix (mat[0],mat[2] for ch0;
   mat[4],mat[5] for ch1), no cross-channel terms. */
1323 #define ICV_DEF_DIAG_TRANSFORM_CASE_C2( arrtype, temptype, _ld_, \
1324 _cast_macro1_, _cast_macro2_ ) \
1325 for( i = 0; i < size.width*2; i += 2 ) \
1329 ft0 = mat[0]*_ld_(src[i]) + mat[2]; \
1330 ft1 = mat[4]*_ld_(src[i+1]) + mat[5]; \
1331 t0 = _cast_macro1_(ft0); \
1332 t1 = _cast_macro1_(ft1); \
1333 dst[i] = _cast_macro2_(t0); \
1334 dst[i+1] = _cast_macro2_(t1); \
/* Per-row body for cvTransform on 3-channel input. Three visible branches:
   3->3 (full 3x4 matrix per pixel), 3->1 (single row of 4 coefficients,
   e.g. color-to-gray style projections), and the general 3->dst_cn case
   with rows of 4 coefficients.
   NOTE(review): fused line numbers and missing braces/branch headers from
   extraction — code left byte-identical. */
1338 #define ICV_DEF_TRANSFORM_CASE_C3( arrtype, temptype, _ld_, \
1339 _cast_macro1_, _cast_macro2_ ) \
1342 for( i = 0; i < size.width*3; i += 3 ) \
1344 double ft0, ft1, ft2; \
1345 temptype t0, t1, t2; \
1346 ft0 = mat[0]*_ld_(src[i]) + mat[1]*_ld_(src[i+1]) + \
1347 mat[2]*_ld_(src[i+2]) + mat[3]; \
1348 ft1 = mat[4]*_ld_(src[i]) + mat[5]*_ld_(src[i+1]) + \
1349 mat[6]*_ld_(src[i+2]) + mat[7]; \
1350 ft2 = mat[8]*_ld_(src[i]) + mat[9]*_ld_(src[i+1]) + \
1351 mat[10]*_ld_(src[i+2]) + mat[11]; \
1352 t0 = _cast_macro1_(ft0); \
1353 t1 = _cast_macro1_(ft1); \
1354 t2 = _cast_macro1_(ft2); \
1355 dst[i] = _cast_macro2_(t0); \
1356 dst[i+1] = _cast_macro2_(t1); \
1357 dst[i+2] = _cast_macro2_(t2); \
1359 src += size.width*3; dst += size.width*3; \
1361 else if( dst_cn == 1 ) \
1363 for( i = 0; i < size.width; i++, src += 3 ) \
1365 temptype t0 = _cast_macro1_(mat[0]*_ld_(src[0]) + \
1366 mat[1]*_ld_(src[1]) + mat[2]*_ld_(src[2]) + mat[3]); \
1367 dst[i] = _cast_macro2_(t0); \
1369 dst += size.width; \
1372 for( i = 0; i < size.width; i++, src += 3, dst += dst_cn ) \
1374 const double* _mat = mat; \
1375 double v0=_ld_(src[0]), v1=_ld_(src[1]), v2=_ld_(src[2]); \
1376 for( k = 0; k < dst_cn; k++, _mat += 4 ) \
1378 temptype t0 = _cast_macro1_(_mat[0]*v0 + \
1379 _mat[1]*v1 + _mat[2]*v2 + _mat[3]); \
1380 dst[k] = _cast_macro2_(t0); \
/* Diagonal transform, 3-channel case: per-channel scale + offset using the
   diagonal (mat[0], mat[5], mat[10]) and last column (mat[3], mat[7], mat[11])
   of the 3x4 matrix; the off-diagonal coefficients are known to be ~0. */
1385 #define ICV_DEF_DIAG_TRANSFORM_CASE_C3( arrtype, temptype, _ld_, \
1386 _cast_macro1_, _cast_macro2_ ) \
1387 for( i = 0; i < size.width*3; i += 3 ) \
1389 double ft0, ft1, ft2; \
1390 temptype t0, t1, t2; \
1391 ft0 = mat[0]*_ld_(src[i]) + mat[3]; \
1392 ft1 = mat[5]*_ld_(src[i+1]) + mat[7]; \
1393 ft2 = mat[10]*_ld_(src[i+2]) + mat[11]; \
1394 t0 = _cast_macro1_(ft0); \
1395 t1 = _cast_macro1_(ft1); \
1396 t2 = _cast_macro1_(ft2); \
1397 dst[i] = _cast_macro2_(t0); \
1398 dst[i+1] = _cast_macro2_(t1); \
1399 dst[i+2] = _cast_macro2_(t2); \
/* General transform, 4-channel source: generic row-by-row 4x5 affine
   transform — for each of the dst_cn output channels,
       dst[k] = _mat[0]*v0 + _mat[1]*v1 + _mat[2]*v2 + _mat[3]*v3 + _mat[4],
   advancing _mat by 5 coefficients per output row. Unlike the C2/C3 cases,
   no unrolled dst_cn==cn fast path is visible in this excerpt. */
1403 #define ICV_DEF_TRANSFORM_CASE_C4( arrtype, temptype, _ld_, \
1404 _cast_macro1_, _cast_macro2_ ) \
1405 for( i = 0; i < size.width; i++, src += 4, dst += dst_cn ) \
1407 const double* _mat = mat; \
1408 double v0 = _ld_(src[0]), v1 = _ld_(src[1]), \
1409 v2 = _ld_(src[2]), v3 = _ld_(src[3]); \
1410 for( k = 0; k < dst_cn; k++, _mat += 5 ) \
1412 temptype t0 =_cast_macro1_(_mat[0]*v0+_mat[1]*v1+ \
1413 _mat[2]*v2+_mat[3]*v3+_mat[4]); \
1414 dst[k] = _cast_macro2_(t0); \
/* Diagonal transform, 4-channel case: per-channel scale + offset using the
   diagonal (mat[0], mat[6], mat[12], mat[18]) and last column
   (mat[4], mat[9], mat[14], mat[19]) of the 4x5 matrix. Processed as two
   pairs of channels per pixel to reuse the ft0/ft1 temporaries. */
1419 #define ICV_DEF_DIAG_TRANSFORM_CASE_C4( arrtype, temptype, _ld_, \
1420 _cast_macro1_, _cast_macro2_ ) \
1421 for( i = 0; i < size.width*4; i += 4 ) \
1425 ft0 = mat[0]*_ld_(src[i]) + mat[4]; \
1426 ft1 = mat[6]*_ld_(src[i+1]) + mat[9]; \
1427 t0 = _cast_macro1_(ft0); \
1428 t1 = _cast_macro1_(ft1); \
1429 dst[i] = _cast_macro2_(t0); \
1430 dst[i+1] = _cast_macro2_(t1); \
1431 ft0 = mat[12]*_ld_(src[i+2]) + mat[14]; \
1432 ft1 = mat[18]*_ld_(src[i+3]) + mat[19]; \
1433 t0 = _cast_macro1_(ft0); \
1434 t1 = _cast_macro1_(ft1); \
1435 dst[i+2] = _cast_macro2_(t0); \
1436 dst[i+3] = _cast_macro2_(t1); \
/* Generates icvTransform_<flavor>: row-wise driver around the per-channel-count
   CASE macro above. srcstep/dststep arrive in bytes and are converted to
   element units; the CASE macros themselves advance src/dst across the row,
   so the "step minus row width" remainder is what is added between rows. */
1441 #define ICV_DEF_TRANSFORM_FUNC( flavor, arrtype, temptype, _ld_, \
1442 _cast_macro1_, _cast_macro2_, cn )\
1443 static CvStatus CV_STDCALL \
1444 icvTransform_##flavor( const arrtype* src, int srcstep, \
1445 arrtype* dst, int dststep, CvSize size, \
1446 const double* mat, int dst_cn ) \
1448 srcstep = srcstep/sizeof(src[0]) - size.width*cn; \
1449 dststep = dststep/sizeof(dst[0]) - size.width*dst_cn; \
1450 for( ; size.height--; src += srcstep, dst += dststep ) \
1453 ICV_DEF_TRANSFORM_CASE_C##cn( arrtype, temptype, _ld_, \
1454 _cast_macro1_, _cast_macro2_ ) \
/* Generates icvDiagTransform_<flavor>: same driver as ICV_DEF_TRANSFORM_FUNC
   but for the diagonal (per-channel) fast path; note dst_cn == cn here, so
   there is no dst_cn parameter, and the DIAG CASE macros index src/dst rather
   than advancing the pointers, hence the plain full-step row advance. */
1461 #define ICV_DEF_DIAG_TRANSFORM_FUNC( flavor, arrtype, temptype, _ld_, \
1462 _cast_macro1_, _cast_macro2_, cn )\
1463 static CvStatus CV_STDCALL \
1464 icvDiagTransform_##flavor( const arrtype* src, int srcstep, \
1465 arrtype* dst, int dststep, CvSize size, \
1466 const double* mat ) \
1468 srcstep /= sizeof(src[0]); \
1469 dststep /= sizeof(dst[0]); \
1470 for( ; size.height--; src += srcstep, dst += dststep ) \
1473 ICV_DEF_DIAG_TRANSFORM_CASE_C##cn( arrtype, temptype, _ld_, \
1474 _cast_macro1_, _cast_macro2_ ) \
/* Instantiate the general transform for every supported depth (8u, 16u, 16s,
   32s, 32f, 64f) and 1..4 channels. Integer depths accumulate in int via
   cvRound; float depths accumulate in double with a saturating final cast. */
1481 ICV_DEF_TRANSFORM_FUNC( 8u_C1R, uchar, int, CV_8TO32F, cvRound, CV_CAST_8U, 1 )
1482 ICV_DEF_TRANSFORM_FUNC( 8u_C2R, uchar, int, CV_8TO32F, cvRound, CV_CAST_8U, 2 )
1483 ICV_DEF_TRANSFORM_FUNC( 8u_C3R, uchar, int, CV_8TO32F, cvRound, CV_CAST_8U, 3 )
1484 ICV_DEF_TRANSFORM_FUNC( 8u_C4R, uchar, int, CV_8TO32F, cvRound, CV_CAST_8U, 4 )
1486 ICV_DEF_TRANSFORM_FUNC( 16u_C1R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 1 )
1487 ICV_DEF_TRANSFORM_FUNC( 16u_C2R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 2 )
1488 ICV_DEF_TRANSFORM_FUNC( 16u_C3R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 3 )
1489 ICV_DEF_TRANSFORM_FUNC( 16u_C4R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 4 )
1491 ICV_DEF_TRANSFORM_FUNC( 16s_C1R, short, int, CV_NOP, cvRound, CV_CAST_16S, 1 )
1492 ICV_DEF_TRANSFORM_FUNC( 16s_C2R, short, int, CV_NOP, cvRound, CV_CAST_16S, 2 )
1493 ICV_DEF_TRANSFORM_FUNC( 16s_C3R, short, int, CV_NOP, cvRound, CV_CAST_16S, 3 )
1494 ICV_DEF_TRANSFORM_FUNC( 16s_C4R, short, int, CV_NOP, cvRound, CV_CAST_16S, 4 )
1496 ICV_DEF_TRANSFORM_FUNC( 32s_C1R, int, int, CV_NOP, cvRound, CV_NOP, 1 )
1497 ICV_DEF_TRANSFORM_FUNC( 32s_C2R, int, int, CV_NOP, cvRound, CV_NOP, 2 )
1498 ICV_DEF_TRANSFORM_FUNC( 32s_C3R, int, int, CV_NOP, cvRound, CV_NOP, 3 )
1499 ICV_DEF_TRANSFORM_FUNC( 32s_C4R, int, int, CV_NOP, cvRound, CV_NOP, 4 )
1501 ICV_DEF_TRANSFORM_FUNC( 32f_C1R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 1 )
1502 ICV_DEF_TRANSFORM_FUNC( 32f_C2R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 2 )
1503 ICV_DEF_TRANSFORM_FUNC( 32f_C3R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 3 )
1504 ICV_DEF_TRANSFORM_FUNC( 32f_C4R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 4 )
1506 ICV_DEF_TRANSFORM_FUNC( 64f_C1R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 1 )
1507 ICV_DEF_TRANSFORM_FUNC( 64f_C2R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 2 )
1508 ICV_DEF_TRANSFORM_FUNC( 64f_C3R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 3 )
1509 ICV_DEF_TRANSFORM_FUNC( 64f_C4R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 4 )
/* Diagonal fast-path instantiations. 8u is intentionally absent here: the
   8u diagonal case is served by a 256-entry LUT inside cvTransform instead
   (see the lut_func setup there), so its slots are stubbed out below. */
1511 ICV_DEF_DIAG_TRANSFORM_FUNC( 16u_C1R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 1 )
1512 ICV_DEF_DIAG_TRANSFORM_FUNC( 16u_C2R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 2 )
1513 ICV_DEF_DIAG_TRANSFORM_FUNC( 16u_C3R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 3 )
1514 ICV_DEF_DIAG_TRANSFORM_FUNC( 16u_C4R, ushort, int, CV_NOP, cvRound, CV_CAST_16U, 4 )
1516 ICV_DEF_DIAG_TRANSFORM_FUNC( 16s_C1R, short, int, CV_NOP, cvRound, CV_CAST_16S, 1 )
1517 ICV_DEF_DIAG_TRANSFORM_FUNC( 16s_C2R, short, int, CV_NOP, cvRound, CV_CAST_16S, 2 )
1518 ICV_DEF_DIAG_TRANSFORM_FUNC( 16s_C3R, short, int, CV_NOP, cvRound, CV_CAST_16S, 3 )
1519 ICV_DEF_DIAG_TRANSFORM_FUNC( 16s_C4R, short, int, CV_NOP, cvRound, CV_CAST_16S, 4 )
1521 ICV_DEF_DIAG_TRANSFORM_FUNC( 32s_C1R, int, int, CV_NOP, cvRound, CV_NOP, 1 )
1522 ICV_DEF_DIAG_TRANSFORM_FUNC( 32s_C2R, int, int, CV_NOP, cvRound, CV_NOP, 2 )
1523 ICV_DEF_DIAG_TRANSFORM_FUNC( 32s_C3R, int, int, CV_NOP, cvRound, CV_NOP, 3 )
1524 ICV_DEF_DIAG_TRANSFORM_FUNC( 32s_C4R, int, int, CV_NOP, cvRound, CV_NOP, 4 )
1526 ICV_DEF_DIAG_TRANSFORM_FUNC( 32f_C1R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 1 )
1527 ICV_DEF_DIAG_TRANSFORM_FUNC( 32f_C2R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 2 )
1528 ICV_DEF_DIAG_TRANSFORM_FUNC( 32f_C3R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 3 )
1529 ICV_DEF_DIAG_TRANSFORM_FUNC( 32f_C4R, float, double, CV_NOP, CV_NOP, CV_CAST_32F, 4 )
1531 ICV_DEF_DIAG_TRANSFORM_FUNC( 64f_C1R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 1 )
1532 ICV_DEF_DIAG_TRANSFORM_FUNC( 64f_C2R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 2 )
1533 ICV_DEF_DIAG_TRANSFORM_FUNC( 64f_C3R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 3 )
1534 ICV_DEF_DIAG_TRANSFORM_FUNC( 64f_C4R, double, double, CV_NOP, CV_NOP, CV_CAST_64F, 4 )
/* Null stubs for unsupported/LUT-served entries so the big function tables
   (filled by CV_DEF_INIT_BIG_FUNC_TAB_2D below) compile with 0 pointers. */
1536 #define icvTransform_8s_C1R 0
1537 #define icvTransform_8s_C2R 0
1538 #define icvTransform_8s_C3R 0
1539 #define icvTransform_8s_C4R 0
1541 #define icvDiagTransform_8s_C1R 0
1542 #define icvDiagTransform_8s_C2R 0
1543 #define icvDiagTransform_8s_C3R 0
1544 #define icvDiagTransform_8s_C4R 0
1546 #define icvDiagTransform_8u_C1R 0
1547 #define icvDiagTransform_8u_C2R 0
1548 #define icvDiagTransform_8u_C3R 0
1549 #define icvDiagTransform_8u_C4R 0
/* Emit icvInitTransformRTable / icvInitDiagTransformRTable used by cvTransform. */
1551 CV_DEF_INIT_BIG_FUNC_TAB_2D( Transform, R )
1552 CV_DEF_INIT_BIG_FUNC_TAB_2D( DiagTransform, R )
/* Signatures used to call through the type-erased function tables. */
1554 typedef CvStatus (CV_STDCALL * CvTransformFunc)(
1555 const void* src, int srcstep,
1556 void* dst, int dststep, CvSize size,
1557 const void* mat, int dst_cn );
1559 typedef CvStatus (CV_STDCALL * CvDiagTransformFunc)(
1560 const void* src, int srcstep,
1561 void* dst, int dststep, CvSize size,
/* NOTE(review): the typedef below repeats CvDiagTransformFunc verbatim —
   this looks like an accidental duplication in this listing (typedef
   redefinition is not legal before C11); verify against the original file. */
1564 typedef CvStatus (CV_STDCALL * CvDiagTransformFunc)(
1565 const void* src, int srcstep,
1566 void* dst, int dststep, CvSize size,
///////////////////// IPP transform functions //////////////////
/* Optional Intel IPP entry points, loaded at runtime; a 0 value means the
   corresponding IPP routine is unavailable and the generic C path is used.
   ColorTwist applies a 3x4 (or masked 4x4) color matrix; ColorToGray reduces
   3/4-channel images to one channel with non-negative weights. */
1571 icvColorTwist_8u_C3R_t icvColorTwist_8u_C3R_p = 0;
1572 icvColorTwist_16u_C3R_t icvColorTwist_16u_C3R_p = 0;
1573 icvColorTwist_16s_C3R_t icvColorTwist_16s_C3R_p = 0;
1574 icvColorTwist_32f_C3R_t icvColorTwist_32f_C3R_p = 0;
1575 icvColorTwist_32f_C4R_t icvColorTwist_32f_C4R_p = 0;
1577 icvColorToGray_8u_C3C1R_t icvColorToGray_8u_C3C1R_p = 0;
1578 icvColorToGray_16u_C3C1R_t icvColorToGray_16u_C3C1R_p = 0;
1579 icvColorToGray_16s_C3C1R_t icvColorToGray_16s_C3C1R_p = 0;
1580 icvColorToGray_32f_C3C1R_t icvColorToGray_32f_C3C1R_p = 0;
1582 icvColorToGray_8u_AC4C1R_t icvColorToGray_8u_AC4C1R_p = 0;
1583 icvColorToGray_16u_AC4C1R_t icvColorToGray_16u_AC4C1R_p = 0;
1584 icvColorToGray_16s_AC4C1R_t icvColorToGray_16s_AC4C1R_p = 0;
1585 icvColorToGray_32f_AC4C1R_t icvColorToGray_32f_AC4C1R_p = 0;
/* Common calling signature for the IPP twist/to-gray routines above;
   coeffs is the float coefficient array prepared in cvTransform. */
1587 typedef CvStatus (CV_STDCALL * CvColorTwistIPPFunc)( const void* src, int srcstep,
1588 void* dst, int dststep, CvSize size, const float* coeffs );
1590 ////////////////////////////////////////////////////////////////
/* cvTransform: applies an affine per-element transform to every element of a
   1..4 channel array or point sequence:
       dst(I) = transmat * src(I) + shiftvec
   transmat is dst_cn x cn or dst_cn x (cn+1) (32f or 64f); the optional
   shiftvec is folded into the last column of an internal dst_cn x (cn+1)
   coefficient buffer. Dispatch order: 8u LUT (diagonal case) > IPP color
   twist / to-gray > generic diagonal path > generic full transform. */
1593 cvTransform( const CvArr* srcarr, CvArr* dstarr,
1594 const CvMat* transmat, const CvMat* shiftvec )
1596 static CvBigFuncTable transform_tab, diag_transform_tab;
1597 static int inittab = 0;
1600 CV_FUNCNAME( "cvTransform" );
1604 CvMat srcstub, *src = (CvMat*)srcarr;
1605 CvMat dststub, *dst = (CvMat*)dstarr;
1606 CvMat rotstub, *rot = (CvMat*)transmat;
1607 CvMat shiftstub, *shift = (CvMat*)shiftvec;
1608 CvSeq *src_seq = 0, *dst_seq = 0;
1609 CvSeq hdr; // need only one copy of stub header & seqblock (either for src or dst)
1610 CvSeqBlock block_hdr;
1611 int i, j, type, cn, dst_cn;
1612 int coi = 0, coi2 = 0;
// coefficient buffer: dst_cn rows of (cn+1) doubles, shift folded into last column
1613 double* buffer = (double*)cvStackAlloc( CV_CN_MAX*(CV_CN_MAX+1)*sizeof(buffer[0]) );
// one-time lazy initialization of the per-type dispatch tables
1617 icvInitTransformRTable( &transform_tab );
1618 icvInitDiagTransformRTable( &diag_transform_tab );
/* Either input may be a CvSeq of points instead of a matrix. */
1622 if( CV_IS_SEQ( src ))
1624 src_seq = (CvSeq*)src;
1625 if( CV_ELEM_SIZE(src_seq->flags) != src_seq->elem_size )
1626 CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type of sequence elements" );
1629 CV_CALL( src = cvGetMat( src, &srcstub, &coi ));
1631 if( CV_IS_SEQ( dst ))
1633 dst_seq = (CvSeq*)dst;
1634 if( CV_ELEM_SIZE(dst_seq->flags) != dst_seq->elem_size )
1635 CV_ERROR( CV_StsUnsupportedFormat, "Unsupported type of sequence elements" );
1638 CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
1640 if( coi != 0 || coi2 != 0 )
1641 CV_ERROR( CV_BadCOI, "" );
1643 if( !CV_ARE_DEPTHS_EQ(src, dst) )
1644 CV_ERROR( CV_StsUnmatchedFormats, "" );
/* If only one side is a sequence, wrap the matrix side in a temporary
   sequence header so both sides can be walked block-by-block below.
   NOTE(review): "eigher"/"continous" typos live in the user-facing error
   strings below — fixing them is a behavior-affecting code change. */
1646 if( src_seq || dst_seq )
1650 if( CV_IS_MAT_CONT(src->type) || src->rows != 1 && src->cols != 1 )
1651 CV_ERROR( CV_StsBadSize, "if eigher the source or destination is a sequence, "
1652 "the other array must be also a sequence of continous 1d vector" );
1653 src_seq = cvMakeSeqHeaderForArray( CV_MAT_TYPE(src->type), sizeof(hdr),
1654 CV_ELEM_SIZE(src->type), src->data.ptr,
1655 src->rows + src->cols + 1, &hdr, &block_hdr );
1660 if( CV_IS_MAT_CONT(dst->type) || dst->rows != 1 && dst->cols != 1 )
1661 CV_ERROR( CV_StsBadSize, "if eigher the source or destination is a sequence, "
1662 "the other array must be also a sequence of continous 1d vector" );
1663 if( dst->rows + dst->cols - 1 != src_seq->total )
1664 CV_ERROR( CV_StsUnmatchedFormats,
1665 "source sequence and destination vector have different sizes" );
1666 dst_seq = cvMakeSeqHeaderForArray( CV_MAT_TYPE(dst->type), sizeof(hdr),
1667 CV_ELEM_SIZE(dst->type), dst->data.ptr,
1668 dst->rows + dst->cols + 1, &hdr, &block_hdr );
// grow/shrink the destination sequence to match the source element count
1670 else if( dst_seq->total != src_seq->total )
1672 if( dst_seq->total > src_seq->total )
1673 cvSeqPopMulti( dst_seq, 0, dst_seq->total - src_seq->total );
1675 cvSeqPushMulti( dst_seq, 0, src_seq->total - dst_seq->total );
1678 else if( !CV_ARE_SIZES_EQ( src, dst ))
1679 CV_ERROR( CV_StsUnmatchedSizes, "" );
1681 type = CV_MAT_TYPE( src->type );
1682 cn = CV_MAT_CN( type );
1683 dst_cn = CV_MAT_CN( dst->type );
1685 if( cn > 4 || dst_cn > 4 )
1686 CV_ERROR( CV_StsOutOfRange, "Both input and output array must have at most 4 channels" );
/* Validate the transform matrix and copy it (row-major, cn+1 doubles per
   row, shift column zeroed) into the stack buffer. */
1688 if( !CV_IS_MAT( rot ))
1689 CV_CALL( rot = cvGetMat( rot, &rotstub, &coi ));
1691 if( rot->rows != dst_cn )
1692 CV_ERROR( CV_StsBadSize,
1693 "The height of transmat matrix must be equal to number of channels" );
1695 if( rot->cols == cn + 1 || rot->cols == cn )
1697 if( CV_MAT_TYPE( rot->type ) == CV_64FC1 )
1699 for( i = 0; i < dst_cn; i++ )
1701 buffer[i*(cn+1) + cn] = 0;
1702 for( j = 0; j < rot->cols; j++ )
1703 buffer[i*(cn+1) + j] = ((double*)(rot->data.ptr + rot->step*i))[j];
1706 else if( CV_MAT_TYPE( rot->type ) == CV_32FC1 )
1708 for( i = 0; i < dst_cn; i++ )
1710 buffer[i*(cn+1) + cn] = 0;
1711 for( j = 0; j < rot->cols; j++ )
1712 buffer[i*(cn+1) + j] = ((float*)(rot->data.ptr + rot->step*i))[j];
1716 CV_ERROR( CV_StsUnsupportedFormat, "Rotation matrix must be 32fC1 or 64fC1" );
1719 CV_ERROR( CV_StsUnmatchedSizes, "If the source array has <cn> channels, "
1720 "the transformation matrix must have <cn> x <cn>+1 or <cn> x <cn> size" );
/* Optional shift vector: added into the last column of each buffer row.
   step==0 means a 1x1/continuous stub — treat as densely packed. */
1724 if( !CV_IS_MAT( shift ))
1725 CV_CALL( shift = cvGetMat( shift, &shiftstub, &coi ));
1727 if( CV_MAT_CN( shift->type ) * shift->cols * shift->rows == dst_cn &&
1728 (shift->rows == 1 || shift->rows == dst_cn) ||
1729 (shift->cols == 1 || shift->cols == dst_cn) )
1731 if( CV_MAT_DEPTH( shift->type ) == CV_64F )
1733 int step = shift->step ? shift->step/sizeof(double) : 1;
1734 for( i = 0; i < dst_cn; i++ )
1735 buffer[i*(cn+1) + cn] += shift->data.db[i*step];
1737 else if( CV_MAT_DEPTH( shift->type ) == CV_32F )
1739 int step = shift->step ? shift->step/sizeof(float) : 1;
1740 for( i = 0; i < dst_cn; i++ )
1741 buffer[i*(cn+1) + cn] += shift->data.fl[i*step];
1744 CV_ERROR( CV_StsUnsupportedFormat, "Shift vector must be 32f or 64f" );
1748 CV_ERROR( CV_StsUnmatchedSizes,
1749 "Shift (if present) must be 1 dimensional vector with the number "
1750 "of elements equal to number of channels in the processed array" );
1754 if( coi != 0 || coi2 != 0 )
1755 CV_ERROR( CV_BadCOI, "" );
/* Choose the fastest applicable implementation. */
1758 CvTransformFunc func = (CvTransformFunc)(transform_tab.fn_2d[type]);
1759 CvDiagTransformFunc diag_func = 0;
1760 CvLUT_TransformFunc lut_func = 0;
1761 int diag_transform = 0;
1762 CvColorTwistIPPFunc ipp_func = 0;
1764 float* ipp_coeffs = (float*)cvStackAlloc( 16*sizeof(ipp_coeffs[0]) );
1767 CV_ERROR( CV_StsUnsupportedFormat, "" );
// IPP color twist: cn==dst_cn==3 (or 4 with zero 4th-row terms)
1770 ipp_func = type == CV_8UC3 ? icvColorTwist_8u_C3R_p :
1771 type == CV_16UC3 ? icvColorTwist_16u_C3R_p :
1772 type == CV_16SC3 ? icvColorTwist_16s_C3R_p :
1773 type == CV_32FC3 ? icvColorTwist_32f_C3R_p :
1774 type == CV_32FC4 && fabs(buffer[4]) < DBL_EPSILON &&
1775 fabs(buffer[9]) < DBL_EPSILON && fabs(buffer[14]) < DBL_EPSILON &&
1776 fabs(buffer[19]) < DBL_EPSILON ? icvColorTwist_32f_C4R_p : 0;
// IPP color-to-gray: single output channel, convex-ish non-negative weights
1777 else if( dst_cn == 1 && (cn == 3 || cn == 4) &&
1778 buffer[0] >= 0 && buffer[1] >= 0 && buffer[2] >= 0 &&
1779 buffer[0] + buffer[1] + buffer[2] <= 1.01 &&
1780 fabs(buffer[3]) < DBL_EPSILON && (cn == 3 || fabs(buffer[4]) < DBL_EPSILON) )
1783 ipp_func = type == CV_8UC3 ? icvColorToGray_8u_C3C1R_p :
1784 type == CV_16UC3 ? icvColorToGray_16u_C3C1R_p :
1785 type == CV_16SC3 ? icvColorToGray_16s_C3C1R_p :
1786 type == CV_32FC3 ? icvColorToGray_32f_C3C1R_p : 0;
1788 ipp_func = type == CV_8UC4 ? icvColorToGray_8u_AC4C1R_p :
1789 type == CV_16UC4 ? icvColorToGray_16u_AC4C1R_p :
1790 type == CV_16SC4 ? icvColorToGray_16s_AC4C1R_p :
1791 type == CV_32FC4 ? icvColorToGray_32f_AC4C1R_p : 0;
// detect a diagonal matrix: all off-diagonal coefficients ~0
1797 for( i = 0; i < dst_cn; i++ )
1798 for( j = 0; j < cn; j++ )
1800 if( i != j && fabs(buffer[i*(cn+1) + j]) > DBL_EPSILON )
1807 if( diag_transform )
/* 8u diagonal case: precompute a 256-entry per-channel LUT
   (interleaved cn entries per value) and use the LUT transform. */
1809 if( CV_MAT_DEPTH(type) == CV_8U )
1811 CV_CALL( lut = cvCreateMat( 1, 256, type ));
1812 for( i = 0; i < cn; i++ )
1814 double a = buffer[i*(cn+1) + i], b = buffer[i*(cn+1) + cn];
1815 uchar* ltab = lut->data.ptr;
1816 for( j = 0; j < 256; j++ )
1818 int t = cvRound(a*j + b);
1819 ltab[j*cn + i] = CV_CAST_8U(t);
1822 lut_func = cn == 1 ? (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C1R :
1823 cn == 2 ? (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C2R :
1824 cn == 3 ? (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C3R :
1825 (CvLUT_TransformFunc)icvLUT_Transform8u_8u_C4R;
1828 diag_func = (CvDiagTransformFunc)(diag_transform_tab.fn_2d[type]);
/* Repack the double coefficients into the float layout IPP expects
   (4 floats per input channel). */
1834 const double* ptr = buffer;
1836 // fill cn x 4 ipp_coeffs array
1837 for( i = 0; i < cn*4; i += 4, ptr += cn+1 )
1839 float t0 = (float)ptr[0];
1840 float t1 = (float)ptr[1];
1842 ipp_coeffs[i+1] = t1;
1845 ipp_coeffs[i+2] = t0;
1846 ipp_coeffs[i+3] = t1;
/* Matrix (non-sequence) path: collapse continuous arrays to one row,
   then call the selected implementation once. */
1852 int srcstep = src->step;
1853 int dststep = dst->step;
1854 size = cvGetMatSize( src );
1856 if( CV_IS_MAT_CONT( src->type & dst->type ))
1858 size.width *= size.height;
1860 srcstep = dststep = CV_STUB_STEP;
1864 lut_func( src->data.ptr, src->step, dst->data.ptr,
1865 dst->step, size, lut->data.ptr );
1868 IPPI_CALL( ipp_func( src->data.ptr, srcstep, dst->data.ptr,
1869 dststep, size, ipp_coeffs ));
1871 else if( diag_transform )
1872 diag_func( src->data.ptr, src->step, dst->data.ptr,
1873 dst->step, size, buffer );
1875 func( src->data.ptr, src->step, dst->data.ptr,
1876 dst->step, size, buffer, dst_cn );
/* Sequence path: walk the src/dst sequence blocks in lock-step, processing
   min(remaining-in-src-block, remaining-in-dst-block) elements per call. */
1880 CvSeqBlock* src_block = src_seq->first;
1881 CvSeqBlock* dst_block = dst_seq->first;
1882 int src_idx = 0, dst_idx = 0;
1883 int src_elem_size = CV_ELEM_SIZE(src_seq->flags);
1884 int dst_elem_size = CV_ELEM_SIZE(dst_seq->flags);
1886 for( i = src_seq->total; i > 0; )
1888 int src_len = src_block->count - src_idx;
1889 int dst_len = dst_block->count - dst_idx;
1890 const void* srcptr = src_block->data + src_idx*src_elem_size;
1891 void* dstptr = dst_block->data + dst_idx*dst_elem_size;
1892 src_len = MIN(src_len, dst_len);
1895 lut_func( srcptr, CV_STUB_STEP, dstptr, CV_STUB_STEP,
1896 cvSize( src_len, 1 ), lut->data.ptr );
1899 IPPI_CALL( ipp_func( srcptr, CV_STUB_STEP, dstptr, CV_STUB_STEP,
1900 cvSize( src_len, 1 ), ipp_coeffs ));
1902 else if( diag_transform )
1903 diag_func( srcptr, CV_STUB_STEP, dstptr, CV_STUB_STEP,
1904 cvSize( src_len, 1 ), buffer );
1906 func( srcptr, CV_STUB_STEP, dstptr, CV_STUB_STEP,
1907 cvSize( src_len, 1 ), buffer, dst_cn );
// advance to the next block once the current one is exhausted
1909 if( (src_idx += src_len) == src_block->count )
1910 src_block = src_block->next, src_idx = 0;
1911 if( (dst_idx += src_len) == dst_block->count )
1912 dst_block = dst_block->next, dst_idx = 0;
// cleanup: the LUT (if allocated) is released on both success and error paths
1920 cvReleaseMat( &lut );
1924 /****************************************************************************************\
1925 * cvPerspectiveTransform *
1926 \****************************************************************************************/
/* Generates icvPerspectiveTransform_<flavor>_C2R: projective transform of
   2-channel (x,y) points by a 3x3 matrix; w is the homogeneous divisor.
   Near-zero w (|w| <= FLT_EPSILON) maps the point to (0,0) instead of
   dividing. NOTE(review): the lines computing w = 1/w appear elided from
   this listing, which is why the products below multiply by w. */
1928 #define ICV_PERSPECTIVE_TRANSFORM_FUNC_2( flavor, arrtype ) \
1929 static CvStatus CV_STDCALL \
1930 icvPerspectiveTransform_##flavor##_C2R( const arrtype* src, int srcstep, \
1931 arrtype* dst, int dststep, \
1932 CvSize size, const double* mat ) \
1936 srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \
1938 for( ; size.height--; src += srcstep, dst += dststep ) \
1940 for( i = 0; i < size.width; i += 2 ) \
1942 arrtype x = src[i], y = src[i + 1]; \
1943 double w = x*mat[6] + y*mat[7] + mat[8]; \
1945 if( fabs(w) > FLT_EPSILON ) \
1948 dst[i] = (arrtype)((x*mat[0] + y*mat[1] + mat[2]) * w); \
1949 dst[i+1] = (arrtype)((x*mat[3] + y*mat[4] + mat[5]) * w); \
1953 dst[i] = (arrtype)0; \
1954 dst[i+1] = (arrtype)0; \
/* Generates icvPerspectiveTransform_<flavor>_C3R: projective transform of
   3-channel (x,y,z) points by a 4x4 matrix; w is the homogeneous divisor
   from the last matrix row. Near-zero w maps the point to (0,0,0).
   NOTE(review): as in the C2 variant, the elided lines presumably invert w. */
1963 #define ICV_PERSPECTIVE_TRANSFORM_FUNC_3( flavor, arrtype ) \
1964 static CvStatus CV_STDCALL \
1965 icvPerspectiveTransform_##flavor##_C3R( const arrtype* src, int srcstep, \
1966 arrtype* dst, int dststep, \
1967 CvSize size, const double* mat ) \
1971 srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \
1973 for( ; size.height--; src += srcstep, dst += dststep ) \
1975 for( i = 0; i < size.width; i += 3 ) \
1977 arrtype x = src[i], y = src[i + 1], z = src[i + 2]; \
1978 double w = x*mat[12] + y*mat[13] + z*mat[14] + mat[15]; \
1980 if( fabs(w) > FLT_EPSILON ) \
1983 dst[i] = (arrtype)((x*mat[0] + y*mat[1] + z*mat[2] + mat[3]) * w); \
1984 dst[i+1] = (arrtype)((x*mat[4] + y*mat[5] + z*mat[6] + mat[7]) * w); \
1985 dst[i+2] = (arrtype)((x*mat[8] + y*mat[9] + z*mat[10] + mat[11]) * w); \
1989 dst[i] = (arrtype)0; \
1990 dst[i+1] = (arrtype)0; \
1991 dst[i+2] = (arrtype)0; \
/* Instantiate the perspective transforms for 32f/64f, 2 and 3 channels. */
1999 ICV_PERSPECTIVE_TRANSFORM_FUNC_2( 32f, float )
2000 ICV_PERSPECTIVE_TRANSFORM_FUNC_2( 64f, double )
2001 ICV_PERSPECTIVE_TRANSFORM_FUNC_3( 32f, float )
2002 ICV_PERSPECTIVE_TRANSFORM_FUNC_3( 64f, double )
/* Fills the two dispatch tables used by cvPerspectiveTransform: tab2 for
   2-channel points, tab3 for 3-channel, indexed by depth (32F/64F).
   NOTE(review): the trailing backslashes on a plain (non-macro) function are
   suspicious — they look like leftovers from this listing; line splicing
   keeps the code valid, but confirm against the original source. */
2004 static void icvInitPerspectiveTransformTable( CvFuncTable* tab2, CvFuncTable* tab3 )\
2006 tab2->fn_2d[CV_32F] = (void*)icvPerspectiveTransform_32f_C2R; \
2007 tab2->fn_2d[CV_64F] = (void*)icvPerspectiveTransform_64f_C2R; \
2008 tab3->fn_2d[CV_32F] = (void*)icvPerspectiveTransform_32f_C3R; \
2009 tab3->fn_2d[CV_64F] = (void*)icvPerspectiveTransform_64f_C3R; \
/* cvPerspectiveTransform: transforms every 2- or 3-channel point of src by
   the (cn+1)x(cn+1) projective matrix `mat` with homogeneous division,
   writing the result to dst (same type/size as src). */
2014 cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat )
2016 static CvFuncTable tab[2];
2017 static int inittab = 0;
// NOTE(review): historical name mismatch in the listing — function is cvPerspectiveTransform
2020 CV_FUNCNAME( "cvPerspectiveProject" );
2024 CvMat sstub, *src = (CvMat*)srcarr;
2025 CvMat dstub, *dst = (CvMat*)dstarr;
2027 CvFunc2D_2A1P func = 0;
// one-time lazy initialization: tab[0] = 2-channel, tab[1] = 3-channel
2032 icvInitPerspectiveTransformTable( &tab[0], &tab[1] );
2036 if( !CV_IS_MAT( src ))
2039 CV_CALL( src = cvGetMat( src, &sstub, &coi ));
2042 CV_ERROR( CV_BadCOI, "" );
2045 if( !CV_IS_MAT( dst ))
2048 CV_CALL( dst = cvGetMat( dst, &dstub, &coi ));
2051 CV_ERROR( CV_BadCOI, "" );
2054 if( !CV_ARE_TYPES_EQ( src, dst ))
2055 CV_ERROR( CV_StsUnmatchedFormats, "" );
2057 if( !CV_ARE_SIZES_EQ( src, dst ))
2058 CV_ERROR( CV_StsUnmatchedSizes, "" );
2060 type = CV_MAT_TYPE( src->type );
2061 cn = CV_MAT_CN( type );
2063 if( cn != 2 && cn != 3 )
2064 CV_ERROR( CV_BadNumChannels, cvUnsupportedFormat );
2066 if( !CV_IS_MAT( mat ))
2067 CV_ERROR( CV_StsBadArg, "Invalid transformation matrix" );
2069 if( mat->rows != cn + 1 && mat->cols != mat->rows )
2070 CV_ERROR( CV_StsBadSize,
2071 "The size of transform matrix must be equal to number of channels" );
/* Copy the (cn+1)x(cn+1) matrix into the double buffer, row-major. */
2073 if( CV_MAT_TYPE( mat->type ) == CV_64FC1 )
2075 for( i = 0; i <= cn; i++ )
2077 for( j = 0; j <= cn; j++ )
2078 buffer[i*(cn+1) + j] = ((double*)(mat->data.ptr + mat->step*i))[j];
2081 else if( CV_MAT_TYPE( mat->type ) == CV_32FC1 )
2083 for( i = 0; i <= cn; i++ )
2085 for( j = 0; j <= cn; j++ )
2086 buffer[i*(cn+1) + j] = ((float*)(mat->data.ptr + mat->step*i))[j];
2091 CV_ERROR( CV_StsUnsupportedFormat, "Rotation matrix must be 32fC1 or 64fC1" );
2094 func = (CvFunc2D_2A1P)tab[cn == 3].fn_2d[CV_MAT_DEPTH(type)];
2097 CV_ERROR( CV_StsUnsupportedFormat, "" );
2099 size = cvGetMatSize( src );
// collapse continuous arrays to a single row for one pass
2101 if( CV_IS_MAT_CONT( src->type & dst->type ))
2103 size.width *= size.height;
2107 IPPI_CALL( func( src->data.ptr, src->step, dst->data.ptr, dst->step, size, buffer));
2109 CV_CHECK_NANS( dst );
2115 /****************************************************************************************\
2117 \****************************************************************************************/
/* Row kernel for cvScaleAdd, 1-channel: dst = src1*s0 + src2, unrolled by 4
   with a scalar tail loop. s0 comes from the CV_UN_ENTRY_C1 entry macro in
   the enclosing function (its declaration lines are elided here). */
2119 #define ICV_DEF_MULADDC_CASE_C1( arrtype, temptype, src1, src2, dst, len ) \
2123 for( i = 0; i <= (len) - 4; i += 4 ) \
2125 temptype t0 = (src1)[i]*s0 + (src2)[i]; \
2126 temptype t1 = (src1)[i+1]*s0 + (src2)[i+1]; \
2128 (dst)[i] = (arrtype)t0; \
2129 (dst)[i+1] = (arrtype)t1; \
2131 t0 = (src1)[i+2]*s0 + (src2)[i+2]; \
2132 t1 = (src1)[i+3]*s0 + (src2)[i+3]; \
2134 (dst)[i+2] = (arrtype)t0; \
2135 (dst)[i+3] = (arrtype)t1; \
2138 for( ; i < (len); i++ ) \
2140 temptype t0 = (src1)[i]*s0 + (src2)[i]; \
2141 (dst)[i] = (arrtype)t0; \
/* Row kernel for cvScaleAdd, 2-channel treated as complex numbers:
   dst = src1*(s0 + i*s1) + src2, i.e.
       re = re1*s0 - im1*s1 + re2,  im = re1*s1 + im1*s0 + im2.
   Unrolled two complex elements per iteration, tail loop steps by 2. */
2146 #define ICV_DEF_MULADDC_CASE_C2( arrtype, temptype, src1, src2, dst, len ) \
2150 for( i = 0; i <= (len) - 4; i += 4 ) \
2152 temptype t0 = (src1)[i]*s0 - (src1)[i+1]*s1 + (src2)[i]; \
2153 temptype t1 = (src1)[i]*s1 + (src1)[i+1]*s0 + (src2)[i+1]; \
2155 (dst)[i] = (arrtype)t0; \
2156 (dst)[i+1] = (arrtype)t1; \
2158 t0 = (src1)[i+2]*s0 - (src1)[i+3]*s1 + (src2)[i+2]; \
2159 t1 = (src1)[i+2]*s1 + (src1)[i+3]*s0 + (src2)[i+3]; \
2161 (dst)[i+2] = (arrtype)t0; \
2162 (dst)[i+3] = (arrtype)t1; \
2165 for( ; i < (len); i += 2 ) \
2167 temptype t0 = (src1)[i]*s0 - (src1)[i+1]*s1 + (src2)[i]; \
2168 temptype t1 = (src1)[i]*s1 + (src1)[i+1]*s0 + (src2)[i+1]; \
2170 (dst)[i] = (arrtype)t0; \
2171 (dst)[i+1] = (arrtype)t1; \
/* Generates icvMulAddC_<flavor>: row-wise driver for dst = src1*scalar + src2.
   `entry` (CV_UN_ENTRY_C1/C2) unpacks the scalar into s0[,s1]; steps arrive
   in bytes and are converted to element units. */
2176 #define ICV_DEF_MULADDS_FUNC( flavor, arrtype, scalartype, entry, cn ) \
2177 static CvStatus CV_STDCALL \
2178 icvMulAddC_##flavor( const arrtype* src1, int srcstep1, \
2179 const arrtype* src2, int srcstep2, \
2180 arrtype* dst, int dststep, CvSize size, \
2181 const scalartype* scalar ) \
2183 entry(scalartype); \
2184 size.width *= (cn); \
2185 srcstep1 /= sizeof(src1[0]); srcstep2 /= sizeof(src2[0]); \
2186 dststep /= sizeof(dst[0]); \
2188 for( ; size.height--; src1+=srcstep1, src2+=srcstep2, dst+=dststep ) \
2190 ICV_DEF_MULADDC_CASE_C##cn( arrtype, scalartype, src1, src2, \
/* Instantiations: 32f/64f, real (C1) and complex (C2) variants. */
2198 ICV_DEF_MULADDS_FUNC( 32f_C1R, float, double, CV_UN_ENTRY_C1, 1 )
2199 ICV_DEF_MULADDS_FUNC( 32f_C2R, float, double, CV_UN_ENTRY_C2, 2 )
2200 ICV_DEF_MULADDS_FUNC( 64f_C1R, double, double, CV_UN_ENTRY_C1, 1 )
2201 ICV_DEF_MULADDS_FUNC( 64f_C2R, double, double, CV_UN_ENTRY_C2, 2 )
/* Fills the cvScaleAdd dispatch table (only 32f/64f, 1- and 2-channel
   types are supported; all other slots stay 0). */
2205 icvInitMulAddCTable( CvBigFuncTable* tab )
2207 tab->fn_2d[CV_32FC1] = (void*)icvMulAddC_32f_C1R;
2208 tab->fn_2d[CV_32FC2] = (void*)icvMulAddC_32f_C2R;
2209 tab->fn_2d[CV_64FC1] = (void*)icvMulAddC_64f_C1R;
2210 tab->fn_2d[CV_64FC2] = (void*)icvMulAddC_64f_C2R;
/* cvScaleAdd: dst = src1*scale + src2 element-wise (real 1-channel, or
   complex multiply for 2-channel). Small continuous 32f/64f single-channel
   arrays take an inlined loop; everything else dispatches via muladds_tab. */
2215 cvScaleAdd( const CvArr* srcarr1, CvScalar scale,
2216 const CvArr* srcarr2, CvArr* dstarr )
2218 static CvBigFuncTable muladds_tab;
2219 static int inittab = 0;
2221 CV_FUNCNAME( "cvScaleAdd" );
2225 CvMat stub1, *src1 = (CvMat*)srcarr1;
2226 CvMat stub2, *src2 = (CvMat*)srcarr2;
2227 CvMat stub, *dst = (CvMat*)dstarr;
2231 if( !CV_IS_MAT( src1 ) || !CV_IS_MAT(src2) || !CV_IS_MAT(dst))
2233 int coi1 = 0, coi2 = 0, coi3 = 0;
2234 CV_CALL( src1 = cvGetMat( src1, &stub1, &coi1 ));
2235 CV_CALL( src2 = cvGetMat( src2, &stub2, &coi2 ));
2236 CV_CALL( dst = cvGetMat( dst, &stub, &coi3 ));
2238 if( coi1 + coi2 + coi3 != 0 )
2239 CV_ERROR( CV_BadCOI, "" );
2242 if( !CV_ARE_TYPES_EQ( src1, dst ) || !CV_ARE_TYPES_EQ( src2, dst ))
2243 CV_ERROR( CV_StsUnmatchedFormats, "" );
2245 if( !CV_ARE_SIZES_EQ( src1, dst ) || !CV_ARE_SIZES_EQ( src2, dst ))
2246 CV_ERROR( CV_StsUnmatchedSizes, "" );
2248 type = CV_MAT_TYPE( src1->type );
2249 size = cvGetMatSize( src1 );
/* Inline fast path: small, continuous, single-channel float/double data
   is processed with a simple backwards do/while loop (only scale.val[0]
   is used — real scalar). */
2251 if( CV_IS_MAT_CONT( src1->type & src2->type & dst->type ))
2253 size.width *= size.height;
2255 if( size.width <= CV_MAX_INLINE_MAT_OP_SIZE )
2257 if( type == CV_32FC1 )
2259 float* mA = src1->data.fl;
2260 float* mB = src2->data.fl;
2261 float* mC = dst->data.fl;
2265 mC[size.width - 1] = (float)(mA[size.width - 1]*scale.val[0] +
2266 mB[size.width - 1]);
2268 while( --size.width );
2273 if( type == CV_64FC1 )
2275 double* mA = src1->data.db;
2276 double* mB = src2->data.db;
2277 double* mC = dst->data.db;
2281 mC[size.width - 1] = mA[size.width - 1]*scale.val[0] +
2284 while( --size.width );
// general path: lazy table init, then per-type dispatch
2295 icvInitMulAddCTable( &muladds_tab );
2299 if( CV_MAT_CN(type) > 2 )
2300 CV_ERROR( CV_StsOutOfRange, "The function only supports 1- and 2-channel arrays" );
2303 CvFunc2D_3A1P func = (CvFunc2D_3A1P)(muladds_tab.fn_2d[type]);
2306 CV_ERROR( CV_StsUnsupportedFormat, "" );
2308 IPPI_CALL( func( src1->data.ptr, src1->step, src2->data.ptr, src2->step,
2309 dst->data.ptr, dst->step, size, scale.val ));
2312 CV_CHECK_NANS( dst );
2318 /****************************************************************************************\
2319 * cvCalcCovarMatrix *
2320 \****************************************************************************************/
/* Generates icvDotProductShifted_<flavor>_C1R for cvCalcCovarMatrix:
   accumulates sum((vec1 - avg) .* (vec2 - avg)) over the whole 2D array,
   4x unrolled per row with a scalar tail; result returned via *_result.
   load_macro converts a source element to the accumulator type. */
2322 #define ICV_DOT_PRODUCT_CASE( flavor, srctype, avgtype, load_macro ) \
2323 static CvStatus CV_STDCALL \
2324 icvDotProductShifted_##flavor##_C1R( const srctype* vec1, int vecstep1, \
2325 const srctype* vec2, int vecstep2, \
2326 const avgtype* avg, int avgstep, \
2327 CvSize size, double* _result ) \
2329 double result = 0; \
2330 vecstep1 /= sizeof(vec1[0]); vecstep2 /= sizeof(vec2[0]); avgstep /= sizeof(avg[0]);\
2332 for( ; size.height--; vec1 += vecstep1, vec2 += vecstep2, avg += avgstep ) \
2335 for( x = 0; x <= size.width - 4; x += 4 ) \
2336 result += (load_macro(vec1[x]) - avg[x])*(load_macro(vec2[x]) - avg[x]) + \
2337 (load_macro(vec1[x+1]) - avg[x+1])*(load_macro(vec2[x+1]) - avg[x+1]) + \
2338 (load_macro(vec1[x+2]) - avg[x+2])*(load_macro(vec2[x+2]) - avg[x+2]) + \
2339 (load_macro(vec1[x+3]) - avg[x+3])*(load_macro(vec2[x+3]) - avg[x+3]); \
2340 for( ; x < size.width; x++ ) \
2341 result += (load_macro(vec1[x]) - avg[x])*(load_macro(vec2[x]) - avg[x]); \
2344 *_result = result; \
/* Instantiations: <src depth><accumulator depth> flavors; 8s/32s have no
   direct kernels (their table slots are 0 in the init function below). */
2349 ICV_DOT_PRODUCT_CASE( 8u32f, uchar, float, CV_8TO32F )
2350 ICV_DOT_PRODUCT_CASE( 8u64f, uchar, double, CV_8TO32F )
2351 ICV_DOT_PRODUCT_CASE( 16u32f, ushort, float, CV_NOP )
2352 ICV_DOT_PRODUCT_CASE( 16u64f, ushort, double, CV_NOP )
2353 ICV_DOT_PRODUCT_CASE( 16s32f, short, float, CV_NOP )
2354 ICV_DOT_PRODUCT_CASE( 16s64f, short, double, CV_NOP )
2355 ICV_DOT_PRODUCT_CASE( 32f, float, float, CV_NOP )
2356 ICV_DOT_PRODUCT_CASE( 32f64f, float, double, CV_NOP )
2357 ICV_DOT_PRODUCT_CASE( 64f, double, double, CV_NOP )
/* Fills the shifted-dot-product dispatch tables for cvCalcCovarMatrix:
   tabfl = float accumulator, tabdb = double accumulator, indexed by source
   depth. Unsupported depths (8s, 32s, and 64f-into-float) stay 0. */
2359 static void icvInitDotProductShiftedTable( CvFuncTable* tabfl, CvFuncTable* tabdb )
2361 tabfl->fn_2d[CV_8U] = (void*)icvDotProductShifted_8u32f_C1R;
2362 tabfl->fn_2d[CV_8S] = 0;
2363 tabfl->fn_2d[CV_16U] = (void*)icvDotProductShifted_16u32f_C1R;
2364 tabfl->fn_2d[CV_16S] = (void*)icvDotProductShifted_16s32f_C1R;
2365 tabfl->fn_2d[CV_32S] = 0;
2366 tabfl->fn_2d[CV_32F] = (void*)icvDotProductShifted_32f_C1R;
2367 tabfl->fn_2d[CV_64F] = 0;
2369 tabdb->fn_2d[CV_8U] = (void*)icvDotProductShifted_8u64f_C1R;
2370 tabdb->fn_2d[CV_8S] = 0;
2371 tabdb->fn_2d[CV_16U] = (void*)icvDotProductShifted_16u64f_C1R;
2372 tabdb->fn_2d[CV_16S] = (void*)icvDotProductShifted_16s64f_C1R;
2373 tabdb->fn_2d[CV_32S] = 0;
2374 tabdb->fn_2d[CV_32F] = (void*)icvDotProductShifted_32f64f_C1R;
2375 tabdb->fn_2d[CV_64F] = (void*)icvDotProductShifted_64f_C1R;
/* Generates icvExtProductShifted_<flavor>_C1R for cvCalcCovarMatrix:
   accumulates the outer product (vec - avg)*(vec - avg)^T into dst.
   Pass 1 flattens (vec - avg) into tempbuf; pass 2 adds ty*tempbuf[x] into
   each dst row — only the lower triangle (x <= y) is updated here, 4x
   unrolled; the caller completes the symmetric upper half. */
2378 #define ICV_EXT_PRODUCT_CASE( flavor, srctype, avgtype, load_macro ) \
2379 static CvStatus CV_STDCALL \
2380 icvExtProductShifted_##flavor##_C1R( const srctype* vec, int vecstep, \
2381 const avgtype* avg, int avgstep, \
2382 avgtype* dst, int dststep, \
2383 CvSize size, avgtype* tempbuf ) \
2385 int x, y, dstsize = size.width * size.height; \
2387 vecstep /= sizeof(vec[0]); avgstep /= sizeof(avg[0]); \
2388 for( y = 0; y < size.height; y++, vec += vecstep, avg += avgstep ) \
2389 for( x = 0; x < size.width; x++ ) \
2390 *tempbuf++ = load_macro(vec[x]) - avg[x]; \
2391 tempbuf -= dstsize; \
2393 dststep /= sizeof(dst[0]); \
2394 for( y = 0; y < dstsize; y++, dst += dststep ) \
2396 double ty = tempbuf[y]; \
2397 for( x = 0; x <= y - 3; x += 4 ) \
2399 double t0 = dst[x] + ty*tempbuf[x]; \
2400 double t1 = dst[x+1] + ty*tempbuf[x+1]; \
2401 dst[x] = (avgtype)t0; \
2402 dst[x+1] = (avgtype)t1; \
2403 t0 = dst[x+2] + ty*tempbuf[x+2]; \
2404 t1 = dst[x+3] + ty*tempbuf[x+3]; \
2405 dst[x+2] = (avgtype)t0; \
2406 dst[x+3] = (avgtype)t1; \
2408 for( ; x <= y; x++ ) \
2409 dst[x] = (avgtype)(dst[x] + ty*tempbuf[x]); \
/* Instantiations matching the dot-product flavors above. */
2415 ICV_EXT_PRODUCT_CASE( 8u32f, uchar, float, CV_8TO32F )
2416 ICV_EXT_PRODUCT_CASE( 8u64f, uchar, double, CV_8TO32F )
2417 ICV_EXT_PRODUCT_CASE( 16u32f, ushort, float, CV_NOP )
2418 ICV_EXT_PRODUCT_CASE( 16u64f, ushort, double, CV_NOP )
2419 ICV_EXT_PRODUCT_CASE( 16s32f, short, float, CV_NOP )
2420 ICV_EXT_PRODUCT_CASE( 16s64f, short, double, CV_NOP )
2421 ICV_EXT_PRODUCT_CASE( 32f, float, float, CV_NOP )
2422 ICV_EXT_PRODUCT_CASE( 32f64f, float, double, CV_NOP )
2423 ICV_EXT_PRODUCT_CASE( 64f, double, double, CV_NOP )
/* Fills the shifted-outer-product dispatch tables (same layout as the
   dot-product tables: tabfl = float accumulator, tabdb = double). */
2426 static void icvInitExtProductShiftedTable( CvFuncTable* tabfl, CvFuncTable* tabdb )
2428 tabfl->fn_2d[CV_8U] = (void*)icvExtProductShifted_8u32f_C1R;
2429 tabfl->fn_2d[CV_8S] = 0;
2430 tabfl->fn_2d[CV_16U] = (void*)icvExtProductShifted_16u32f_C1R;
2431 tabfl->fn_2d[CV_16S] = (void*)icvExtProductShifted_16s32f_C1R;
2432 tabfl->fn_2d[CV_32S] = 0;
2433 tabfl->fn_2d[CV_32F] = (void*)icvExtProductShifted_32f_C1R;
2434 tabfl->fn_2d[CV_64F] = 0;
2436 tabdb->fn_2d[CV_8U] = (void*)icvExtProductShifted_8u64f_C1R;
2437 tabdb->fn_2d[CV_8S] = 0;
2438 tabdb->fn_2d[CV_16U] = (void*)icvExtProductShifted_16u64f_C1R;
2439 tabdb->fn_2d[CV_16S] = (void*)icvExtProductShifted_16s64f_C1R;
2440 tabdb->fn_2d[CV_32S] = 0;
2441 tabdb->fn_2d[CV_32F] = (void*)icvExtProductShifted_32f64f_C1R;
2442 tabdb->fn_2d[CV_64F] = (void*)icvExtProductShifted_64f_C1R;
2446 typedef struct vec_data
2454 cvCalcCovarMatrix( const CvArr** vecarr, int count,
2455 CvArr* covarr, CvArr* avgarr, int flags )
2457 static CvFuncTable dot_tab[2];
2458 static CvFuncTable ext_tab[2];
2459 static int inittab = 0;
2460 vec_data* vecdata = 0;
2463 CV_FUNCNAME( "cvCalcCovarMatrix" );
2467 CvMat covstub, *cov = (CvMat*)covarr;
2468 CvMat avgstub, *avg = (CvMat*)avgarr;
2469 CvMat vecstub0, *vecmat = 0;
2470 CvSize srcsize, contsize;
2471 int srctype = 0, dsttype = 0;
2473 int cont_flag, vec_delta = 0, vec_step = 0;
2474 int is_covar_normal = (flags & CV_COVAR_NORMAL) != 0;
2479 icvInitDotProductShiftedTable( dot_tab + 0, dot_tab + 1 );
2480 icvInitExtProductShiftedTable( ext_tab + 0, ext_tab + 1 );
2485 CV_ERROR( CV_StsNullPtr, "NULL vec pointer" );
2487 CV_CALL( cov = cvGetMat( cov, &covstub ));
2488 CV_CALL( avg = cvGetMat( avg, &avgstub ));
2490 if( !CV_ARE_TYPES_EQ( cov, avg ))
2491 CV_ERROR( CV_StsUnmatchedFormats,
2492 "Covariation matrix and average vector should have the same types" );
2494 dsttype = CV_MAT_TYPE( cov->type );
2495 if( dsttype != CV_32FC1 && dsttype != CV_64FC1 )
2496 CV_ERROR( CV_StsUnsupportedFormat, "Covariation matrix must be 32fC1 or 64fC1" );
2498 if( cov->rows != cov->cols )
2499 CV_ERROR( CV_StsBadSize, "Covariation matrix must be square" );
2501 srcsize = cvGetMatSize( avg );
2502 contsize.width = srcsize.width * srcsize.height;
2503 contsize.height = 1;
2504 cont_flag = avg->type;
2506 if( flags & (CV_COVAR_ROWS|CV_COVAR_COLS) )
2508 CV_CALL( vecmat = cvGetMat( vecarr[0], &vecstub0 ));
2509 srctype = CV_MAT_TYPE(vecmat->type);
2510 if( flags & CV_COVAR_COLS )
2512 count = vecmat->cols;
2513 if( avg->cols != 1 || avg->rows != vecmat->rows )
2514 CV_ERROR( CV_StsUnmatchedSizes,
2515 "The number of input vectors does not match to avg vector size" );
2517 vec_delta = CV_ELEM_SIZE(vecmat->type);
2518 vec_step = vecmat->step;
2522 count = vecmat->rows;
2523 if( avg->rows != 1 || avg->cols != vecmat->cols )
2524 CV_ERROR( CV_StsUnmatchedSizes,
2525 "The number of input vectors does not match to avg vector size" );
2526 vec_delta = vecmat->step;
2527 vec_step = CV_STUB_STEP;
2530 if( !(flags & CV_COVAR_USE_AVG) )
2531 CV_CALL( cvReduce( vecmat, avg, -1, CV_REDUCE_AVG ));
2533 scale = !(flags & CV_COVAR_SCALE) ? 1. : 1./count;
2535 cvMulTransposed( vecmat, cov, ((flags & CV_COVAR_ROWS)!=0) ^ ((flags & CV_COVAR_NORMAL)==0), avg, scale );
2539 scale = !(flags & CV_COVAR_SCALE) ? 1. : 1./count;
2541 if( is_covar_normal )
2544 CV_ERROR( CV_StsBadSize,
2545 "The number of vectors is zero or negative" );
2546 if( cov->rows != contsize.width )
2547 CV_ERROR( CV_StsUnmatchedSizes,
2548 "The size of input vectors does not match with the size of covariation matrix" );
2550 CV_CALL( tempvec = cvCreateMat( avg->rows, avg->cols, dsttype ));
2552 else if( count != cov->rows )
2553 CV_ERROR( CV_StsUnmatchedSizes,
2554 "The vector count and covariance matrix size do not match" );
2556 if( !(flags & (CV_COVAR_ROWS|CV_COVAR_COLS)) )
2558 if( !(flags & CV_COVAR_USE_AVG) )
2561 CV_CALL( vecdata = (vec_data*)cvAlloc( count*sizeof(vecdata[0])));
2563 for( i = 0; i < count; i++ )
2565 CvMat vecstub, *vec = (CvMat*)vecarr[i];
2568 if( !CV_IS_MAT(vec) )
2569 CV_CALL( vec = cvGetMat( vec, &vecstub ));
2571 if( !CV_ARE_SIZES_EQ( vec, avg ))
2572 CV_ERROR( CV_StsUnmatchedSizes,
2573 "All input vectors and average vector must have the same size" );
2575 vecdata[i].ptr = vec->data.ptr;
2576 vecdata[i].step = vec->step;
2577 cont_flag &= vec->type;
2581 srctype = CV_MAT_TYPE( vec->type );
2582 if( CV_MAT_CN( srctype ) != 1 )
2583 CV_ERROR( CV_BadNumChannels, "All vectors must have a single channel" );
2584 if( srctype != dsttype && !tempvec && !(flags & CV_COVAR_USE_AVG))
2585 CV_CALL( tempvec = cvCreateMat( vec->rows, vec->cols, dsttype ));
2587 else if( CV_MAT_TYPE(vec->type) != srctype )
2588 CV_ERROR( CV_StsUnmatchedFormats,
2589 "All input vectors must have the same type" );
2591 if( !(flags & CV_COVAR_USE_AVG) )
2596 cvConvert( vec, temp );
2598 cvAdd( temp, avg, avg );
2602 if( !(flags & CV_COVAR_USE_AVG) )
2603 cvScale( avg, avg, 1./count );
2606 cont_flag = CV_IS_MAT_CONT( cont_flag );
2610 if( !is_covar_normal )
2612 CvFunc2D_3A1P dot_func =
2613 (CvFunc2D_3A1P)dot_tab[dsttype == CV_64FC1].fn_2d[CV_MAT_DEPTH(srctype)];
2616 CV_ERROR( CV_StsUnsupportedFormat,
2617 "The format of input vectors is not supported" );
2619 for( i = 0; i < count; i++ )
2623 a = 0, b = i+1, delta = 1;
2625 a = i, b = -1, delta = -1;
2627 for( j = a; j != b; j += delta )
2635 v_i = vecdata[i].ptr;
2636 v_j = vecdata[j].ptr;
2637 step_i = vecdata[i].step;
2638 step_j = vecdata[j].step;
2642 v_i = vecmat->data.ptr + vec_delta*i;
2643 v_j = vecmat->data.ptr + vec_delta*j;
2644 step_i = step_j = vec_step;
2647 dot_func( v_i, step_i, v_j, step_j, avg->data.ptr, avg->step, srcsize, &result );
2649 if( dsttype == CV_64FC1 )
2651 ((double*)(cov->data.ptr + i*cov->step))[j] =
2652 ((double*)(cov->data.ptr + j*cov->step))[i] = result*scale;
2656 ((float*)(cov->data.ptr + i*cov->step))[j] =
2657 ((float*)(cov->data.ptr + j*cov->step))[i] = (float)(result*scale);
2664 uchar* cov_ptr = cov->data.ptr;
2665 int cov_step = cov->step;
2666 int cov_size = cov->rows;
2667 CvFunc2D_3A1P ext_func =
2668 (CvFunc2D_3A1P)ext_tab[dsttype == CV_64FC1].fn_2d[CV_MAT_DEPTH(srctype)];
2670 CV_ERROR( CV_StsUnsupportedFormat,
2671 "The format of input vectors is not supported" );
2675 for( i = 0; i < count; i++ )
2682 vstep = vecdata[i].step;
2686 v = vecmat->data.ptr + vec_delta*i;
2690 ext_func( v, vstep, avg->data.ptr, avg->step,
2691 cov_ptr, cov_step, srcsize, tempvec->data.ptr );
2694 if( dsttype == CV_64FC1 )
2695 for( i = 0; i < cov_size; i++ )
2696 for( j = 0; j <= i; j++ )
2698 double* cov1 = ((double*)(cov_ptr + i*cov_step)) + j;
2699 double* cov2 = ((double*)(cov_ptr + j*cov_step)) + i;
2701 if( flags & CV_COVAR_SCALE )
2702 *cov1 = *cov2 = *cov1*scale;
2707 for( i = 0; i < cov_size; i++ )
2708 for( j = 0; j <= i; j++ )
2710 float* cov1 = ((float*)(cov_ptr + i*cov_step)) + j;
2711 float* cov2 = ((float*)(cov_ptr + j*cov_step)) + i;
2713 if( flags & CV_COVAR_SCALE )
2714 *cov1 = *cov2 = (float)(*cov1*scale);
2723 cvReleaseMat( &tempvec );
/****************************************************************************************\
*                                      Mahalanobis                                      *
\****************************************************************************************/
/* Generates icvMahalanobis_<flavor>_C1R: computes the quadratic form
   vec' * mat * vec for a len x len matrix (mat is the *inverse* covariance,
   vec the difference vector). The inner loop is unrolled by 4.
   Fix: restored the braces, loop-variable declarations and return statement
   lost by the extraction, and removed the fused line-number residue. */
#define ICV_MAHALANOBIS( flavor, arrtype )                                  \
static CvStatus CV_STDCALL                                                  \
icvMahalanobis_##flavor##_C1R( const arrtype* mat, int matstep,             \
                               const arrtype* vec, int len, double* _result ) \
{                                                                           \
    int i, j;                                                               \
    double result = 0;                                                      \
                                                                            \
    matstep /= sizeof(mat[0]);                                              \
    for( i = 0; i < len; i++, mat += matstep )                              \
    {                                                                       \
        double row_sum = 0;                                                 \
        for( j = 0; j <= len - 4; j += 4 )                                  \
            row_sum += vec[j]*mat[j] + vec[j+1]*mat[j+1] +                  \
                       vec[j+2]*mat[j+2] + vec[j+3]*mat[j+3];               \
        for( ; j < len; j++ )                                               \
            row_sum += vec[j]*mat[j];                                       \
        result += row_sum * vec[i];                                         \
    }                                                                       \
    *_result = result;                                                      \
                                                                            \
    return CV_NO_ERR;                                                       \
}
2754 ICV_MAHALANOBIS( 32f, float )
2755 ICV_MAHALANOBIS( 64f, double )
2757 static void icvInitMahalanobisTable( CvFuncTable* tab )
2759 tab->fn_2d[CV_32F] = (void*)icvMahalanobis_32f_C1R;
2760 tab->fn_2d[CV_64F] = (void*)icvMahalanobis_64f_C1R;
2763 typedef CvStatus (CV_STDCALL * CvMahalanobisFunc)( const void* mat, int matstep,
2764 const void* vec, int len, double* _result );
2767 cvMahalanobis( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* matarr )
2769 static CvFuncTable mahal_tab;
2770 static int inittab = 0;
2772 int local_alloc = 0;
2775 CV_FUNCNAME( "cvMahalanobis" );
2779 int buf_size, elem_size, len;
2780 CvMat stubA, *srcA = (CvMat*)srcAarr;
2781 CvMat stubB, *srcB = (CvMat*)srcBarr;
2782 CvMat stub, *mat = (CvMat*)matarr;
2784 CvMahalanobisFunc func;
2788 icvInitMahalanobisTable( &mahal_tab );
2792 if( !CV_IS_MAT(srcA) )
2793 CV_CALL( srcA = cvGetMat( srcA, &stubA ));
2795 if( !CV_IS_MAT(srcB) )
2796 CV_CALL( srcB = cvGetMat( srcB, &stubB ));
2798 if( !CV_IS_MAT(mat) )
2799 CV_CALL( mat = cvGetMat( mat, &stub ));
2801 if( srcA->rows != 1 && srcA->cols != 1 )
2802 CV_ERROR( CV_StsBadSize, "Input matrices must be 1-d vectors" );
2804 len = srcA->rows + srcA->cols - 1;
2806 if( !CV_ARE_SIZES_EQ(srcA,srcB) )
2807 CV_ERROR( CV_StsUnmatchedSizes, "Input vectors have different sizes" );
2809 if( mat->rows != len || mat->cols != len )
2810 CV_ERROR( CV_StsUnmatchedSizes, "Input vectors and covariation matrix have different sizes" );
2812 func = (CvMahalanobisFunc)mahal_tab.fn_2d[CV_MAT_DEPTH(srcA->type)];
2814 if( CV_MAT_CN(srcA->type) > 1 || !func )
2815 CV_ERROR( CV_StsUnsupportedFormat,
2816 "Only single-channel floating-point vectors are supported" );
2818 if( !CV_ARE_TYPES_EQ(srcA,srcB) || !CV_ARE_TYPES_EQ(srcA,mat) )
2819 CV_ERROR( CV_StsUnmatchedSizes, "Input vectors have different sizes" );
2821 elem_size = CV_ELEM_SIZE(srcA->type);
2822 buf_size = len*elem_size;
2824 if( buf_size <= CV_MAX_LOCAL_SIZE )
2826 buffer = (uchar*)cvStackAlloc( buf_size );
2831 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
2834 temp = cvMat( srcA->rows, srcA->cols, srcA->type, buffer );
2835 CV_CALL( cvSub( srcA, srcB, &temp ));
2837 IPPI_CALL( func( mat->data.ptr, mat->step, temp.data.ptr, len, &dist ));
2842 if( buffer && !local_alloc )
/****************************************************************************************\
*                                     cvMulTransposed                                   *
\****************************************************************************************/
/* Generates icvMulTransposedR_<flavor>: dst = (src - delta)' * (src - delta)
   times scale (dst is size.width x size.width). delta may be NULL (no shift)
   or have delta_cols == 1, in which case each delta element is replicated
   into a 4-wide buffer so the unrolled-by-4 inner loop can index it.
   Only the upper triangle is computed; the lower one is mirrored at the end.
   Fix: restored the braces, declarations, allocation else-branch, the
   NULL-delta dispatch and the return statement lost by the extraction, and
   removed the fused line-number residue.
   NOTE(review): restored from a damaged extraction — verify against upstream. */
#define ICV_DEF_MULTRANS_R_FUNC( flavor, srctype, dsttype, load_macro )     \
static CvStatus CV_STDCALL                                                  \
icvMulTransposedR_##flavor( const srctype* src, int srcstep,                \
                            dsttype* dst, int dststep,                      \
                            const dsttype* delta, int deltastep,            \
                            CvSize size, int delta_cols, double scale )     \
{                                                                           \
    int i, j, k;                                                            \
    dsttype* tdst = dst;                                                    \
    dsttype* col_buf = 0;                                                   \
    dsttype* delta_buf = 0;                                                 \
    int local_alloc = 0;                                                    \
    int buf_size = size.height*sizeof(dsttype);                             \
                                                                            \
    if( delta && delta_cols < size.width )                                  \
    {                                                                       \
        assert( delta_cols == 1 );                                          \
        /* reserve room for the 4x-replicated delta column */               \
        buf_size += 4*buf_size;                                             \
    }                                                                       \
                                                                            \
    if( buf_size <= CV_MAX_LOCAL_SIZE )                                     \
    {                                                                       \
        col_buf = (dsttype*)cvStackAlloc( buf_size );                       \
        local_alloc = 1;                                                    \
    }                                                                       \
    else                                                                    \
    {                                                                       \
        col_buf = (dsttype*)cvAlloc( buf_size );                            \
        if( !col_buf )                                                      \
            return CV_OUTOFMEM_ERR;                                         \
    }                                                                       \
                                                                            \
    srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]);                   \
    deltastep /= sizeof(delta[0]);                                          \
                                                                            \
    if( delta && delta_cols < size.width )                                  \
    {                                                                       \
        delta_buf = col_buf + size.height;                                  \
        for( i = 0; i < size.height; i++ )                                  \
            delta_buf[i*4] = delta_buf[i*4+1] =                             \
                delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep];   \
        delta = delta_buf;                                                  \
        deltastep = deltastep ? 4 : 0;                                      \
    }                                                                       \
                                                                            \
    if( !delta )                                                            \
        for( i = 0; i < size.width; i++, tdst += dststep )                  \
        {                                                                   \
            /* cache the i-th source column, then dot it with columns j>=i */ \
            for( k = 0; k < size.height; k++ )                              \
                col_buf[k] = src[k*srcstep+i];                              \
                                                                            \
            for( j = i; j <= size.width - 4; j += 4 )                       \
            {                                                               \
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;                      \
                const srctype *tsrc = src + j;                              \
                                                                            \
                for( k = 0; k < size.height; k++, tsrc += srcstep )         \
                {                                                           \
                    double a = col_buf[k];                                  \
                    s0 += a * load_macro(tsrc[0]);                          \
                    s1 += a * load_macro(tsrc[1]);                          \
                    s2 += a * load_macro(tsrc[2]);                          \
                    s3 += a * load_macro(tsrc[3]);                          \
                }                                                           \
                                                                            \
                tdst[j] = (dsttype)(s0*scale);                              \
                tdst[j+1] = (dsttype)(s1*scale);                            \
                tdst[j+2] = (dsttype)(s2*scale);                            \
                tdst[j+3] = (dsttype)(s3*scale);                            \
            }                                                               \
                                                                            \
            for( ; j < size.width; j++ )                                    \
            {                                                               \
                double s0 = 0;                                              \
                const srctype *tsrc = src + j;                              \
                                                                            \
                for( k = 0; k < size.height; k++, tsrc += srcstep )         \
                    s0 += col_buf[k] * tsrc[0];                             \
                                                                            \
                tdst[j] = (dsttype)(s0*scale);                              \
            }                                                               \
        }                                                                   \
    else                                                                    \
        for( i = 0; i < size.width; i++, tdst += dststep )                  \
        {                                                                   \
            if( !delta_buf )                                                \
                for( k = 0; k < size.height; k++ )                          \
                    col_buf[k] = load_macro(src[k*srcstep+i]) - delta[k*deltastep+i]; \
            else                                                            \
                for( k = 0; k < size.height; k++ )                          \
                    col_buf[k] = load_macro(src[k*srcstep+i]) - delta_buf[k*deltastep]; \
                                                                            \
            for( j = i; j <= size.width - 4; j += 4 )                       \
            {                                                               \
                double s0 = 0, s1 = 0, s2 = 0, s3 = 0;                      \
                const srctype *tsrc = src + j;                              \
                const dsttype *d = delta_buf ? delta_buf : delta + j;       \
                                                                            \
                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep ) \
                {                                                           \
                    double a = col_buf[k];                                  \
                    s0 += a * (load_macro(tsrc[0]) - d[0]);                 \
                    s1 += a * (load_macro(tsrc[1]) - d[1]);                 \
                    s2 += a * (load_macro(tsrc[2]) - d[2]);                 \
                    s3 += a * (load_macro(tsrc[3]) - d[3]);                 \
                }                                                           \
                                                                            \
                tdst[j] = (dsttype)(s0*scale);                              \
                tdst[j+1] = (dsttype)(s1*scale);                            \
                tdst[j+2] = (dsttype)(s2*scale);                            \
                tdst[j+3] = (dsttype)(s3*scale);                            \
            }                                                               \
                                                                            \
            for( ; j < size.width; j++ )                                    \
            {                                                               \
                double s0 = 0;                                              \
                const srctype *tsrc = src + j;                              \
                const dsttype *d = delta_buf ? delta_buf : delta + j;       \
                                                                            \
                for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep ) \
                    s0 += col_buf[k] * (load_macro(tsrc[0]) - d[0]);        \
                                                                            \
                tdst[j] = (dsttype)(s0*scale);                              \
            }                                                               \
        }                                                                   \
                                                                            \
    /* fill the lower part of the destination matrix */                     \
    for( i = 1; i < size.width; i++ )                                       \
        for( j = 0; j < i; j++ )                                            \
            dst[dststep*i + j] = dst[dststep*j + i];                        \
                                                                            \
    if( col_buf && !local_alloc )                                           \
        cvFree( &col_buf );                                                 \
                                                                            \
    return CV_NO_ERR;                                                       \
}
/* Generates icvMulTransposedL_<flavor>: dst = (src - delta) * (src - delta)'
   times scale (dst is size.height x size.height). delta may be NULL, or have
   delta_cols == 1 (one value per row, replicated into delta_buf for the
   4-unrolled loop). Only the upper triangle is computed directly; the lower
   one is mirrored at the end.
   Fix: restored the braces, declarations, NULL-delta dispatch, allocation
   else-branch and return statement lost by the extraction, and removed the
   fused line-number residue.
   NOTE(review): restored from a damaged extraction — verify against upstream. */
#define ICV_DEF_MULTRANS_L_FUNC( flavor, srctype, dsttype, load_macro )     \
static CvStatus CV_STDCALL                                                  \
icvMulTransposedL_##flavor( const srctype* src, int srcstep,                \
                            dsttype* dst, int dststep,                      \
                            dsttype* delta, int deltastep,                  \
                            CvSize size, int delta_cols, double scale )     \
{                                                                           \
    int i, j, k;                                                            \
    dsttype* tdst = dst;                                                    \
                                                                            \
    srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]);                   \
    deltastep /= sizeof(delta[0]);                                          \
                                                                            \
    if( !delta )                                                            \
        for( i = 0; i < size.height; i++, tdst += dststep )                 \
            for( j = i; j < size.height; j++ )                              \
            {                                                               \
                double s = 0;                                               \
                const srctype *tsrc1 = src + i*srcstep;                     \
                const srctype *tsrc2 = src + j*srcstep;                     \
                                                                            \
                for( k = 0; k <= size.width - 4; k += 4 )                   \
                    s += tsrc1[k]*tsrc2[k] + tsrc1[k+1]*tsrc2[k+1] +        \
                         tsrc1[k+2]*tsrc2[k+2] + tsrc1[k+3]*tsrc2[k+3];     \
                for( ; k < size.width; k++ )                                \
                    s += tsrc1[k] * tsrc2[k];                               \
                tdst[j] = (dsttype)(s*scale);                               \
            }                                                               \
    else                                                                    \
    {                                                                       \
        dsttype* row_buf = 0;                                               \
        int local_alloc = 0;                                                \
        int buf_size = size.width*sizeof(dsttype);                          \
        dsttype delta_buf[4];                                               \
        int delta_shift = delta_cols == size.width ? 4 : 0;                 \
                                                                            \
        if( buf_size <= CV_MAX_LOCAL_SIZE )                                 \
        {                                                                   \
            row_buf = (dsttype*)cvStackAlloc( buf_size );                   \
            local_alloc = 1;                                                \
        }                                                                   \
        else                                                                \
        {                                                                   \
            row_buf = (dsttype*)cvAlloc( buf_size );                        \
            if( !row_buf )                                                  \
                return CV_OUTOFMEM_ERR;                                     \
        }                                                                   \
                                                                            \
        for( i = 0; i < size.height; i++, tdst += dststep )                 \
        {                                                                   \
            const srctype *tsrc1 = src + i*srcstep;                         \
            const dsttype *tdelta1 = delta + i*deltastep;                   \
                                                                            \
            /* cache the shifted i-th row once per outer iteration */       \
            if( delta_cols < size.width )                                   \
                for( k = 0; k < size.width; k++ )                           \
                    row_buf[k] = tsrc1[k] - tdelta1[0];                     \
            else                                                            \
                for( k = 0; k < size.width; k++ )                           \
                    row_buf[k] = tsrc1[k] - tdelta1[k];                     \
                                                                            \
            for( j = i; j < size.height; j++ )                              \
            {                                                               \
                double s = 0;                                               \
                const srctype *tsrc2 = src + j*srcstep;                     \
                const dsttype *tdelta2 = delta + j*deltastep;               \
                if( delta_cols < size.width )                               \
                {                                                           \
                    delta_buf[0] = delta_buf[1] =                           \
                        delta_buf[2] = delta_buf[3] = tdelta2[0];           \
                    tdelta2 = delta_buf;                                    \
                }                                                           \
                for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift ) \
                    s += row_buf[k]*(load_macro(tsrc2[k]) - tdelta2[0]) +   \
                         row_buf[k+1]*(load_macro(tsrc2[k+1]) - tdelta2[1]) + \
                         row_buf[k+2]*(load_macro(tsrc2[k+2]) - tdelta2[2]) + \
                         row_buf[k+3]*(load_macro(tsrc2[k+3]) - tdelta2[3]); \
                for( ; k < size.width; k++, tdelta2++ )                     \
                    s += row_buf[k]*(load_macro(tsrc2[k]) - tdelta2[0]);    \
                tdst[j] = (dsttype)(s*scale);                               \
            }                                                               \
        }                                                                   \
                                                                            \
        if( row_buf && !local_alloc )                                       \
            cvFree( &row_buf );                                             \
    }                                                                       \
                                                                            \
    /* fill the lower part of the destination matrix */                     \
    for( j = 0; j < size.height - 1; j++ )                                  \
        for( i = j; i < size.height; i++ )                                  \
            dst[dststep*i + j] = dst[dststep*j + i];                        \
                                                                            \
    return CV_NO_ERR;                                                       \
}
3086 ICV_DEF_MULTRANS_R_FUNC( 8u32f, uchar, float, CV_8TO32F )
3087 ICV_DEF_MULTRANS_R_FUNC( 8u64f, uchar, double, CV_8TO32F )
3088 ICV_DEF_MULTRANS_R_FUNC( 32f, float, float, CV_NOP )
3089 ICV_DEF_MULTRANS_R_FUNC( 32f64f, float, double, CV_NOP )
3090 ICV_DEF_MULTRANS_R_FUNC( 64f, double, double, CV_NOP )
3091 ICV_DEF_MULTRANS_R_FUNC( 16u32f, ushort, float, CV_NOP )
3092 ICV_DEF_MULTRANS_R_FUNC( 16u64f, ushort, double, CV_NOP )
3093 ICV_DEF_MULTRANS_R_FUNC( 16s32f, short, float, CV_NOP )
3094 ICV_DEF_MULTRANS_R_FUNC( 16s64f, short, double, CV_NOP )
3096 ICV_DEF_MULTRANS_L_FUNC( 8u32f, uchar, float, CV_8TO32F )
3097 ICV_DEF_MULTRANS_L_FUNC( 8u64f, uchar, double, CV_8TO32F )
3098 ICV_DEF_MULTRANS_L_FUNC( 32f, float, float, CV_NOP )
3099 ICV_DEF_MULTRANS_L_FUNC( 32f64f, float, double, CV_NOP )
3100 ICV_DEF_MULTRANS_L_FUNC( 64f, double, double, CV_NOP )
3101 ICV_DEF_MULTRANS_L_FUNC( 16u32f, ushort, float, CV_NOP )
3102 ICV_DEF_MULTRANS_L_FUNC( 16u64f, ushort, double, CV_NOP )
3103 ICV_DEF_MULTRANS_L_FUNC( 16s32f, short, float, CV_NOP )
3104 ICV_DEF_MULTRANS_L_FUNC( 16s64f, short, double, CV_NOP )
3107 typedef CvStatus (CV_STDCALL * CvMulTransposedFunc)
3108 ( const void* src, int srcstep,
3109 void* dst, int dststep, const void* delta,
3110 int deltastep, CvSize size, int delta_cols, double scale );
3113 cvMulTransposed( const CvArr* srcarr, CvArr* dstarr,
3114 int order, const CvArr* deltaarr, double scale )
3116 const int gemm_level = 100; // boundary above which GEMM is faster.
3119 CV_FUNCNAME( "cvMulTransposed" );
3123 CvMat sstub, *src = (CvMat*)srcarr;
3124 CvMat dstub, *dst = (CvMat*)dstarr;
3125 CvMat deltastub, *delta = (CvMat*)deltaarr;
3128 if( !CV_IS_MAT( src ))
3129 CV_CALL( src = cvGetMat( src, &sstub ));
3131 if( !CV_IS_MAT( dst ))
3132 CV_CALL( dst = cvGetMat( dst, &dstub ));
3136 if( !CV_IS_MAT( delta ))
3137 CV_CALL( delta = cvGetMat( delta, &deltastub ));
3139 if( !CV_ARE_TYPES_EQ( dst, delta ))
3140 CV_ERROR( CV_StsUnmatchedFormats, "" );
3142 if( (delta->rows != src->rows && delta->rows != 1) ||
3143 (delta->cols != src->cols && delta->cols != 1) )
3144 CV_ERROR( CV_StsUnmatchedSizes, "" );
3149 delta->data.ptr = 0;
3151 delta->rows = delta->cols = 0;
3154 stype = CV_MAT_TYPE( src->type );
3155 dtype = CV_MAT_TYPE( dst->type );
3157 if( dst->rows != dst->cols )
3158 CV_ERROR( CV_StsBadSize, "The destination matrix must be square" );
3160 if( (order != 0 && src->cols != dst->cols) ||
3161 (order == 0 && src->rows != dst->rows))
3162 CV_ERROR( CV_StsUnmatchedSizes, "" );
3164 if( src->data.ptr == dst->data.ptr || stype == dtype &&
3165 (dst->cols >= gemm_level && dst->rows >= gemm_level &&
3166 src->cols >= gemm_level && src->rows >= gemm_level))
3170 CV_CALL( src2 = cvCreateMat( src->rows, src->cols, src->type ));
3171 cvRepeat( delta, src2 );
3172 cvSub( src, src2, src2 );
3175 cvGEMM( src, src, scale, 0, 0, dst, order == 0 ? CV_GEMM_B_T : CV_GEMM_A_T );
3179 CvMulTransposedFunc func =
3180 stype == CV_8U && dtype == CV_32F ?
3181 (order ? (CvMulTransposedFunc)icvMulTransposedR_8u32f :
3182 (CvMulTransposedFunc)icvMulTransposedL_8u32f) :
3183 stype == CV_8U && dtype == CV_64F ?
3184 (order ? (CvMulTransposedFunc)icvMulTransposedR_8u64f :
3185 (CvMulTransposedFunc)icvMulTransposedL_8u64f) :
3186 stype == CV_16U && dtype == CV_32F ?
3187 (order ? (CvMulTransposedFunc)icvMulTransposedR_16u32f :
3188 (CvMulTransposedFunc)icvMulTransposedL_16u32f) :
3189 stype == CV_16U && dtype == CV_64F ?
3190 (order ? (CvMulTransposedFunc)icvMulTransposedR_16u64f :
3191 (CvMulTransposedFunc)icvMulTransposedL_16u64f) :
3192 stype == CV_16S && dtype == CV_32F ?
3193 (order ? (CvMulTransposedFunc)icvMulTransposedR_16s32f :
3194 (CvMulTransposedFunc)icvMulTransposedL_16s32f) :
3195 stype == CV_16S && dtype == CV_64F ?
3196 (order ? (CvMulTransposedFunc)icvMulTransposedR_16s64f :
3197 (CvMulTransposedFunc)icvMulTransposedL_16s64f) :
3198 stype == CV_32F && dtype == CV_32F ?
3199 (order ? (CvMulTransposedFunc)icvMulTransposedR_32f :
3200 (CvMulTransposedFunc)icvMulTransposedL_32f) :
3201 stype == CV_32F && dtype == CV_64F ?
3202 (order ? (CvMulTransposedFunc)icvMulTransposedR_32f64f :
3203 (CvMulTransposedFunc)icvMulTransposedL_32f64f) :
3204 stype == CV_64F && dtype == CV_64F ?
3205 (order ? (CvMulTransposedFunc)icvMulTransposedR_64f :
3206 (CvMulTransposedFunc)icvMulTransposedL_64f) : 0;
3209 CV_ERROR( CV_StsUnsupportedFormat, "" );
3211 IPPI_CALL( func( src->data.ptr, src->step, dst->data.ptr, dst->step,
3212 delta->data.ptr, delta->step, cvGetMatSize( src ),
3213 delta->cols, scale ));
3219 cvReleaseMat( &src2 );
/****************************************************************************************\
*                                      cvDotProduct                                     *
\****************************************************************************************/
/* Generates icvDotProduct_<flavor>_C1R: accumulates the element-wise product
   of two 2D single-channel arrays into *_sum, promoting each product to
   temptype before accumulation (e.g. int64 for 16-bit inputs to avoid
   overflow). The inner loop is unrolled by 4 into two partial sums.
   Fix: restored the braces, accumulator declaration, `sum += t0 + t1;` and
   the store/return lost by the extraction, and removed the fused
   line-number residue.
   NOTE(review): restored from a damaged extraction — verify against upstream. */
#define ICV_DEF_DOT_PROD_FUNC_2D( flavor, arrtype, temptype, sumtype )      \
static CvStatus CV_STDCALL                                                  \
icvDotProduct_##flavor##_C1R( const arrtype* src1, int step1,               \
                              const arrtype* src2, int step2,               \
                              CvSize size, sumtype* _sum )                  \
{                                                                           \
    sumtype sum = 0;                                                        \
                                                                            \
    step1 /= sizeof(src1[0]); step2 /= sizeof(src2[0]);                     \
                                                                            \
    for( ; size.height--; src1 += step1, src2 += step2 )                    \
    {                                                                       \
        int i;                                                              \
                                                                            \
        for( i = 0; i <= size.width - 4; i += 4 )                           \
        {                                                                   \
            temptype t0 = (temptype)src1[i]*src2[i];                        \
            temptype t1 = (temptype)src1[i+1]*src2[i+1];                    \
            t0 += (temptype)src1[i+2]*src2[i+2];                            \
            t1 += (temptype)src1[i+3]*src2[i+3];                            \
            sum += t0 + t1;                                                 \
        }                                                                   \
                                                                            \
        for( ; i < size.width; i++ )                                        \
        {                                                                   \
            sum += (temptype)src1[i]*src2[i];                               \
        }                                                                   \
    }                                                                       \
                                                                            \
    *_sum = sum;                                                            \
    return CV_NO_ERR;                                                       \
}
3260 ICV_DEF_DOT_PROD_FUNC_2D( 8u, uchar, int, int64 )
3261 ICV_DEF_DOT_PROD_FUNC_2D( 16u, ushort, int64, int64 )
3262 ICV_DEF_DOT_PROD_FUNC_2D( 16s, short, int64, int64 )
3263 ICV_DEF_DOT_PROD_FUNC_2D( 32s, int, double, double )
3264 ICV_DEF_DOT_PROD_FUNC_2D( 32f, float, double, double )
3265 ICV_DEF_DOT_PROD_FUNC_2D( 64f, double, double, double )
3267 #define icvDotProduct_8s_C1R 0
3269 CV_DEF_INIT_FUNC_TAB_2D( DotProduct, C1R )
3272 cvDotProduct( const CvArr* srcAarr, const CvArr* srcBarr )
3274 static CvFuncTable tab_2d;
3275 static int inittab = 0;
3280 CV_FUNCNAME( "cvDotProduct" );
3284 CvMat stubA, *srcA = (CvMat*)srcAarr;
3285 CvMat stubB, *srcB = (CvMat*)srcBarr;
3292 icvInitDotProductC1RTable( &tab_2d );
3296 if( !CV_IS_MAT( srcA ))
3299 CV_CALL( srcA = cvGetMat( srcA, &stubA, &coi ));
3301 CV_ERROR( CV_BadCOI, "coi is not supported" );
3304 if( srcBarr == srcAarr )
3308 if( !CV_IS_MAT( srcB ))
3311 CV_CALL( srcB = cvGetMat( srcB, &stubB, &coi ));
3314 CV_ERROR( CV_BadCOI, "coi is not supported" );
3317 if( !CV_ARE_TYPES_EQ( srcA, srcB ))
3318 CV_ERROR( CV_StsUnmatchedFormats, "" );
3320 if( !CV_ARE_SIZES_EQ( srcA, srcB ))
3321 CV_ERROR( CV_StsUnmatchedSizes, "" );
3324 type = CV_MAT_TYPE( srcA->type );
3325 size = cvGetMatSize( srcA );
3327 size.width *= CV_MAT_CN( type );
3328 depth = CV_MAT_DEPTH( type );
3330 if( CV_IS_MAT_CONT( srcA->type & srcB->type ))
3332 size.width *= size.height;
3334 if( size.width <= CV_MAX_INLINE_MAT_OP_SIZE )
3336 if( depth == CV_32F )
3338 float* mA = srcA->data.fl;
3339 float* mB = srcB->data.fl;
3342 sum += (double)mA[size.width - 1]*mB[size.width - 1];
3343 while( --size.width );
3348 if( depth == CV_64F )
3350 double* mA = srcA->data.db;
3351 double* mB = srcB->data.db;
3354 sum += mA[size.width - 1]*mB[size.width - 1];
3355 while( --size.width );
3363 func = (CvFunc2D_2A1P)(tab_2d.fn_2d[depth]);
3365 CV_ERROR( CV_StsUnsupportedFormat, "" );
3367 IPPI_CALL( func( srcA->data.ptr, srcA->step,
3368 srcB->data.ptr, srcB->step,
3371 if( depth < CV_32S )
3372 result.f = (double)result.i;