1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
44 /****************************************************************************************\
45 * Splitting/extracting array channels *
46 \****************************************************************************************/
/* Prologue macros: declare local row pointers plane0..planeN taken from the
   array-of-plane-pointers argument `ptr`.  Shared by both the split (C->P)
   and merge (P->C) row macros below; `arrtype_ptr` supplies the const-ness. */
48 #define ICV_DEF_PX2PL2PX_ENTRY_C2( arrtype_ptr, ptr ) \
49 arrtype_ptr plane0 = ptr[0]; \
50 arrtype_ptr plane1 = ptr[1];
52 #define ICV_DEF_PX2PL2PX_ENTRY_C3( arrtype_ptr, ptr ) \
53 arrtype_ptr plane0 = ptr[0]; \
54 arrtype_ptr plane1 = ptr[1]; \
55 arrtype_ptr plane2 = ptr[2];
57 #define ICV_DEF_PX2PL2PX_ENTRY_C4( arrtype_ptr, ptr ) \
58 arrtype_ptr plane0 = ptr[0]; \
59 arrtype_ptr plane1 = ptr[1]; \
60 arrtype_ptr plane2 = ptr[2]; \
61 arrtype_ptr plane3 = ptr[3];
/* Single-row de-interleave loops: walk one row of packed 2/3/4-channel
   pixels (`src` advances by the channel count per pixel) into the
   plane0..planeN rows declared by the ENTRY macros above.
   NOTE(review): this numbered dump elides interior lines of these macro
   bodies (the braces and the plane-store statements); kept verbatim. */
64 #define ICV_DEF_PX2PL_C2( arrtype, len ) \
68 for( j = 0; j < (len); j++, (src) += 2 ) \
70 arrtype t0 = (src)[0]; \
71 arrtype t1 = (src)[1]; \
81 #define ICV_DEF_PX2PL_C3( arrtype, len ) \
85 for( j = 0; j < (len); j++, (src) += 3 ) \
87 arrtype t0 = (src)[0]; \
88 arrtype t1 = (src)[1]; \
89 arrtype t2 = (src)[2]; \
101 #define ICV_DEF_PX2PL_C4( arrtype, len ) \
105 for( j = 0; j < (len); j++, (src) += 4 ) \
107 arrtype t0 = (src)[0]; \
108 arrtype t1 = (src)[1]; \
/* Single-row channel extraction: copy one channel of a cn-channel packed \
   row into a 1-channel row.  Main loop is unrolled x4; the tail loop     \
   handles the remaining 0..3 pixels.  (Interior lines elided in dump.)   */ \
126 #define ICV_DEF_PX2PL_COI( arrtype, len, cn ) \
130 for( j = 0; j <= (len) - 4; j += 4, (src) += 4*(cn))\
132 arrtype t0 = (src)[0]; \
133 arrtype t1 = (src)[(cn)]; \
138 t0 = (src)[(cn)*2]; \
139 t1 = (src)[(cn)*3]; \
145 for( ; j < (len); j++, (src) += (cn)) \
147 (dst)[j] = (src)[0]; \
/* Generates icvCopy_<flavor>_C<cn>P<cn>R: full 2D split of a packed      \
   cn-channel image into cn separate planes.  Steps are converted from    \
   bytes to elements; after each row `src` is rewound by the row width    \
   because the row macro advanced it.  (Parameter tail of the #define and \
   some body lines are elided in this dump.)                              */ \
152 #define ICV_DEF_COPY_PX2PL_FUNC_2D( arrtype, flavor, \
154 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_C##cn##P##cn##R,\
155 ( const arrtype* src, int srcstep, \
156 arrtype** dst, int dststep, CvSize size ), \
157 (src, srcstep, dst, dststep, size)) \
159 entry_macro(arrtype*, dst); \
160 srcstep /= sizeof(src[0]); \
161 dststep /= sizeof(dst[0][0]); \
163 for( ; size.height--; src += srcstep ) \
165 ICV_DEF_PX2PL_C##cn( arrtype, size.width ); \
166 src -= size.width*(cn); \
/* Generates icvCopy_<flavor>_CnC1CR: 2D extraction of one channel (coi)  \
   from a packed cn-channel image into a single-channel image.            \
   (Some body lines elided in this dump.)                                 */ \
173 #define ICV_DEF_COPY_PX2PL_FUNC_2D_COI( arrtype, flavor )\
174 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_CnC1CR, \
175 ( const arrtype* src, int srcstep, arrtype* dst, int dststep,\
176 CvSize size, int cn, int coi ), \
177 (src, srcstep, dst, dststep, size, cn, coi)) \
180 srcstep /= sizeof(src[0]); \
181 dststep /= sizeof(dst[0]); \
183 for( ; size.height--; src += srcstep, dst += dststep )\
185 ICV_DEF_PX2PL_COI( arrtype, size.width, cn ); \
186 src -= size.width*(cn); \
/* Instantiate the split kernels.  Flavors track element SIZE, not type:  \
   8u covers 8U/8S, 16s covers 16U/16S, 32f covers 32S/32F, 64f covers    \
   64F/int64 -- the copies are bit-pattern moves, so same-width types     \
   share one function (see the dispatch-table macros below).              */ \
193 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
194 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
195 ICV_DEF_COPY_PX2PL_FUNC_2D( uchar, 8u, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
196 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
197 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
198 ICV_DEF_COPY_PX2PL_FUNC_2D( ushort, 16s, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
199 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
200 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
201 ICV_DEF_COPY_PX2PL_FUNC_2D( int, 32f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
202 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
203 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
204 ICV_DEF_COPY_PX2PL_FUNC_2D( int64, 64f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
207 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( uchar, 8u )
208 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( ushort, 16s )
209 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( int, 32f )
210 ICV_DEF_COPY_PX2PL_FUNC_2D_COI( int64, 64f )
213 /****************************************************************************************\
214 * Merging/inserting array channels *
215 \****************************************************************************************/
/* Single-row interleave loops (merge direction): gather the j-th element
   of each plane0..planeN row and write them as one packed pixel at `dst`.
   NOTE(review): interior lines (braces and the packed stores) are elided
   in this numbered dump; kept verbatim. */
218 #define ICV_DEF_PL2PX_C2( arrtype, len ) \
222 for( j = 0; j < (len); j++, (dst) += 2 )\
224 arrtype t0 = plane0[j]; \
225 arrtype t1 = plane1[j]; \
235 #define ICV_DEF_PL2PX_C3( arrtype, len ) \
239 for( j = 0; j < (len); j++, (dst) += 3 )\
241 arrtype t0 = plane0[j]; \
242 arrtype t1 = plane1[j]; \
243 arrtype t2 = plane2[j]; \
255 #define ICV_DEF_PL2PX_C4( arrtype, len ) \
259 for( j = 0; j < (len); j++, (dst) += 4 )\
261 arrtype t0 = plane0[j]; \
262 arrtype t1 = plane1[j]; \
/* Single-row channel insertion: scatter a 1-channel row into one channel \
   of a cn-channel packed row.  x4-unrolled main loop plus scalar tail.   \
   (Interior lines elided in dump.)                                       */ \
280 #define ICV_DEF_PL2PX_COI( arrtype, len, cn ) \
284 for( j = 0; j <= (len) - 4; j += 4, (dst) += 4*(cn))\
286 arrtype t0 = (src)[j]; \
287 arrtype t1 = (src)[j+1]; \
295 (dst)[(cn)*2] = t0; \
296 (dst)[(cn)*3] = t1; \
299 for( ; j < (len); j++, (dst) += (cn)) \
301 (dst)[0] = (src)[j]; \
/* Generates icvCopy_<flavor>_P<cn>C<cn>R: full 2D merge of cn planes     \
   into one packed cn-channel image.  Mirror image of the split generator \
   above; `dst` is rewound after each row.  (Some lines elided in dump.)  */ \
306 #define ICV_DEF_COPY_PL2PX_FUNC_2D( arrtype, flavor, cn, entry_macro ) \
307 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_P##cn##C##cn##R, \
308 ( const arrtype** src, int srcstep, \
309 arrtype* dst, int dststep, CvSize size ), \
310 (src, srcstep, dst, dststep, size)) \
312 entry_macro(const arrtype*, src); \
313 srcstep /= sizeof(src[0][0]); \
314 dststep /= sizeof(dst[0]); \
316 for( ; size.height--; dst += dststep ) \
318 ICV_DEF_PL2PX_C##cn( arrtype, size.width ); \
319 dst -= size.width*(cn); \
/* Generates icvCopy_<flavor>_C1CnCR: 2D insertion of a single-channel    \
   image into one channel (coi) of a packed cn-channel image.             \
   (Some body lines elided in this dump.)                                 */ \
326 #define ICV_DEF_COPY_PL2PX_FUNC_2D_COI( arrtype, flavor ) \
327 IPCVAPI_IMPL( CvStatus, icvCopy_##flavor##_C1CnCR, \
328 ( const arrtype* src, int srcstep, \
329 arrtype* dst, int dststep, \
330 CvSize size, int cn, int coi ), \
331 (src, srcstep, dst, dststep, size, cn, coi)) \
334 srcstep /= sizeof(src[0]); dststep /= sizeof(dst[0]); \
336 for( ; size.height--; src += srcstep, dst += dststep ) \
338 ICV_DEF_PL2PX_COI( arrtype, size.width, cn ); \
339 dst -= size.width*(cn); \
/* Instantiate the merge kernels -- same size-based flavor sharing as     \
   the split instantiations above.                                        */ \
346 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
347 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
348 ICV_DEF_COPY_PL2PX_FUNC_2D( uchar, 8u, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
349 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
350 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
351 ICV_DEF_COPY_PL2PX_FUNC_2D( ushort, 16s, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
352 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
353 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
354 ICV_DEF_COPY_PL2PX_FUNC_2D( int, 32f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
355 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 2, ICV_DEF_PX2PL2PX_ENTRY_C2 )
356 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 3, ICV_DEF_PX2PL2PX_ENTRY_C3 )
357 ICV_DEF_COPY_PL2PX_FUNC_2D( int64, 64f, 4, ICV_DEF_PX2PL2PX_ENTRY_C4 )
359 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( uchar, 8u )
360 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( ushort, 16s )
361 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( int, 32f )
362 ICV_DEF_COPY_PL2PX_FUNC_2D_COI( int64, 64f )
/* Generates an initializer `name` that fills a CvBigFuncTable (indexed by
   full CV_<depth>C<cn> type) with the split/merge kernels.  FROM/TO are the
   C/P letters in the kernel names.  Same-size depths deliberately share a
   kernel: 8S->8u, 16U->16s, 32S->32f, since the copy is bit-exact. */
365 #define ICV_DEF_PXPLPX_TAB( name, FROM, TO ) \
367 name( CvBigFuncTable* tab ) \
369 tab->fn_2d[CV_8UC2] = (void*)icvCopy##_8u_##FROM##2##TO##2R; \
370 tab->fn_2d[CV_8UC3] = (void*)icvCopy##_8u_##FROM##3##TO##3R; \
371 tab->fn_2d[CV_8UC4] = (void*)icvCopy##_8u_##FROM##4##TO##4R; \
373 tab->fn_2d[CV_8SC2] = (void*)icvCopy##_8u_##FROM##2##TO##2R; \
374 tab->fn_2d[CV_8SC3] = (void*)icvCopy##_8u_##FROM##3##TO##3R; \
375 tab->fn_2d[CV_8SC4] = (void*)icvCopy##_8u_##FROM##4##TO##4R; \
377 tab->fn_2d[CV_16UC2] = (void*)icvCopy##_16s_##FROM##2##TO##2R; \
378 tab->fn_2d[CV_16UC3] = (void*)icvCopy##_16s_##FROM##3##TO##3R; \
379 tab->fn_2d[CV_16UC4] = (void*)icvCopy##_16s_##FROM##4##TO##4R; \
381 tab->fn_2d[CV_16SC2] = (void*)icvCopy##_16s_##FROM##2##TO##2R; \
382 tab->fn_2d[CV_16SC3] = (void*)icvCopy##_16s_##FROM##3##TO##3R; \
383 tab->fn_2d[CV_16SC4] = (void*)icvCopy##_16s_##FROM##4##TO##4R; \
385 tab->fn_2d[CV_32SC2] = (void*)icvCopy##_32f_##FROM##2##TO##2R; \
386 tab->fn_2d[CV_32SC3] = (void*)icvCopy##_32f_##FROM##3##TO##3R; \
387 tab->fn_2d[CV_32SC4] = (void*)icvCopy##_32f_##FROM##4##TO##4R; \
389 tab->fn_2d[CV_32FC2] = (void*)icvCopy##_32f_##FROM##2##TO##2R; \
390 tab->fn_2d[CV_32FC3] = (void*)icvCopy##_32f_##FROM##3##TO##3R; \
391 tab->fn_2d[CV_32FC4] = (void*)icvCopy##_32f_##FROM##4##TO##4R; \
393 tab->fn_2d[CV_64FC2] = (void*)icvCopy##_64f_##FROM##2##TO##2R; \
394 tab->fn_2d[CV_64FC3] = (void*)icvCopy##_64f_##FROM##3##TO##3R; \
395 tab->fn_2d[CV_64FC4] = (void*)icvCopy##_64f_##FROM##4##TO##4R; \
/* Generates an initializer `name` that fills a CvFuncTable (indexed by   \
   depth only) with the single-channel extract/insert kernels.  Same      \
   size-based sharing of kernels across depths as in the table above.     */ \
400 #define ICV_DEF_PXPLCOI_TAB( name, FROM, TO ) \
402 name( CvFuncTable* tab ) \
404 tab->fn_2d[CV_8U] = (void*)icvCopy##_8u_##FROM##TO##CR; \
405 tab->fn_2d[CV_8S] = (void*)icvCopy##_8u_##FROM##TO##CR; \
406 tab->fn_2d[CV_16U] = (void*)icvCopy##_16s_##FROM##TO##CR; \
407 tab->fn_2d[CV_16S] = (void*)icvCopy##_16s_##FROM##TO##CR; \
408 tab->fn_2d[CV_32S] = (void*)icvCopy##_32f_##FROM##TO##CR; \
409 tab->fn_2d[CV_32F] = (void*)icvCopy##_32f_##FROM##TO##CR; \
410 tab->fn_2d[CV_64F] = (void*)icvCopy##_64f_##FROM##TO##CR; \
/* Concrete table initializers: split (CnPn / CnC1C) and merge            \
   (PnCn / C1CnC) directions.                                             */ \
414 ICV_DEF_PXPLPX_TAB( icvInitSplitRTable, C, P )
415 ICV_DEF_PXPLCOI_TAB( icvInitSplitRCoiTable, Cn, C1 )
416 ICV_DEF_PXPLPX_TAB( icvInitCvtPlaneToPixRTable, P, C )
417 ICV_DEF_PXPLCOI_TAB( icvInitCvtPlaneToPixRCoiTable, C1, Cn )
/* Function-pointer signatures matching the four kernel families above,
   used to call through the dispatch tables from cvSplit/cvMerge. */
419 typedef CvStatus (CV_STDCALL *CvSplitFunc)( const void* src, int srcstep,
420 void** dst, int dststep, CvSize size);
422 typedef CvStatus (CV_STDCALL *CvExtractPlaneFunc)( const void* src, int srcstep,
423 void* dst, int dststep,
424 CvSize size, int cn, int coi );
426 typedef CvStatus (CV_STDCALL *CvMergeFunc)( const void** src, int srcstep,
427 void* dst, int dststep, CvSize size);
429 typedef CvStatus (CV_STDCALL *CvInsertPlaneFunc)( const void* src, int srcstep,
430 void* dst, int dststep,
431 CvSize size, int cn, int coi );
/* cvSplit: copies each channel of a multi-channel array into one of up to
   four single-channel destination arrays.  Two supported modes: all `cn`
   destinations non-NULL (full split), or exactly one non-NULL (extract a
   single channel).  NOTE(review): many interior lines (braces, NULL
   checks, nzplanes/nzidx bookkeeping) are elided in this numbered dump;
   code kept byte-identical. */
434 cvSplit( const void* srcarr, void* dstarr0, void* dstarr1, void* dstarr2, void* dstarr3 )
436 static CvBigFuncTable pxpl_tab;
437 static CvFuncTable pxplcoi_tab;
438 static int inittab = 0;
440 CV_FUNCNAME( "cvSplit" );
444 CvMat stub[5], *dst[4], *src = (CvMat*)srcarr;
446 void* dstptr[4] = { 0, 0, 0, 0 };
447 int type, cn, coi = 0;
448 int i, nzplanes = 0, nzidx = -1;
450 int src_step, dst_step = 0;
/* one-time initialization of the per-type dispatch tables */
454 icvInitSplitRTable( &pxpl_tab );
455 icvInitSplitRCoiTable( &pxplcoi_tab );
459 dst[0] = (CvMat*)dstarr0;
460 dst[1] = (CvMat*)dstarr1;
461 dst[2] = (CvMat*)dstarr2;
462 dst[3] = (CvMat*)dstarr3;
464 CV_CALL( src = cvGetMat( src, stub + 4, &coi ));
467 // CV_ERROR( CV_BadCOI, "" );
469 type = CV_MAT_TYPE( src->type );
470 cn = CV_MAT_CN( type );
472 cont_flag = src->type;
475 CV_ERROR( CV_BadNumChannels, "" );
/* validate each non-NULL destination: single-channel, same depth and
   size as src; non-first planes must share a step so one dst_step works */
477 for( i = 0; i < 4; i++ )
483 CV_CALL( dst[i] = cvGetMat( dst[i], stub + i ));
484 if( CV_MAT_CN( dst[i]->type ) != 1 )
485 CV_ERROR( CV_BadNumChannels, "" );
486 if( !CV_ARE_DEPTHS_EQ( dst[i], src ))
487 CV_ERROR( CV_StsUnmatchedFormats, "" );
488 if( !CV_ARE_SIZES_EQ( dst[i], src ))
489 CV_ERROR( CV_StsUnmatchedSizes, "" );
490 if( nzplanes > i && i > 0 && dst[i]->step != dst[i-1]->step )
491 CV_ERROR( CV_BadStep, "" );
492 dst_step = dst[i]->step;
493 dstptr[nzplanes-1] = dst[i]->data.ptr;
495 cont_flag &= dst[i]->type;
499 src_step = src->step;
500 size = cvGetMatSize( src );
/* if every array is continuous, process all pixels as one long row */
502 if( CV_IS_MAT_CONT( cont_flag ))
504 size.width *= size.height;
505 src_step = dst_step = CV_STUB_STEP;
/* all cn planes supplied: full split through the per-type table */
512 CvSplitFunc func = (CvSplitFunc)pxpl_tab.fn_2d[type];
515 CV_ERROR( CV_StsUnsupportedFormat, "" );
517 IPPI_CALL( func( src->data.ptr, src_step, dstptr, dst_step, size ));
519 else if( nzplanes == 1 )
/* exactly one plane supplied: extract channel coi = nzidx + 1 (1-based) */
521 CvExtractPlaneFunc func = (CvExtractPlaneFunc)pxplcoi_tab.fn_2d[CV_MAT_DEPTH(type)];
524 CV_ERROR( CV_StsUnsupportedFormat, "" );
526 IPPI_CALL( func( src->data.ptr, src_step,
527 dst[nzidx]->data.ptr, dst_step,
528 size, cn, nzidx + 1 ));
532 CV_ERROR( CV_StsBadArg,
533 "Either all output planes or only one output plane should be non zero" );
/* cvMerge: composes a multi-channel array from up to four single-channel
   source arrays.  Mirror of cvSplit: either all `cn` sources are non-NULL
   (full merge) or exactly one is (insert a single channel).
   NOTE(review): interior lines are elided in this numbered dump; code
   kept byte-identical. */
542 cvMerge( const void* srcarr0, const void* srcarr1, const void* srcarr2,
543 const void* srcarr3, void* dstarr )
545 static CvBigFuncTable plpx_tab;
546 static CvFuncTable plpxcoi_tab;
547 static int inittab = 0;
549 CV_FUNCNAME( "cvMerge" );
553 int src_step = 0, dst_step;
554 CvMat stub[5], *src[4], *dst = (CvMat*)dstarr;
556 const void* srcptr[4] = { 0, 0, 0, 0 };
557 int type, cn, coi = 0;
558 int i, nzplanes = 0, nzidx = -1;
/* one-time initialization of the per-type dispatch tables */
563 icvInitCvtPlaneToPixRTable( &plpx_tab );
564 icvInitCvtPlaneToPixRCoiTable( &plpxcoi_tab );
568 src[0] = (CvMat*)srcarr0;
569 src[1] = (CvMat*)srcarr1;
570 src[2] = (CvMat*)srcarr2;
571 src[3] = (CvMat*)srcarr3;
573 CV_CALL( dst = cvGetMat( dst, stub + 4, &coi ));
575 type = CV_MAT_TYPE( dst->type );
576 cn = CV_MAT_CN( type );
578 cont_flag = dst->type;
581 CV_ERROR( CV_BadNumChannels, "" );
/* validate each non-NULL source plane against dst (mirror of cvSplit) */
583 for( i = 0; i < 4; i++ )
589 CV_CALL( src[i] = cvGetMat( src[i], stub + i ));
590 if( CV_MAT_CN( src[i]->type ) != 1 )
591 CV_ERROR( CV_BadNumChannels, "" );
592 if( !CV_ARE_DEPTHS_EQ( src[i], dst ))
593 CV_ERROR( CV_StsUnmatchedFormats, "" );
594 if( !CV_ARE_SIZES_EQ( src[i], dst ))
595 CV_ERROR( CV_StsUnmatchedSizes, "" );
596 if( nzplanes > i && i > 0 && src[i]->step != src[i-1]->step )
597 CV_ERROR( CV_BadStep, "" );
598 src_step = src[i]->step;
599 srcptr[nzplanes-1] = (const void*)(src[i]->data.ptr);
601 cont_flag &= src[i]->type;
605 size = cvGetMatSize( dst );
606 dst_step = dst->step;
/* if everything is continuous, treat the data as one long row */
608 if( CV_IS_MAT_CONT( cont_flag ))
610 size.width *= size.height;
611 src_step = dst_step = CV_STUB_STEP;
/* all cn planes supplied: full merge through the per-type table */
617 CvMergeFunc func = (CvMergeFunc)plpx_tab.fn_2d[type];
620 CV_ERROR( CV_StsUnsupportedFormat, "" );
622 IPPI_CALL( func( srcptr, src_step, dst->data.ptr, dst_step, size ));
624 else if( nzplanes == 1 )
/* exactly one plane supplied: insert it as channel coi = nzidx + 1 */
626 CvInsertPlaneFunc func = (CvInsertPlaneFunc)plpxcoi_tab.fn_2d[CV_MAT_DEPTH(type)];
629 CV_ERROR( CV_StsUnsupportedFormat, "" );
631 IPPI_CALL( func( src[nzidx]->data.ptr, src_step,
632 dst->data.ptr, dst_step,
633 size, cn, nzidx + 1 ));
637 CV_ERROR( CV_StsBadArg,
638 "Either all input planes or only one input plane should be non zero" );
645 /****************************************************************************************\
646 * Generalized split/merge: mixing channels *
647 \****************************************************************************************/
/* Generates icvMixChannels_<flavor>: for each of n channel pairs, copies
   one source channel stream into one destination channel stream.
   sdelta1/ddelta1 are per-pixel strides (= channel counts of the owning
   arrays), sdelta0/ddelta0 the per-row pointer adjustments.  Work is done
   in blocks of <=1024 pixels; the inner copy is unrolled x2.  A NULL/zero
   source (per cvMixChannels' "fill with zero" convention) takes the
   second, store-only loop.  NOTE(review): interior lines elided in this
   numbered dump; kept verbatim. */
649 #define ICV_DEF_MIX_CH_FUNC_2D( arrtype, flavor ) \
650 static CvStatus CV_STDCALL \
651 icvMixChannels_##flavor( const arrtype** src, int* sdelta0, \
652 int* sdelta1, arrtype** dst, \
653 int* ddelta0, int* ddelta1, \
654 int n, CvSize size ) \
657 int block_size0 = n == 1 ? size.width : 1024; \
659 for( ; size.height--; ) \
661 int remaining = size.width; \
662 for( ; remaining > 0; ) \
664 int block_size = MIN( remaining, block_size0 ); \
665 for( k = 0; k < n; k++ ) \
667 const arrtype* s = src[k]; \
668 arrtype* d = dst[k]; \
669 int ds = sdelta1[k], dd = ddelta1[k]; \
672 for( i = 0; i <= block_size - 2; i += 2, \
673 s += ds*2, d += dd*2 ) \
675 arrtype t0 = s[0], t1 = s[ds]; \
676 d[0] = t0; d[dd] = t1; \
678 if( i < block_size ) \
679 d[0] = s[0], s += ds, d += dd; \
684 for( i=0; i <= block_size-2; i+=2, d+=dd*2 )\
686 if( i < block_size ) \
691 remaining -= block_size; \
693 for( k = 0; k < n; k++ ) \
694 src[k] += sdelta0[k], dst[k] += ddelta0[k]; \
/* One MixChannels kernel per element size (8/16/32/64 bits); depths of   \
   equal width share a kernel via the table below.                        */ \
701 ICV_DEF_MIX_CH_FUNC_2D( uchar, 8u )
702 ICV_DEF_MIX_CH_FUNC_2D( ushort, 16u )
703 ICV_DEF_MIX_CH_FUNC_2D( int, 32s )
704 ICV_DEF_MIX_CH_FUNC_2D( int64, 64s )
/* Fills the depth-indexed dispatch table for cvMixChannels; same-width
   depths (8U/8S, 16U/16S, 32S/32F) share one bit-copy kernel. */
707 icvInitMixChannelsTab( CvFuncTable* tab )
709 tab->fn_2d[CV_8U] = (void*)icvMixChannels_8u;
710 tab->fn_2d[CV_8S] = (void*)icvMixChannels_8u;
711 tab->fn_2d[CV_16U] = (void*)icvMixChannels_16u;
712 tab->fn_2d[CV_16S] = (void*)icvMixChannels_16u;
713 tab->fn_2d[CV_32S] = (void*)icvMixChannels_32s;
714 tab->fn_2d[CV_32F] = (void*)icvMixChannels_32s;
715 tab->fn_2d[CV_64F] = (void*)icvMixChannels_64s;
/* Signature of the generated icvMixChannels_* kernels. */
718 typedef CvStatus (CV_STDCALL * CvMixChannelsFunc)( const void** src, int* sdelta0,
719 int* sdelta1, void** dst, int* ddelta0, int* ddelta1, int n, CvSize size );
/* cvMixChannels: generalized split/merge.  `from_to` holds pair_count
   (src_channel, dst_channel) pairs of *global* channel indices, counted
   across the concatenated channels of all input (resp. output) arrays.
   A negative source index fills the destination channel with zeros.
   NOTE(review): interior lines (declarations of buffer/heap_alloc/err_str,
   several braces and else-branches) are elided in this numbered dump;
   code kept byte-identical. */
722 cvMixChannels( const CvArr** src, int src_count,
723 CvArr** dst, int dst_count,
724 const int* from_to, int pair_count )
726 static CvFuncTable mixcn_tab;
727 static int inittab = 0;
731 CV_FUNCNAME( "cvMixChannels" );
736 int depth = -1, elem_size = 1;
737 int *sdelta0 = 0, *sdelta1 = 0, *ddelta0 = 0, *ddelta1 = 0;
738 uchar **sptr = 0, **dptr = 0;
739 uchar **src0 = 0, **dst0 = 0;
740 int* src_cn = 0, *dst_cn = 0;
741 int* src_step = 0, *dst_step = 0;
743 int cont_flag = CV_MAT_CONT_FLAG;
744 CvMixChannelsFunc func;
/* one-time initialization of the depth dispatch table */
748 icvInitMixChannelsTab( &mixcn_tab );
752 src_count = MAX( src_count, 0 );
754 if( !src && src_count > 0 )
755 CV_ERROR( CV_StsNullPtr, "The input array of arrays is NULL" );
758 CV_ERROR( CV_StsNullPtr, "The output array of arrays is NULL" );
760 if( dst_count <= 0 || pair_count <= 0 )
761 CV_ERROR( CV_StsOutOfRange,
762 "The number of output arrays and the number of copied channels must be positive" );
765 CV_ERROR( CV_StsNullPtr, "The array of copied channel indices is NULL" );
/* single scratch buffer carved into the per-array and per-pair tables */
767 buf_size = (src_count + dst_count + 2)*
768 (sizeof(src0[0]) + sizeof(src_cn[0]) + sizeof(src_step[0])) +
769 pair_count*2*(sizeof(sptr[0]) + sizeof(sdelta0[0]) + sizeof(sdelta1[0]));
771 if( buf_size > CV_MAX_LOCAL_SIZE )
773 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ) );
777 buffer = (uchar*)cvStackAlloc( buf_size );
779 src0 = (uchar**)buffer;
780 dst0 = src0 + src_count;
781 src_cn = (int*)(dst0 + dst_count);
782 dst_cn = src_cn + src_count + 1;
783 src_step = dst_cn + dst_count + 1;
784 dst_step = src_step + src_count;
786 sptr = (uchar**)cvAlignPtr( dst_step + dst_count, (int)sizeof(void*) );
787 dptr = sptr + pair_count;
788 sdelta0 = (int*)(dptr + pair_count);
789 sdelta1 = sdelta0 + pair_count;
790 ddelta0 = sdelta1 + pair_count;
791 ddelta1 = ddelta0 + pair_count;
/* src_cn/dst_cn become prefix sums of channel counts, so global channel
   index c belongs to array a iff cn_arr[a] <= c < cn_arr[a+1] */
793 src_cn[0] = dst_cn[0] = 0;
795 for( k = 0; k < 2; k++ )
797 for( i = 0; i < (k == 0 ? src_count : dst_count); i++ )
799 CvMat stub, *mat = (CvMat*)(k == 0 ? src[i] : dst[i]);
802 if( !CV_IS_MAT(mat) )
803 CV_CALL( mat = cvGetMat( mat, &stub ));
807 depth = CV_MAT_DEPTH(mat->type);
808 elem_size = CV_ELEM_SIZE1(depth);
809 size = cvGetMatSize(mat);
812 if( CV_MAT_DEPTH(mat->type) != depth )
813 CV_ERROR( CV_StsUnmatchedFormats, "All the arrays must have the same bit depth" );
815 if( mat->cols != size.width || mat->rows != size.height )
816 CV_ERROR( CV_StsUnmatchedSizes, "All the arrays must have the same size" );
820 src0[i] = mat->data.ptr;
821 cn = CV_MAT_CN(mat->type);
822 src_cn[i+1] = src_cn[i] + cn;
/* per-row delta in elements, minus what the kernel already advanced */
823 src_step[i] = mat->step / elem_size - size.width * cn;
827 dst0[i] = mat->data.ptr;
828 cn = CV_MAT_CN(mat->type);
829 dst_cn[i+1] = dst_cn[i] + cn;
830 dst_step[i] = mat->step / elem_size - size.width * cn;
833 cont_flag &= mat->type;
839 size.width *= size.height;
/* resolve each (from, to) pair to a concrete pointer + strides */
843 for( i = 0; i < pair_count; i++ )
845 for( k = 0; k < 2; k++ )
847 int cn = from_to[i*2 + k];
848 const int* cn_arr = k == 0 ? src_cn : dst_cn;
849 int a = 0, b = k == 0 ? src_count-1 : dst_count-1;
851 if( cn < 0 || cn >= cn_arr[b+1] )
/* negative source index => zero-fill the destination channel */
853 if( k == 0 && cn < 0 )
856 sdelta0[i] = sdelta1[i] = 0;
862 sprintf( err_str, "channel index #%d in the array of pairs is negative "
863 "or exceeds the total number of channels in all the %s arrays", i*2+k,
864 k == 0 ? "input" : "output" );
865 CV_ERROR( CV_StsOutOfRange, err_str );
/* linear search for the array owning global channel `cn` */
869 for( ; cn >= cn_arr[a+1]; a++ )
874 sptr[i] = src0[a] + (cn - cn_arr[a])*elem_size;
875 sdelta1[i] = cn_arr[a+1] - cn_arr[a];
876 sdelta0[i] = src_step[a];
880 dptr[i] = dst0[a] + (cn - cn_arr[a])*elem_size;
881 ddelta1[i] = cn_arr[a+1] - cn_arr[a];
882 ddelta0[i] = dst_step[a];
887 func = (CvMixChannelsFunc)mixcn_tab.fn_2d[depth];
889 CV_ERROR( CV_StsUnsupportedFormat, "The data type is not supported by the function" );
891 IPPI_CALL( func( (const void**)sptr, sdelta0, sdelta1, (void**)dptr,
892 ddelta0, ddelta1, pair_count, size ));
/* release the scratch buffer only if it came from the heap */
896 if( buffer && heap_alloc )
901 /****************************************************************************************\
902 * cvConvertScaleAbs *
903 \****************************************************************************************/
/* Body of one convert-scale-abs case: dst[i] = cast(abs(scale_macro(a*src[i]+b)))
   over the whole 2D range, with the inner loop unrolled x4 plus a scalar
   tail.  `src`/`dst`/`size` come from the enclosing function scope.
   (Some brace/declaration lines elided in this numbered dump.) */
905 #define ICV_DEF_CVT_SCALE_ABS_CASE( srctype, worktype, \
906 scale_macro, abs_macro, cast_macro, a, b ) \
909 const srctype* _src = (const srctype*)src; \
910 srcstep /= sizeof(_src[0]); /*dststep /= sizeof(_dst[0]);*/ \
912 for( ; size.height--; _src += srcstep, dst += dststep ) \
916 for( i = 0; i <= size.width - 4; i += 4 ) \
918 worktype t0 = scale_macro((a)*_src[i] + (b)); \
919 worktype t1 = scale_macro((a)*_src[i+1] + (b)); \
921 t0 = (worktype)abs_macro(t0); \
922 t1 = (worktype)abs_macro(t1); \
924 dst[i] = cast_macro(t0); \
925 dst[i+1] = cast_macro(t1); \
927 t0 = scale_macro((a)*_src[i+2] + (b)); \
928 t1 = scale_macro((a)*_src[i+3] + (b)); \
930 t0 = (worktype)abs_macro(t0); \
931 t1 = (worktype)abs_macro(t1); \
933 dst[i+2] = cast_macro(t0); \
934 dst[i+3] = cast_macro(t1); \
937 for( ; i < size.width; i++ ) \
939 worktype t0 = scale_macro((a)*_src[i] + (b)); \
940 t0 = (worktype)abs_macro(t0); \
941 dst[i] = cast_macro(t0); \
/* Q15 fixed-point helpers: values are pre-multiplied by 2^15;            \
   ICV_SCALE converts back to integer with round-to-nearest.              */ \
947 #define ICV_FIX_SHIFT 15
948 #define ICV_SCALE(x) (((x) + (1 << (ICV_FIX_SHIFT-1))) >> ICV_FIX_SHIFT)
/* Computes dst = saturate_8u(|src*scale + shift|) for any source depth
   (passed via the trailing type parameter).  8-bit sources go through a
   256-entry LUT; 16-bit sources use Q15 fixed point when scale/shift are
   small enough; wider types fall back to double arithmetic + cvRound.
   NOTE(review): several lines (LUT declaration, switch cases, braces) are
   elided in this numbered dump; code kept byte-identical. */
950 static CvStatus CV_STDCALL
951 icvCvtScaleAbsTo_8u_C1R( const uchar* src, int srcstep,
952 uchar* dst, int dststep,
953 CvSize size, double scale, double shift,
957 int srcdepth = CV_MAT_DEPTH(srctype);
959 size.width *= CV_MAT_CN(srctype);
/* build the |i*scale + shift| LUT for 8-bit input (top half remapped
   for signed 8S sources below) */
970 for( i = 0; i < 128; i++, val += scale )
972 int t = cvRound(fabs(val));
973 lut[i] = CV_CAST_8U(t);
976 if( srcdepth == CV_8S )
979 for( ; i < 256; i++, val += scale )
981 int t = cvRound(fabs(val));
982 lut[i] = CV_CAST_8U(t);
985 icvLUT_Transform8u_8u_C1R( src, srcstep, dst,
986 dststep, size, lut );
990 if( fabs( scale ) <= 1. && fabs(shift) < DBL_EPSILON )
992 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT));
/* NOTE(review): iscale holds scale*2^15, yet it is compared with
   ICV_FIX_SHIFT (== 15) rather than (1 << ICV_FIX_SHIFT); as written
   the identity fast path essentially never fires (the ICV_SCALE branch
   below still yields correct results) -- confirm against upstream. */
994 if( iscale == ICV_FIX_SHIFT )
996 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, CV_NOP, CV_IABS,
1001 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, ICV_SCALE, CV_IABS,
1002 CV_CAST_8U, iscale, 0 );
1007 ICV_DEF_CVT_SCALE_ABS_CASE( ushort, int, cvRound, CV_IABS,
1008 CV_CAST_8U, scale, shift );
1012 if( fabs( scale ) <= 1. &&
1013 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT))
1015 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT));
1016 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT));
/* NOTE(review): same suspicious ICV_FIX_SHIFT comparison as above */
1018 if( iscale == ICV_FIX_SHIFT && ishift == 0 )
1020 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, CV_NOP, CV_IABS,
1025 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, ICV_SCALE, CV_IABS,
1026 CV_CAST_8U, iscale, ishift );
1031 ICV_DEF_CVT_SCALE_ABS_CASE( short, int, cvRound, CV_IABS,
1032 CV_CAST_8U, scale, shift );
1036 ICV_DEF_CVT_SCALE_ABS_CASE( int, int, cvRound, CV_IABS,
1037 CV_CAST_8U, scale, shift );
1040 ICV_DEF_CVT_SCALE_ABS_CASE( float, int, cvRound, CV_IABS,
1041 CV_CAST_8U, scale, shift );
1044 ICV_DEF_CVT_SCALE_ABS_CASE( double, int, cvRound, CV_IABS,
1045 CV_CAST_8U, scale, shift );
/* unknown source depth */
1049 return CV_BADFLAG_ERR;
/* cvConvertScaleAbs: dst(8u) = saturate(|src*scale + shift|).  Validates
   that src/dst match in size and channel count, that dst is 8U and no COI
   is set, then delegates to icvCvtScaleAbsTo_8u_C1R (channels folded into
   the row width there).  Interior lines elided in this numbered dump. */
1057 cvConvertScaleAbs( const void* srcarr, void* dstarr,
1058 double scale, double shift )
1060 CV_FUNCNAME( "cvConvertScaleAbs" );
1064 int coi1 = 0, coi2 = 0;
1065 CvMat srcstub, *src = (CvMat*)srcarr;
1066 CvMat dststub, *dst = (CvMat*)dstarr;
1068 int src_step, dst_step;
1070 CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
1071 CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
1073 if( coi1 != 0 || coi2 != 0 )
1074 CV_ERROR( CV_BadCOI, "" );
1076 if( !CV_ARE_SIZES_EQ( src, dst ))
1077 CV_ERROR( CV_StsUnmatchedSizes, "" );
1079 if( !CV_ARE_CNS_EQ( src, dst ))
1080 CV_ERROR( CV_StsUnmatchedFormats, "" );
1082 if( CV_MAT_DEPTH( dst->type ) != CV_8U )
1083 CV_ERROR( CV_StsUnsupportedFormat, "" );
1085 size = cvGetMatSize( src );
1086 src_step = src->step;
1087 dst_step = dst->step;
/* continuous data: process as a single long row */
1089 if( CV_IS_MAT_CONT( src->type & dst->type ))
1091 size.width *= size.height;
1092 src_step = dst_step = CV_STUB_STEP;
1096 IPPI_CALL( icvCvtScaleAbsTo_8u_C1R( src->data.ptr, src_step,
1097 (uchar*)(dst->data.ptr), dst_step,
1098 size, scale, shift, CV_MAT_TYPE(src->type)));
1102 /****************************************************************************************\
1103 *                                      cvConvertScale                                    *
1104 \****************************************************************************************/
/* Body of one convert-scale case: dst[i] = cast(scale_macro(a*src[i]+b))
   over the 2D range; x4-unrolled inner loop plus scalar tail.  Same shape
   as the ABS variant above but without the |.| step.  (Some brace lines
   elided in this numbered dump.) */
1106 #define ICV_DEF_CVT_SCALE_CASE( srctype, worktype, \
1107 scale_macro, cast_macro, a, b ) \
1110 const srctype* _src = (const srctype*)src; \
1111 srcstep /= sizeof(_src[0]); \
1113 for( ; size.height--; _src += srcstep, dst += dststep ) \
1115 for( i = 0; i <= size.width - 4; i += 4 ) \
1117 worktype t0 = scale_macro((a)*_src[i]+(b)); \
1118 worktype t1 = scale_macro((a)*_src[i+1]+(b)); \
1120 dst[i] = cast_macro(t0); \
1121 dst[i+1] = cast_macro(t1); \
1123 t0 = scale_macro((a)*_src[i+2] + (b)); \
1124 t1 = scale_macro((a)*_src[i+3] + (b)); \
1126 dst[i+2] = cast_macro(t0); \
1127 dst[i+3] = cast_macro(t1); \
1130 for( ; i < size.width; i++ ) \
1132 worktype t0 = scale_macro((a)*_src[i] + (b)); \
1133 dst[i] = cast_macro(t0); \
/* Generates icvCvtScaleTo_<flavor>_C1R for an integer destination type:  \
   switches on the (runtime) source depth.  8-bit sources large enough    \
   to amortize it use a 256-entry LUT; otherwise Q15 fixed point when     \
   scale/shift fit, else double arithmetic + cvRound.  (Switch labels,    \
   LUT declaration and brace lines are elided in this numbered dump.)     */ \
1139 #define ICV_DEF_CVT_SCALE_FUNC_INT( flavor, dsttype, cast_macro ) \
1140 static CvStatus CV_STDCALL \
1141 icvCvtScaleTo_##flavor##_C1R( const uchar* src, int srcstep, \
1142 dsttype* dst, int dststep, CvSize size, \
1143 double scale, double shift, int param ) \
1145 int i, srctype = param; \
1147 dststep /= sizeof(dst[0]); \
1149 switch( CV_MAT_DEPTH(srctype) ) \
1152 if( size.width*size.height >= 256 ) \
1154 double val = shift; \
1155 for( i = 0; i < 256; i++, val += scale ) \
1157 int t = cvRound(val); \
1158 lut[i] = cast_macro(t); \
1161 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \
1162 dststep*sizeof(dst[0]), size, lut ); \
1164 else if( fabs( scale ) <= 128. && \
1165 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \
1167 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \
1168 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \
1170 ICV_DEF_CVT_SCALE_CASE( uchar, int, ICV_SCALE, \
1171 cast_macro, iscale, ishift ); \
1175 ICV_DEF_CVT_SCALE_CASE( uchar, int, cvRound, \
1176 cast_macro, scale, shift ); \
1180 if( size.width*size.height >= 256 ) \
1182 for( i = 0; i < 256; i++ ) \
1184 int t = cvRound( (schar)i*scale + shift ); \
1185 lut[i] = cast_macro(t); \
1188 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \
1189 dststep*sizeof(dst[0]), size, lut ); \
1191 else if( fabs( scale ) <= 128. && \
1192 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \
1194 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \
1195 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \
1197 ICV_DEF_CVT_SCALE_CASE( schar, int, ICV_SCALE, \
1198 cast_macro, iscale, ishift ); \
1202 ICV_DEF_CVT_SCALE_CASE( schar, int, cvRound, \
1203 cast_macro, scale, shift ); \
1207 if( fabs( scale ) <= 1. && fabs(shift) < DBL_EPSILON ) \
1209 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \
1211 ICV_DEF_CVT_SCALE_CASE( ushort, int, ICV_SCALE, \
1212 cast_macro, iscale, 0 ); \
1216 ICV_DEF_CVT_SCALE_CASE( ushort, int, cvRound, \
1217 cast_macro, scale, shift ); \
1221 if( fabs( scale ) <= 1. && \
1222 fabs( shift ) <= (INT_MAX*0.5)/(1 << ICV_FIX_SHIFT)) \
1224 int iscale = cvRound(scale*(1 << ICV_FIX_SHIFT)); \
1225 int ishift = cvRound(shift*(1 << ICV_FIX_SHIFT)); \
1227 ICV_DEF_CVT_SCALE_CASE( short, int, ICV_SCALE, \
1228 cast_macro, iscale, ishift ); \
1232 ICV_DEF_CVT_SCALE_CASE( short, int, cvRound, \
1233 cast_macro, scale, shift ); \
1237 ICV_DEF_CVT_SCALE_CASE( int, int, cvRound, \
1238 cast_macro, scale, shift ); \
1241 ICV_DEF_CVT_SCALE_CASE( float, int, cvRound, \
1242 cast_macro, scale, shift ); \
1245 ICV_DEF_CVT_SCALE_CASE( double, int, cvRound, \
1246 cast_macro, scale, shift ); \
1250 return CV_BADFLAG_ERR; \
/* Generates icvCvtScaleTo_<flavor>_C1R for a floating-point destination: \
   8-bit sources may use a LUT; every other depth converts through        \
   double (CV_NOP, no rounding needed before the cast).  (Switch labels   \
   and brace lines elided in this numbered dump.)                         */ \
1257 #define ICV_DEF_CVT_SCALE_FUNC_FLT( flavor, dsttype, cast_macro ) \
1258 static CvStatus CV_STDCALL \
1259 icvCvtScaleTo_##flavor##_C1R( const uchar* src, int srcstep, \
1260 dsttype* dst, int dststep, CvSize size, \
1261 double scale, double shift, int param ) \
1263 int i, srctype = param; \
1265 dststep /= sizeof(dst[0]); \
1267 switch( CV_MAT_DEPTH(srctype) ) \
1270 if( size.width*size.height >= 256 ) \
1272 double val = shift; \
1273 for( i = 0; i < 256; i++, val += scale ) \
1274 lut[i] = (dsttype)val; \
1276 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \
1277 dststep*sizeof(dst[0]), size, lut ); \
1281 ICV_DEF_CVT_SCALE_CASE( uchar, double, CV_NOP, \
1282 cast_macro, scale, shift ); \
1286 if( size.width*size.height >= 256 ) \
1288 for( i = 0; i < 256; i++ ) \
1289 lut[i] = (dsttype)((schar)i*scale + shift); \
1291 icvLUT_Transform8u_##flavor##_C1R( src, srcstep, dst, \
1292 dststep*sizeof(dst[0]), size, lut ); \
1296 ICV_DEF_CVT_SCALE_CASE( schar, double, CV_NOP, \
1297 cast_macro, scale, shift ); \
1301 ICV_DEF_CVT_SCALE_CASE( ushort, double, CV_NOP, \
1302 cast_macro, scale, shift ); \
1305 ICV_DEF_CVT_SCALE_CASE( short, double, CV_NOP, \
1306 cast_macro, scale, shift ); \
1309 ICV_DEF_CVT_SCALE_CASE( int, double, CV_NOP, \
1310 cast_macro, scale, shift ); \
1313 ICV_DEF_CVT_SCALE_CASE( float, double, CV_NOP, \
1314 cast_macro, scale, shift ); \
1317 ICV_DEF_CVT_SCALE_CASE( double, double, CV_NOP, \
1318 cast_macro, scale, shift ); \
1322 return CV_BADFLAG_ERR; \
/* Instantiate one icvCvtScaleTo kernel per destination depth, then build \
   the flavor-indexed dispatch table.                                     */ \
1329 ICV_DEF_CVT_SCALE_FUNC_INT( 8u, uchar, CV_CAST_8U )
1330 ICV_DEF_CVT_SCALE_FUNC_INT( 8s, schar, CV_CAST_8S )
1331 ICV_DEF_CVT_SCALE_FUNC_INT( 16s, short, CV_CAST_16S )
1332 ICV_DEF_CVT_SCALE_FUNC_INT( 16u, ushort, CV_CAST_16U )
1333 ICV_DEF_CVT_SCALE_FUNC_INT( 32s, int, CV_CAST_32S )
1335 ICV_DEF_CVT_SCALE_FUNC_FLT( 32f, float, CV_CAST_32F )
1336 ICV_DEF_CVT_SCALE_FUNC_FLT( 64f, double, CV_CAST_64F )
1338 CV_DEF_INIT_FUNC_TAB_2D( CvtScaleTo, C1R )
1341 /****************************************************************************************\
1342 * Conversion w/o scaling macros *
1343 \****************************************************************************************/
/* Body of one no-scaling conversion case: dst[i] = cast2(cast1(src[i]))
   over the 2D range, x4-unrolled inner loop plus scalar tail.  cast1
   widens to worktype (or rounds for float/double sources), cast2
   saturates to the destination type.  (Brace lines elided in dump.) */
1345 #define ICV_DEF_CVT_CASE_2D( srctype, worktype, \
1346 cast_macro1, cast_macro2 ) \
1348 const srctype* _src = (const srctype*)src; \
1349 srcstep /= sizeof(_src[0]); \
1351 for( ; size.height--; _src += srcstep, dst += dststep ) \
1355 for( i = 0; i <= size.width - 4; i += 4 ) \
1357 worktype t0 = cast_macro1(_src[i]); \
1358 worktype t1 = cast_macro1(_src[i+1]); \
1360 dst[i] = cast_macro2(t0); \
1361 dst[i+1] = cast_macro2(t1); \
1363 t0 = cast_macro1(_src[i+2]); \
1364 t1 = cast_macro1(_src[i+3]); \
1366 dst[i+2] = cast_macro2(t0); \
1367 dst[i+3] = cast_macro2(t1); \
1370 for( ; i < size.width; i++ ) \
1372 worktype t0 = cast_macro1(_src[i]); \
1373 dst[i] = cast_macro2(t0); \
/* ICV_DEF_CVT_FUNC_2D: generates the function icvCvtTo_<flavor>_C1R,
   which converts a 2D single-channel-interleaved array from any one of
   six possible source depths to a fixed destination type 'dsttype'.
   The actual source type is passed at run time through the trailing
   'param' argument and dispatched with a switch on CV_MAT_DEPTH(param);
   each case expands ICV_DEF_CVT_CASE_2D with the matching source type
   and the two cast macros (source->worktype, worktype->dst).
   NOTE(review): the switch's opening brace, the case labels, the
   'default' branch and the function's return are among the lines elided
   from this extract; visible lines are kept byte-identical. */
1379 #define ICV_DEF_CVT_FUNC_2D( flavor, dsttype, worktype, cast_macro2, \
1380 srcdepth1, srctype1, cast_macro11, \
1381 srcdepth2, srctype2, cast_macro12, \
1382 srcdepth3, srctype3, cast_macro13, \
1383 srcdepth4, srctype4, cast_macro14, \
1384 srcdepth5, srctype5, cast_macro15, \
1385 srcdepth6, srctype6, cast_macro16 ) \
1386 static CvStatus CV_STDCALL \
1387 icvCvtTo_##flavor##_C1R( const uchar* src, int srcstep, \
1388 dsttype* dst, int dststep, \
1389 CvSize size, int param ) \
1391 int srctype = param; \
1392 dststep /= sizeof(dst[0]); \
1394 switch( CV_MAT_DEPTH(srctype) ) \
1397 ICV_DEF_CVT_CASE_2D( srctype1, worktype, \
1398 cast_macro11, cast_macro2 ); \
1401 ICV_DEF_CVT_CASE_2D( srctype2, worktype, \
1402 cast_macro12, cast_macro2 ); \
1405 ICV_DEF_CVT_CASE_2D( srctype3, worktype, \
1406 cast_macro13, cast_macro2 ); \
1409 ICV_DEF_CVT_CASE_2D( srctype4, worktype, \
1410 cast_macro14, cast_macro2 ); \
1413 ICV_DEF_CVT_CASE_2D( srctype5, worktype, \
1414 cast_macro15, cast_macro2 ); \
1417 ICV_DEF_CVT_CASE_2D( srctype6, worktype, \
1418 cast_macro16, cast_macro2 ); \
/* Instantiate the unscaled converters icvCvtTo_<flavor>_C1R, one per
   destination depth.  Each instantiation lists the six OTHER depths as
   possible sources (same-depth conversion is handled elsewhere as a
   plain copy) with the appropriate per-element casts:
   - integer destinations saturate via CV_CAST_8U/8S/16U/16S and round
     float/double sources with cvRound;
   - float destinations widen 8-bit sources through the CV_8TO32F lookup
     and narrow int/double with CV_CAST_32F;
   - the double destination needs no output cast at all (CV_NOP). */
/* dst 8u: saturate everything into [0,255]. */
1426 ICV_DEF_CVT_FUNC_2D( 8u, uchar, int, CV_CAST_8U,
1427 CV_8S, schar, CV_NOP,
1428 CV_16U, ushort, CV_NOP,
1429 CV_16S, short, CV_NOP,
1430 CV_32S, int, CV_NOP,
1431 CV_32F, float, cvRound,
1432 CV_64F, double, cvRound )
/* dst 8s: saturate into [-128,127]. */
1434 ICV_DEF_CVT_FUNC_2D( 8s, schar, int, CV_CAST_8S,
1435 CV_8U, uchar, CV_NOP,
1436 CV_16U, ushort, CV_NOP,
1437 CV_16S, short, CV_NOP,
1438 CV_32S, int, CV_NOP,
1439 CV_32F, float, cvRound,
1440 CV_64F, double, cvRound )
/* dst 16u: saturate into [0,65535]. */
1442 ICV_DEF_CVT_FUNC_2D( 16u, ushort, int, CV_CAST_16U,
1443 CV_8U, uchar, CV_NOP,
1444 CV_8S, schar, CV_NOP,
1445 CV_16S, short, CV_NOP,
1446 CV_32S, int, CV_NOP,
1447 CV_32F, float, cvRound,
1448 CV_64F, double, cvRound )
/* dst 16s: saturate into [-32768,32767]. */
1450 ICV_DEF_CVT_FUNC_2D( 16s, short, int, CV_CAST_16S,
1451 CV_8U, uchar, CV_NOP,
1452 CV_8S, schar, CV_NOP,
1453 CV_16U, ushort, CV_NOP,
1454 CV_32S, int, CV_NOP,
1455 CV_32F, float, cvRound,
1456 CV_64F, double, cvRound )
/* dst 32s: integral sources fit as-is; floats are rounded. */
1458 ICV_DEF_CVT_FUNC_2D( 32s, int, int, CV_NOP,
1459 CV_8U, uchar, CV_NOP,
1460 CV_8S, schar, CV_NOP,
1461 CV_16U, ushort, CV_NOP,
1462 CV_16S, short, CV_NOP,
1463 CV_32F, float, cvRound,
1464 CV_64F, double, cvRound )
/* dst 32f: 8-bit sources via the 8->32f lookup table; wider types
   narrowed with CV_CAST_32F. */
1466 ICV_DEF_CVT_FUNC_2D( 32f, float, float, CV_NOP,
1467 CV_8U, uchar, CV_8TO32F,
1468 CV_8S, schar, CV_8TO32F,
1469 CV_16U, ushort, CV_NOP,
1470 CV_16S, short, CV_NOP,
1471 CV_32S, int, CV_CAST_32F,
1472 CV_64F, double, CV_CAST_32F )
/* dst 64f: every source widens losslessly, so no output cast. */
1474 ICV_DEF_CVT_FUNC_2D( 64f, double, double, CV_NOP,
1475 CV_8U, uchar, CV_8TO32F,
1476 CV_8S, schar, CV_8TO32F,
1477 CV_16U, ushort, CV_NOP,
1478 CV_16S, short, CV_NOP,
1479 CV_32S, int, CV_NOP,
1480 CV_32F, float, CV_NOP )
/* Dispatch table (indexed by destination depth) consumed by
   cvConvertScale via icvInitCvtToC1RTable(). */
1482 CV_DEF_INIT_FUNC_TAB_2D( CvtTo, C1R )
/* Function-pointer types matching the two kernel families generated
   above:
   - CvCvtFunc: plain depth conversion; the trailing int carries the
     source type (see icvCvtTo_*_C1R's 'param').
   - CvCvtScaleFunc: conversion with the linear transform
     dst = src*scale + shift.
   NOTE(review): the closing "int param );" lines of both typedefs are
   among the lines elided from this extract. */
1485 typedef CvStatus (CV_STDCALL *CvCvtFunc)( const void* src, int srcstep,
1486 void* dst, int dststep, CvSize size,
1489 typedef CvStatus (CV_STDCALL *CvCvtScaleFunc)( const void* src, int srcstep,
1490 void* dst, int dststep, CvSize size,
1491 double scale, double shift,
/* cvConvertScale: convert srcarr to dstarr, applying dst = src*scale +
   shift.  Accepts CvMat/IplImage (normalized via cvGetMat) and CvMatND
   (processed slice-by-slice with CvNArrayIterator).  Depth-specific
   kernels are looked up in the two static dispatch tables by the
   DESTINATION depth; when scale==1 && shift==0 the cheaper unscaled
   table is used.  NOTE(review): many physical lines (braces, local
   declarations, __BEGIN__/__EXIT__ markers) are elided from this
   extract; the surviving statements are kept byte-identical. */
1495 cvConvertScale( const void* srcarr, void* dstarr,
1496 double scale, double shift )
/* Lazily-initialized dispatch tables shared by all calls.  NOTE(review):
   initialization below is not visibly guarded for concurrent first
   calls — confirm against the original file. */
1498 static CvFuncTable cvt_tab, cvtscale_tab;
1499 static int inittab = 0;
1501 CV_FUNCNAME( "cvConvertScale" );
1507 CvMat srcstub, *src = (CvMat*)srcarr;
1508 CvMat dststub, *dst = (CvMat*)dstarr;
1510 int src_step, dst_step;
/* 'no_scale' selects the plain-conversion kernels. */
1511 int no_scale = scale == 1 && shift == 0;
/* Normalize the source to a CvMat header; a non-zero COI on an
   IplImage is not supported. */
1513 if( !CV_IS_MAT(src) )
1515 if( CV_IS_MATND(src) )
1520 CV_CALL( src = cvGetMat( src, &srcstub, &coi ));
1523 CV_ERROR( CV_BadCOI, "" );
/* Same normalization for the destination. */
1527 if( !CV_IS_MAT(dst) )
1529 if( CV_IS_MATND(dst) )
1534 CV_CALL( dst = cvGetMat( dst, &dststub, &coi ));
1537 CV_ERROR( CV_BadCOI, "" );
/* --- CvMatND path: iterate over contiguous slices of both arrays;
   depths may differ (CV_NO_DEPTH_CHECK). --- */
1543 CvArr* arrs[] = { src, dst };
1545 CvNArrayIterator iterator;
1548 CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator, CV_NO_DEPTH_CHECK ));
1550 type = iterator.hdr[0]->type;
1551 dsttype = iterator.hdr[1]->type;
/* Fold channels into the row width: kernels are single-channel. */
1552 iterator.size.width *= CV_MAT_CN(type);
1556 icvInitCvtToC1RTable( &cvt_tab );
1557 icvInitCvtScaleToC1RTable( &cvtscale_tab );
/* Unscaled ND conversion: kernel chosen by destination depth,
   source depth passed through the trailing 'type' argument. */
1563 CvCvtFunc func = (CvCvtFunc)(cvt_tab.fn_2d[CV_MAT_DEPTH(dsttype)]);
1565 CV_ERROR( CV_StsUnsupportedFormat, "" );
1569 IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
1570 iterator.ptr[1], CV_STUB_STEP,
1571 iterator.size, type ));
1573 while( cvNextNArraySlice( &iterator ));
/* Scaled ND conversion. */
1577 CvCvtScaleFunc func =
1578 (CvCvtScaleFunc)(cvtscale_tab.fn_2d[CV_MAT_DEPTH(dsttype)]);
1580 CV_ERROR( CV_StsUnsupportedFormat, "" );
1584 IPPI_CALL( func( iterator.ptr[0], CV_STUB_STEP,
1585 iterator.ptr[1], CV_STUB_STEP,
1586 iterator.size, scale, shift, type ));
1588 while( cvNextNArraySlice( &iterator ));
/* --- 2D CvMat path --- */
/* Identity transform between identical types degenerates to a copy
   (handled in lines elided from this extract). */
1593 if( no_scale && CV_ARE_TYPES_EQ( src, dst ) )
1600 if( !CV_ARE_SIZES_EQ( src, dst ))
1601 CV_ERROR( CV_StsUnmatchedSizes, "" );
1603 size = cvGetMatSize( src );
1604 type = CV_MAT_TYPE(src->type);
1605 src_step = src->step;
1606 dst_step = dst->step;
/* Both continuous: flatten to a single row for one kernel call. */
1608 if( CV_IS_MAT_CONT( src->type & dst->type ))
1610 size.width *= size.height;
1611 src_step = dst_step = CV_STUB_STEP;
1615 size.width *= CV_MAT_CN( type );
/* Inline fast path: tiny same-type float/double vectors are scaled
   directly here, skipping table dispatch entirely. */
1617 if( CV_ARE_TYPES_EQ( src, dst ) && size.height == 1 &&
1618 size.width <= CV_MAX_INLINE_MAT_OP_SIZE )
1620 if( CV_MAT_DEPTH(type) == CV_32F )
1622 const float* srcdata = (const float*)(src->data.ptr);
1623 float* dstdata = (float*)(dst->data.ptr);
/* Walk backwards from the last element. */
1627 dstdata[size.width - 1] = (float)(srcdata[size.width-1]*scale + shift);
1629 while( --size.width );
1634 if( CV_MAT_DEPTH(type) == CV_64F )
1636 const double* srcdata = (const double*)(src->data.ptr);
1637 double* dstdata = (double*)(dst->data.ptr);
1641 dstdata[size.width - 1] = srcdata[size.width-1]*scale + shift;
1643 while( --size.width );
/* General path: (re)initialize dispatch tables, then call the
   depth-selected kernel once over the (possibly flattened) array. */
1651 icvInitCvtToC1RTable( &cvt_tab );
1652 icvInitCvtScaleToC1RTable( &cvtscale_tab );
/* Depths may differ but channel counts must match. */
1656 if( !CV_ARE_CNS_EQ( src, dst ))
1657 CV_ERROR( CV_StsUnmatchedFormats, "" );
1661 CvCvtFunc func = (CvCvtFunc)(cvt_tab.fn_2d[CV_MAT_DEPTH(dst->type)]);
1664 CV_ERROR( CV_StsUnsupportedFormat, "" );
1666 IPPI_CALL( func( src->data.ptr, src_step,
1667 dst->data.ptr, dst_step, size, type ));
1671 CvCvtScaleFunc func = (CvCvtScaleFunc)
1672 (cvtscale_tab.fn_2d[CV_MAT_DEPTH(dst->type)]);
1675 CV_ERROR( CV_StsUnsupportedFormat, "" );
1677 IPPI_CALL( func( src->data.ptr, src_step,
1678 dst->data.ptr, dst_step, size,
1679 scale, shift, type ));
1685 /********************* helper functions for converting 32f<->64f ************************/
/* icvCvt_32f64f: widen a float vector to double, element-wise.
   Unrolled-by-4 main loop plus a scalar tail.  NOTE(review): most of
   the loop body and the function's return are among the lines elided
   from this extract. */
1687 IPCVAPI_IMPL( CvStatus, icvCvt_32f64f,
1688 ( const float* src, double* dst, int len ), (src, dst, len) )
1691 for( i = 0; i <= len - 4; i += 4 )
1694 double t1 = src[i+1];
1706 for( ; i < len; i++ )
/* icvCvt_64f32f: narrow a double vector to float, element-wise
   (plain C cast — default rounding, no saturation).  Unrolled-by-4
   main loop plus a scalar tail.  NOTE(review): several interior lines
   and the return statement are elided from this extract. */
1713 IPCVAPI_IMPL( CvStatus, icvCvt_64f32f,
1714 ( const double* src, float* dst, int len ), (src, dst, len) )
1717 for( ; i <= len - 4; i += 4 )
1720 double t1 = src[i+1];
1723 dst[i+1] = (float)t1;
1728 dst[i+2] = (float)t0;
1729 dst[i+3] = (float)t1;
1732 for( ; i < len; i++ )
1733 dst[i] = (float)src[i];
/* icvScale_32f: dst[i] = (float)(src[i]*a + b).  The multiply-add is
   carried out in double precision before narrowing back to float.
   Unrolled-by-4 main loop plus a scalar tail.  NOTE(review): the
   opening brace, 'int i;', some stores and the return are among the
   lines elided from this extract. */
1739 CvStatus CV_STDCALL icvScale_32f( const float* src, float* dst, int len, float a, float b )
1742 for( i = 0; i <= len - 4; i += 4 )
1744 double t0 = src[i]*a + b;
1745 double t1 = src[i+1]*a + b;
1748 dst[i+1] = (float)t1;
1750 t0 = src[i+2]*a + b;
1751 t1 = src[i+3]*a + b;
1753 dst[i+2] = (float)t0;
1754 dst[i+3] = (float)t1;
1757 for( ; i < len; i++ )
1758 dst[i] = (float)(src[i]*a + b);
1764 CvStatus CV_STDCALL icvScale_64f( const double* src, double* dst, int len, double a, double b )
1767 for( i = 0; i <= len - 4; i += 4 )
1769 double t0 = src[i]*a + b;
1770 double t1 = src[i+1]*a + b;
1775 t0 = src[i+2]*a + b;
1776 t1 = src[i+3]*a + b;
1782 for( ; i < len; i++ )
1783 dst[i] = src[i]*a + b;