1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
42 /* ////////////////////////////////////////////////////////////////////
44 // CvMat logical operations: &, |, ^ ...
50 /////////////////////////////////////////////////////////////////////////////////////////
52 // Macros for logic operations //
54 /////////////////////////////////////////////////////////////////////////////////////////
56 /* //////////////////////////////////////////////////////////////////////////////////////
58 ////////////////////////////////////////////////////////////////////////////////////// */
/* Defines icv<name>_8u_C1R( src1, step1, src2, step2, dst, step, size ):
 * applies the bitwise operator __op__ element-wise to two byte arrays,
 * row by row.  When src1, src2 and dst are all 4-byte aligned, each row
 * is processed 16 bytes (four ints) per iteration, then 4 bytes at a
 * time via int loads, with a final byte-wise loop for the tail.
 * NOTE(review): this extract elides several original lines (braces, the
 * declaration of loop counter i, the store of the 4-byte loop, the
 * unaligned fallback and the return) -- the body is incomplete as shown. */
61 #define ICV_DEF_BIN_LOG_OP_2D( __op__, name ) \
62 IPCVAPI_IMPL( CvStatus, icv##name##_8u_C1R, \
63 ( const uchar* src1, int step1, const uchar* src2, int step2, \
64 uchar* dst, int step, CvSize size ), (src1, step1, src2, step2, dst, step, size) )\
66 for( ; size.height--; src1 += step1, src2 += step2, dst += step ) \
70 if( (((size_t)src1 | (size_t)src2 | (size_t)dst) & 3) == 0 ) \
72 for( ; i <= size.width - 16; i += 16 ) \
74 int t0 = __op__(((const int*)(src1+i))[0], ((const int*)(src2+i))[0]);\
75 int t1 = __op__(((const int*)(src1+i))[1], ((const int*)(src2+i))[1]);\
77 ((int*)(dst+i))[0] = t0; \
78 ((int*)(dst+i))[1] = t1; \
80 t0 = __op__(((const int*)(src1+i))[2], ((const int*)(src2+i))[2]); \
81 t1 = __op__(((const int*)(src1+i))[3], ((const int*)(src2+i))[3]); \
83 ((int*)(dst+i))[2] = t0; \
84 ((int*)(dst+i))[3] = t1; \
87 for( ; i <= size.width - 4; i += 4 ) \
89 int t = __op__(*(const int*)(src1+i), *(const int*)(src2+i)); \
94 for( ; i < size.width; i++ ) \
96 int t = __op__(((const uchar*)src1)[i],((const uchar*)src2)[i]); \
105 /* //////////////////////////////////////////////////////////////////////////////////////
107 ////////////////////////////////////////////////////////////////////////////////////// */
/* Defines static icv<name>_8u_CnR( src0, step1, dst0, step, size, scalar,
 * pix_size ): applies __op__ between each byte of the array and a raw
 * scalar pattern of pix_size bytes per pixel.  'scalar' is presumably
 * pre-replicated by the caller to cover at least delta = 12*pix_size
 * bytes -- TODO confirm against the caller (icvLogicS).
 * Aligned rows are processed 12 bytes (three ints) per inner step,
 * unaligned rows 4 bytes per step, and a final byte-wise loop handles
 * the tail (len is restored with len += delta before it runs).
 * NOTE(review): several original lines are elided in this extract
 * (braces, the per-row 'dst' pointer initialization from dst0, some
 * stores and the return) -- the body is incomplete as shown. */
110 #define ICV_DEF_UN_LOG_OP_2D( __op__, name ) \
111 static CvStatus CV_STDCALL icv##name##_8u_CnR \
112 ( const uchar* src0, int step1, uchar* dst0, int step, CvSize size, \
113 const uchar* scalar, int pix_size ) \
115 int delta = 12*pix_size; \
117 for( ; size.height--; src0 += step1, dst0 += step ) \
119 const uchar* src = (const uchar*)src0; \
121 int i, len = size.width; \
123 if( (((size_t)src|(size_t)dst) & 3) == 0 ) \
125 while( (len -= delta) >= 0 ) \
127 for( i = 0; i < (delta); i += 12 ) \
129 int t0 = __op__(((const int*)(src+i))[0], ((const int*)(scalar+i))[0]); \
130 int t1 = __op__(((const int*)(src+i))[1], ((const int*)(scalar+i))[1]); \
131 ((int*)(dst+i))[0] = t0; \
132 ((int*)(dst+i))[1] = t1; \
134 t0 = __op__(((const int*)(src+i))[2], ((const int*)(scalar+i))[2]); \
135 ((int*)(dst+i))[2] = t0; \
143 while( (len -= delta) >= 0 ) \
145 for( i = 0; i < (delta); i += 4 ) \
147 int t0 = __op__(src[i], scalar[i]); \
148 int t1 = __op__(src[i+1], scalar[i+1]); \
149 dst[i] = (uchar)t0; \
150 dst[i+1] = (uchar)t1; \
152 t0 = __op__(src[i+2], scalar[i+2]); \
153 t1 = __op__(src[i+3], scalar[i+3]); \
154 dst[i+2] = (uchar)t0; \
155 dst[i+3] = (uchar)t1; \
162 for( len += delta, i = 0; i < len; i++ ) \
164 int t = __op__(src[i],scalar[i]); \
172 /////////////////////////////////////////////////////////////////////////////////////////
174 // LOGIC OPERATIONS //
176 /////////////////////////////////////////////////////////////////////////////////////////
/* icvLogicS: per-element bitwise operation between an array and a scalar.
 * The scalar is converted to raw pixel bytes (cvScalarToRawData) and the
 * byte-level kernel fn_2d is applied.  With a mask, results are written
 * to a temporary buffer (dstbuf/tdst) and copied to dst only where the
 * 8u mask is non-zero, dy rows at a time.  CvMatND inputs go through the
 * N-array iterator path (mask unsupported there).
 * NOTE(review): this extract elides many original lines (braces, some
 * declarations such as buf/size/tsize/tdst/y/dy, __BEGIN__/__END__ and
 * the exit/cleanup code) -- documented from the visible lines only. */
179 icvLogicS( const void* srcarr, CvScalar* scalar, void* dstarr,
180 const void* maskarr, CvFunc2D_2A1P1I fn_2d )
185 CV_FUNCNAME( "icvLogicS" );
189 CvMat srcstub, *src = (CvMat*)srcarr;
190 CvMat dststub, *dst = (CvMat*)dstarr;
191 CvMat maskstub, *mask = (CvMat*)maskarr;
193 CvCopyMaskFunc copym_func = 0;
196 int coi1 = 0, coi2 = 0;
197 int is_nd = 0, cont_flag = 0;
198 int elem_size, elem_size1, type, depth;
201 int src_step, dst_step, tdst_step, mask_step;
/* coerce src/dst to CvMat headers, or take the CvMatND path */
205 if( CV_IS_MATND(src) )
208 CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
213 if( CV_IS_MATND(dst) )
216 CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
/* N-dimensional branch: iterate over matching continuous slices */
221 CvArr* arrs[] = { src, dst };
223 CvNArrayIterator iterator;
226 CV_ERROR( CV_StsBadMask,
227 "This operation on multi-dimensional arrays does not support mask" );
229 CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator ));
231 type = CV_MAT_TYPE(iterator.hdr[0]->type);
232 depth = CV_MAT_DEPTH(type);
233 iterator.size.width *= CV_ELEM_SIZE(type);
234 elem_size1 = CV_ELEM_SIZE1(depth);
236 CV_CALL( cvScalarToRawData( scalar, buf, type, 1 ));
240 IPPI_CALL( fn_2d( iterator.ptr[0], CV_STUB_STEP,
241 iterator.ptr[1], CV_STUB_STEP,
242 iterator.size, buf, elem_size1 ));
244 while( cvNextNArraySlice( &iterator ));
/* 2D branch: validate COIs and matching types/sizes */
248 if( coi1 != 0 || coi2 != 0 )
249 CV_ERROR( CV_BadCOI, "" );
251 if( !CV_ARE_TYPES_EQ( src, dst ) )
252 CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );
254 if( !CV_ARE_SIZES_EQ( src, dst ) )
255 CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );
257 size = cvGetMatSize( src );
258 type = CV_MAT_TYPE(src->type);
259 depth = CV_MAT_DEPTH(type);
260 elem_size = CV_ELEM_SIZE(type);
261 elem_size1 = CV_ELEM_SIZE1(depth);
265 cont_flag = CV_IS_MAT_CONT( src->type & dst->type );
/* masked case: validate the mask and set up a dy-row temporary buffer */
273 if( !CV_IS_MAT(mask) )
274 CV_CALL( mask = cvGetMat( mask, &maskstub ));
276 if( !CV_IS_MASK_ARR(mask))
277 CV_ERROR( CV_StsBadMask, "" );
279 if( !CV_ARE_SIZES_EQ( mask, dst ))
280 CV_ERROR( CV_StsUnmatchedSizes, "" );
282 cont_flag = CV_IS_MAT_CONT( src->type & dst->type & mask->type );
/* BUG FIX: dy is the number of buffered rows that fit in
 * CV_MAX_LOCAL_SIZE; the buffer holds dy rows of size.width*elem_size
 * bytes (see dstbuf below), so divide by the row size -- the divisor
 * was mistakenly elem_size*size.height. */
283 dy = CV_MAX_LOCAL_SIZE/(elem_size*size.width);
285 dy = MIN(dy,size.height);
286 dstbuf = cvMat( dy, size.width, type );
288 dstbuf.step = cvAlign( dstbuf.step, 8 );
289 buf_size = dstbuf.step ? dstbuf.step*dy : size.width*elem_size;
290 if( buf_size > CV_MAX_LOCAL_SIZE )
292 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
296 buffer = (uchar*)cvStackAlloc( buf_size );
297 dstbuf.data.ptr = buffer;
300 copym_func = icvGetCopyMaskFunc( elem_size );
303 src_step = src->step;
304 dst_step = dst->step;
305 tdst_step = tdst->step;
306 mask_step = mask ? mask->step : 0;
307 CV_CALL( cvScalarToRawData( scalar, buf, type, 1 ));
/* process dy rows per iteration; flatten to one row when continuous */
309 for( y = 0; y < size.height; y += dy )
311 tsize.width = size.width;
313 if( y + dy > size.height )
314 tsize.height = size.height - y;
315 if( cont_flag || tsize.height == 1 )
317 tsize.width *= tsize.height;
319 src_step = tdst_step = dst_step = mask_step = CV_STUB_STEP;
321 IPPI_CALL( fn_2d( src->data.ptr + y*src->step, src_step, tdst->data.ptr, tdst_step,
322 cvSize(tsize.width*elem_size, tsize.height), buf, elem_size1 ));
325 IPPI_CALL( copym_func( tdst->data.ptr, tdst_step, dst->data.ptr + y*dst->step,
326 dst_step, tsize, mask->data.ptr + y*mask->step, mask_step ));
/* icvLogic: per-element bitwise operation between two arrays of equal
 * type and size, via the byte-level kernel fn_2d.  With a mask, results
 * go to a temporary buffer (dstbuf/tdst) and are copied to dst only
 * where the 8u mask is non-zero, dy rows at a time.  CvMatND inputs go
 * through the N-array iterator path (mask unsupported there).
 * NOTE(review): this extract elides many original lines (braces, some
 * declarations such as size/type/elem_size/tsize/tdst/y/dy/buffer,
 * __BEGIN__/__END__ and the exit/cleanup code) -- documented from the
 * visible lines only. */
338 icvLogic( const void* srcarr1, const void* srcarr2, void* dstarr,
339 const void* maskarr, CvFunc2D_3A fn_2d )
344 CV_FUNCNAME( "icvLogic" );
349 int coi1 = 0, coi2 = 0, coi3 = 0;
351 int is_nd = 0, cont_flag = 0;
352 CvMat srcstub1, *src1 = (CvMat*)srcarr1;
353 CvMat srcstub2, *src2 = (CvMat*)srcarr2;
354 CvMat dststub, *dst = (CvMat*)dstarr;
355 CvMat maskstub, *mask = (CvMat*)maskarr;
357 int src1_step, src2_step, tdst_step, dst_step, mask_step;
359 CvCopyMaskFunc copym_func = 0;
/* coerce each argument to a CvMat header, or take the CvMatND path */
361 if( !CV_IS_MAT(src1))
363 if( CV_IS_MATND(src1) )
366 CV_CALL( src1 = cvGetMat( src1, &srcstub1, &coi1 ));
369 if( !CV_IS_MAT(src2))
371 if( CV_IS_MATND(src2) )
374 CV_CALL( src2 = cvGetMat( src2, &srcstub2, &coi2 ));
379 if( CV_IS_MATND(dst) )
382 CV_CALL( dst = cvGetMat( dst, &dststub, &coi3 ));
/* N-dimensional branch: iterate over matching continuous slices */
387 CvArr* arrs[] = { src1, src2, dst };
389 CvNArrayIterator iterator;
392 CV_ERROR( CV_StsBadMask,
393 "This operation on multi-dimensional arrays does not support mask" );
395 CV_CALL( cvInitNArrayIterator( 3, arrs, 0, stubs, &iterator ));
397 type = CV_MAT_TYPE(iterator.hdr[0]->type);
398 iterator.size.width *= CV_ELEM_SIZE(type);
402 IPPI_CALL( fn_2d( iterator.ptr[0], CV_STUB_STEP,
403 iterator.ptr[1], CV_STUB_STEP,
404 iterator.ptr[2], CV_STUB_STEP,
407 while( cvNextNArraySlice( &iterator ));
/* 2D branch: validate COIs and pairwise matching types/sizes */
411 if( coi1 != 0 || coi2 != 0 || coi3 != 0 )
412 CV_ERROR_FROM_CODE( CV_BadCOI );
414 if( !CV_ARE_TYPES_EQ( src1, src2 ) )
415 CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );
417 if( !CV_ARE_SIZES_EQ( src1, src2 ) )
418 CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );
420 if( !CV_ARE_TYPES_EQ( src1, dst ) )
421 CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );
423 if( !CV_ARE_SIZES_EQ( src1, dst ) )
424 CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );
426 size = cvGetMatSize( src1 );
427 type = CV_MAT_TYPE( src1->type );
428 elem_size = CV_ELEM_SIZE(type);
432 cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type );
/* masked case: validate the mask and set up a dy-row temporary buffer */
440 if( !CV_IS_MAT(mask) )
441 CV_CALL( mask = cvGetMat( mask, &maskstub ));
443 if( !CV_IS_MASK_ARR(mask))
444 CV_ERROR( CV_StsBadMask, "" );
446 if( !CV_ARE_SIZES_EQ( mask, dst ))
447 CV_ERROR( CV_StsUnmatchedSizes, "" );
449 cont_flag = CV_IS_MAT_CONT( src1->type & src2->type & dst->type & mask->type );
/* BUG FIX: dy is the number of buffered rows that fit in
 * CV_MAX_LOCAL_SIZE; the buffer holds dy rows of size.width*elem_size
 * bytes (see dstbuf below), so divide by the row size -- the divisor
 * was mistakenly elem_size*size.height. */
450 dy = CV_MAX_LOCAL_SIZE/(elem_size*size.width);
452 dy = MIN(dy,size.height);
453 dstbuf = cvMat( dy, size.width, type );
455 dstbuf.step = cvAlign( dstbuf.step, 8 );
456 buf_size = dstbuf.step ? dstbuf.step*dy : size.width*elem_size;
457 if( buf_size > CV_MAX_LOCAL_SIZE )
459 CV_CALL( buffer = (uchar*)cvAlloc( buf_size ));
463 buffer = (uchar*)cvStackAlloc( buf_size );
464 dstbuf.data.ptr = buffer;
467 copym_func = icvGetCopyMaskFunc( elem_size );
470 src1_step = src1->step;
471 src2_step = src2->step;
472 dst_step = dst->step;
473 tdst_step = tdst->step;
474 mask_step = mask ? mask->step : 0;
/* process dy rows per iteration; flatten to one row when continuous */
476 for( y = 0; y < size.height; y += dy )
478 tsize.width = size.width;
480 if( y + dy > size.height )
481 tsize.height = size.height - y;
482 if( cont_flag || tsize.height == 1 )
484 tsize.width *= tsize.height;
486 src1_step = src2_step = tdst_step = dst_step = mask_step = CV_STUB_STEP;
488 IPPI_CALL( fn_2d( src1->data.ptr + y*src1->step, src1_step,
489 src2->data.ptr + y*src2->step, src2_step,
490 tdst->data.ptr, tdst_step,
491 cvSize(tsize.width*elem_size, tsize.height) ));
494 IPPI_CALL( copym_func( tdst->data.ptr, tdst_step, dst->data.ptr + y*dst->step,
495 dst_step, tsize, mask->data.ptr + y*mask->step, mask_step ));
/* Instantiate the two-array (icvXor/And/Or_8u_C1R) and array-scalar
 * (icvXorC/AndC/OrC_8u_CnR) kernels.  CV_XOR/CV_AND/CV_OR are presumably
 * macros expanding to the ^, & and | operators -- defined elsewhere,
 * not visible in this extract. */
505 ICV_DEF_BIN_LOG_OP_2D( CV_XOR, Xor )
506 ICV_DEF_UN_LOG_OP_2D( CV_XOR, XorC )
508 ICV_DEF_BIN_LOG_OP_2D( CV_AND, And )
509 ICV_DEF_UN_LOG_OP_2D( CV_AND, AndC )
511 ICV_DEF_BIN_LOG_OP_2D( CV_OR, Or )
512 ICV_DEF_UN_LOG_OP_2D( CV_OR, OrC )
515 /////////////////////////////////////////////////////////////////////////////////////////
517 /////////////////////////////////////////////////////////////////////////////////////////
/* cvXorS: dst(I) = src(I) ^ scalar, optionally under an 8u mask.
 * Thin public wrapper over icvLogicS with the XOR-with-constant kernel. */
520 cvXorS( const void* src, CvScalar scalar, void* dst, const void* mask )
522 icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvXorC_8u_CnR );
/* cvXor: dst(I) = src1(I) ^ src2(I), optionally under an 8u mask.
 * Thin public wrapper over icvLogic with the two-array XOR kernel. */
527 cvXor( const void* src1, const void* src2, void* dst, const void* mask )
529 icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvXor_8u_C1R );
532 /////////////////////////////////////////////////////////////////////////////////////////
534 /////////////////////////////////////////////////////////////////////////////////////////
/* cvAndS: dst(I) = src(I) & scalar, optionally under an 8u mask.
 * Thin public wrapper over icvLogicS with the AND-with-constant kernel. */
537 cvAndS( const void* src, CvScalar scalar, void* dst, const void* mask )
539 icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvAndC_8u_CnR );
/* cvAnd: dst(I) = src1(I) & src2(I), optionally under an 8u mask.
 * Thin public wrapper over icvLogic with the two-array AND kernel. */
544 cvAnd( const void* src1, const void* src2, void* dst, const void* mask )
546 icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvAnd_8u_C1R );
550 /////////////////////////////////////////////////////////////////////////////////////////
552 /////////////////////////////////////////////////////////////////////////////////////////
/* cvOrS: dst(I) = src(I) | scalar, optionally under an 8u mask.
 * Thin public wrapper over icvLogicS with the OR-with-constant kernel. */
555 cvOrS( const void* src, CvScalar scalar, void* dst, const void* mask )
557 icvLogicS( src, &scalar, dst, mask, (CvFunc2D_2A1P1I)icvOrC_8u_CnR );
/* cvOr: dst(I) = src1(I) | src2(I), optionally under an 8u mask.
 * Thin public wrapper over icvLogic with the two-array OR kernel. */
562 cvOr( const void* src1, const void* src2, void* dst, const void* mask )
564 icvLogic( src1, src2, dst, mask, (CvFunc2D_3A)icvOr_8u_C1R );
568 /////////////////////////////////////////////////////////////////////////////////////////
570 /////////////////////////////////////////////////////////////////////////////////////////
/* icvNot_8u_C1R: dst = ~src over a byte array, row by row.  When src1
 * and dst are both 4-byte aligned, each row is inverted 16 bytes (four
 * ints) per step, then 4 bytes at a time, with a byte-wise tail loop.
 * NOTE(review): this extract elides several original lines (braces, the
 * declaration of loop counter i, the stores of the 4-byte and tail
 * loops, and the return) -- the body is incomplete as shown. */
573 IPCVAPI_IMPL( CvStatus, icvNot_8u_C1R,
574 ( const uchar* src1, int step1, uchar* dst, int step, CvSize size ),
575 (src1, step1, dst, step, size) )
577 for( ; size.height--; src1 += step1, dst += step )
581 if( (((size_t)src1 | (size_t)dst) & 3) == 0 )
583 for( ; i <= size.width - 16; i += 16 )
585 int t0 = ~((const int*)(src1+i))[0];
586 int t1 = ~((const int*)(src1+i))[1];
588 ((int*)(dst+i))[0] = t0;
589 ((int*)(dst+i))[1] = t1;
591 t0 = ~((const int*)(src1+i))[2];
592 t1 = ~((const int*)(src1+i))[3];
594 ((int*)(dst+i))[2] = t0;
595 ((int*)(dst+i))[3] = t1;
598 for( ; i <= size.width - 4; i += 4 )
600 int t = ~*(const int*)(src1+i);
605 for( ; i < size.width; i++ )
607 int t = ~((const uchar*)src1)[i];
/* cvNot: dst(I) = ~src(I) (per-element bitwise inversion).  CvMatND
 * inputs go through the N-array iterator; plain CvMat inputs are
 * flattened to a single row when both are continuous, then handed to the
 * byte-level kernel icvNot_8u_C1R with width scaled to bytes.
 * NOTE(review): this extract elides some original lines (braces, the
 * is_nd assignments, size/type declarations, __BEGIN__/__END__ and the
 * exit label) -- documented from the visible lines only. */
617 cvNot( const void* srcarr, void* dstarr )
619 CV_FUNCNAME( "cvNot" );
623 CvMat srcstub, *src = (CvMat*)srcarr;
624 CvMat dststub, *dst = (CvMat*)dstarr;
626 int coi1 = 0, coi2 = 0;
629 int src_step, dst_step;
/* coerce src to a CvMat header, or flag the CvMatND path */
633 if( CV_IS_MATND(src) )
636 CV_CALL( src = cvGetMat( src, &srcstub, &coi1 ));
/* BUG FIX: this branch handles the dst argument (it converts dst into
 * dststub below), so it must test CV_IS_MATND(dst) -- it mistakenly
 * tested src, mirroring the src branch above and misrouting a CvMatND
 * dst.  Compare the per-argument checks in icvLogicS/icvLogic. */
641 if( CV_IS_MATND(dst) )
644 CV_CALL( dst = cvGetMat( dst, &dststub, &coi2 ));
/* N-dimensional branch: iterate over matching continuous slices */
649 CvArr* arrs[] = { src, dst };
651 CvNArrayIterator iterator;
653 CV_CALL( cvInitNArrayIterator( 2, arrs, 0, stubs, &iterator ));
655 type = CV_MAT_TYPE(iterator.hdr[0]->type);
656 iterator.size.width *= CV_ELEM_SIZE(type);
660 IPPI_CALL( icvNot_8u_C1R( iterator.ptr[0], CV_STUB_STEP,
661 iterator.ptr[1], CV_STUB_STEP,
664 while( cvNextNArraySlice( &iterator ));
/* 2D branch: validate COIs and matching types/sizes */
668 if( coi1 != 0 || coi2 != 0 )
669 CV_ERROR( CV_BadCOI, "" );
671 if( !CV_ARE_TYPES_EQ( src, dst ) )
672 CV_ERROR_FROM_CODE( CV_StsUnmatchedFormats );
674 if( !CV_ARE_SIZES_EQ( src, dst ) )
675 CV_ERROR_FROM_CODE( CV_StsUnmatchedSizes );
677 size = cvGetMatSize( src );
678 src_step = src->step;
679 dst_step = dst->step;
/* continuous matrices collapse to one long row */
681 if( CV_IS_MAT_CONT( src->type & dst->type ))
683 size.width *= size.height;
684 src_step = dst_step = CV_STUB_STEP;
688 type = CV_MAT_TYPE( src->type );
689 size.width *= CV_ELEM_SIZE(type);
691 IPPI_CALL( icvNot_8u_C1R( src->data.ptr, src_step, dst->data.ptr, dst_step, size ));