1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
43 /* ////////////////////////////////////////////////////////////////////
45 // Matrix arithmetic and logical operations: +, -, *, /, &, |, ^, ~, abs ...
// VBinOp8: SSE2 driver that applies functor Op8 element-wise to two 8-bit
// arrays, 32 bytes per iteration then 8 bytes per iteration, and returns the
// number of elements processed (the scalar caller finishes the tail).
// NOTE(review): this numbered listing is elided -- the struct braces, the
// `Op8 op` member, the `int x = 0` init and the trailing `return x;` are not
// visible here; confirm against the full source.
56 template<class Op8> struct VBinOp8
58 int operator()(const uchar* src1, const uchar* src2, uchar* dst, int len) const
// main loop: two unaligned 16-byte loads per source, combined via op
61 for( ; x <= len - 32; x += 32 )
63 __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x));
64 __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 16));
65 r0 = op(r0,_mm_loadu_si128((const __m128i*)(src2 + x)));
66 r1 = op(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 16)));
67 _mm_storeu_si128((__m128i*)(dst + x), r0);
68 _mm_storeu_si128((__m128i*)(dst + x + 16), r1);
// 8-byte remainder handled with 64-bit low-half loads/stores
70 for( ; x <= len - 8; x += 8 )
72 __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x));
73 r0 = op(r0,_mm_loadl_epi64((const __m128i*)(src2 + x)));
74 _mm_storel_epi64((__m128i*)(dst + x), r0);
// VBinOp16: same SSE2 driver for 16-bit element types (T = ushort or short);
// processes 16 elements per iteration, then 4-element groups.
// NOTE(review): braces / functor member / return are elided from this listing.
81 template<typename T, class Op16> struct VBinOp16
83 int operator()(const T* src1, const T* src2, T* dst, int len) const
86 for( ; x <= len - 16; x += 16 )
88 __m128i r0 = _mm_loadu_si128((const __m128i*)(src1 + x));
89 __m128i r1 = _mm_loadu_si128((const __m128i*)(src1 + x + 8));
90 r0 = op(r0,_mm_loadu_si128((const __m128i*)(src2 + x)));
91 r1 = op(r1,_mm_loadu_si128((const __m128i*)(src2 + x + 8)));
92 _mm_storeu_si128((__m128i*)(dst + x), r0);
93 _mm_storeu_si128((__m128i*)(dst + x + 8), r1);
// 4-element (64-bit) remainder
95 for( ; x <= len - 4; x += 4 )
97 __m128i r0 = _mm_loadl_epi64((const __m128i*)(src1 + x));
98 r0 = op(r0,_mm_loadl_epi64((const __m128i*)(src2 + x)));
99 _mm_storel_epi64((__m128i*)(dst + x), r0);
// VBinOp32f: float driver; uses aligned loads/stores when src1, src2 and dst
// are all 16-byte aligned, otherwise the unaligned path.
// NOTE(review): the `else` joining the two 8-float loops is elided from this
// listing -- in the full source only one of the two loops runs per call;
// confirm against the original file.
106 template<class Op32f> struct VBinOp32f
108 int operator()(const float* src1, const float* src2, float* dst, int len) const
111 if( (((size_t)src1|(size_t)src2|(size_t)dst)&15) == 0 )
112 for( ; x <= len - 8; x += 8 )
114 __m128 r0 = _mm_load_ps(src1 + x);
115 __m128 r1 = _mm_load_ps(src1 + x + 4);
116 r0 = op(r0,_mm_load_ps(src2 + x));
117 r1 = op(r1,_mm_load_ps(src2 + x + 4));
118 _mm_store_ps(dst + x, r0);
119 _mm_store_ps(dst + x + 4, r1);
// unaligned variant of the same 8-floats-per-iteration loop
122 for( ; x <= len - 8; x += 8 )
124 __m128 r0 = _mm_loadu_ps(src1 + x);
125 __m128 r1 = _mm_loadu_ps(src1 + x + 4);
126 r0 = op(r0,_mm_loadu_ps(src2 + x));
127 r1 = op(r1,_mm_loadu_ps(src2 + x + 4));
128 _mm_storeu_ps(dst + x, r0);
129 _mm_storeu_ps(dst + x + 4, r1);
// Elementary SSE2 functors plugged into the VBinOp* drivers: saturating
// add/sub, min/max, compares and absolute difference for 8u/16u/16s/32f,
// plus the bitwise and/or/xor ops.  NOTE(review): several struct headers and
// braces are elided from this listing (_VAbsDiff8u, _VMin16u, _VMax16u,
// _VAbsDiff16u, _VAbsDiff16s, _VAbsDiff32f are identified via the typedefs
// further below).
136 struct _VAdd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu8(a,b); }};
137 struct _VSub8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu8(a,b); }};
138 struct _VMin8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epu8(a,b); }};
139 struct _VMax8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epu8(a,b); }};
// unsigned 8-bit >: xor with 0x80 biases both operands into signed range so
// the signed byte compare produces the unsigned ordering
140 struct _VCmpGT8u { __m128i operator()(const __m128i& a, const __m128i& b) const
142 __m128i delta = _mm_set1_epi32(0x80808080);
143 return _mm_cmpgt_epi8(_mm_xor_si128(a,delta),_mm_xor_si128(b,delta));
145 struct _VCmpEQ8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_cmpeq_epi8(a,b); }};
// |a-b| for 8u: (a-b saturated) + (b-a saturated); exactly one term is nonzero
148 __m128i operator()(const __m128i& a, const __m128i& b) const
149 { return _mm_add_epi8(_mm_subs_epu8(a,b),_mm_subs_epu8(b,a)); }
151 struct _VAdd16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epu16(a,b); }};
152 struct _VSub16u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epu16(a,b); }};
// SSE2 has no unsigned 16-bit min/max; min(a,b) = a - sat(a-b)
155 __m128i operator()(const __m128i& a, const __m128i& b) const
156 { return _mm_subs_epu16(a,_mm_subs_epu16(a,b)); }
// max(a,b) = sat(a-b) + b
160 __m128i operator()(const __m128i& a, const __m128i& b) const
161 { return _mm_adds_epu16(_mm_subs_epu16(a,b),b); }
// |a-b| for 16u, same two-sided saturated-subtract trick as 8u
165 __m128i operator()(const __m128i& a, const __m128i& b) const
166 { return _mm_add_epi16(_mm_subs_epu16(a,b),_mm_subs_epu16(b,a)); }
168 struct _VAdd16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_adds_epi16(a,b); }};
169 struct _VSub16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_subs_epi16(a,b); }};
170 struct _VMin16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_min_epi16(a,b); }};
171 struct _VMax16s { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_max_epi16(a,b); }};
// |a-b| for 16s via saturating max-min
174 __m128i operator()(const __m128i& a, const __m128i& b) const
176 __m128i M = _mm_max_epi16(a,b), m = _mm_min_epi16(a,b);
177 return _mm_subs_epi16(M, m);
180 struct _VAdd32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_add_ps(a,b); }};
181 struct _VSub32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_sub_ps(a,b); }};
182 struct _VMin32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_min_ps(a,b); }};
183 struct _VMax32f { __m128 operator()(const __m128& a, const __m128& b) const { return _mm_max_ps(a,b); }};
// mask that clears the IEEE-754 sign bit of each float lane
184 static const __m128i v32f_absmask = _mm_set1_epi32(0x7fffffff);
// |a-b| for 32f: subtract, then and away the sign bit (the reference cast
// reinterprets the __m128i mask as __m128)
187 __m128 operator()(const __m128& a, const __m128& b) const
189 return _mm_and_ps(_mm_sub_ps(a,b), (__m128&)v32f_absmask);
193 struct _VAnd8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_and_si128(a,b); }};
194 struct _VOr8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_or_si128(a,b); }};
195 struct _VXor8u { __m128i operator()(const __m128i& a, const __m128i& b) const { return _mm_xor_si128(a,b); }};
// Dispatch typedefs.  The first group binds the SSE2 drivers; the second
// binds NoVec (scalar fallback that processes nothing and returns 0).
// NOTE(review): the `#if CV_SSE2` / `#else` / `#endif` lines separating the
// two groups are elided from this listing -- only one group is compiled in
// the full source.  Also note the fallback group shows VAnd8u and VXor8u but
// no `typedef NoVec VOr8u;` between them -- presumably elided; confirm
// against the original file.
197 typedef VBinOp8<_VAdd8u> VAdd8u;
198 typedef VBinOp8<_VSub8u> VSub8u;
199 typedef VBinOp8<_VMin8u> VMin8u;
200 typedef VBinOp8<_VMax8u> VMax8u;
201 typedef VBinOp8<_VAbsDiff8u> VAbsDiff8u;
202 typedef VBinOp8<_VCmpEQ8u> VCmpEQ8u;
203 typedef VBinOp8<_VCmpGT8u> VCmpGT8u;
205 typedef VBinOp16<ushort, _VAdd16u> VAdd16u;
206 typedef VBinOp16<ushort, _VSub16u> VSub16u;
207 typedef VBinOp16<ushort, _VMin16u> VMin16u;
208 typedef VBinOp16<ushort, _VMax16u> VMax16u;
209 typedef VBinOp16<ushort, _VAbsDiff16u> VAbsDiff16u;
211 typedef VBinOp16<short, _VAdd16s> VAdd16s;
212 typedef VBinOp16<short, _VSub16s> VSub16s;
213 typedef VBinOp16<short, _VMin16s> VMin16s;
214 typedef VBinOp16<short, _VMax16s> VMax16s;
215 typedef VBinOp16<short, _VAbsDiff16s> VAbsDiff16s;
217 typedef VBinOp32f<_VAdd32f> VAdd32f;
218 typedef VBinOp32f<_VSub32f> VSub32f;
219 typedef VBinOp32f<_VMin32f> VMin32f;
220 typedef VBinOp32f<_VMax32f> VMax32f;
221 typedef VBinOp32f<_VAbsDiff32f> VAbsDiff32f;
223 typedef VBinOp8<_VAnd8u> VAnd8u;
224 typedef VBinOp8<_VOr8u> VOr8u;
225 typedef VBinOp8<_VXor8u> VXor8u;
// ---- scalar fallback branch (no SSE2) ----
229 typedef NoVec VAdd8u;
230 typedef NoVec VSub8u;
231 typedef NoVec VMin8u;
232 typedef NoVec VMax8u;
233 typedef NoVec VAbsDiff8u;
234 typedef NoVec VCmpEQ8u;
235 typedef NoVec VCmpGT8u;
237 typedef NoVec VAdd16u;
238 typedef NoVec VSub16u;
239 typedef NoVec VMin16u;
240 typedef NoVec VMax16u;
241 typedef NoVec VAbsDiff16u;
243 typedef NoVec VAdd16s;
244 typedef NoVec VSub16s;
245 typedef NoVec VMin16s;
246 typedef NoVec VMax16s;
247 typedef NoVec VAbsDiff16s;
249 typedef NoVec VAdd32f;
250 typedef NoVec VSub32f;
251 typedef NoVec VMin32f;
252 typedef NoVec VMax32f;
253 typedef NoVec VAbsDiff32f;
255 typedef NoVec VAnd8u;
257 typedef NoVec VXor8u;
261 /****************************************************************************************\
262 * logical operations *
263 \****************************************************************************************/
// Scalar per-element bitwise functors used by bitwiseOp_/bitwiseSOp_ below
// (their struct braces and rtype typedefs are elided from this listing).
265 template<typename T> struct AndOp
270 T operator()( T a, T b ) const { return a & b; }
273 template<typename T> struct OrOp
278 T operator()( T a, T b ) const { return a | b; }
281 template<typename T> struct XorOp
286 T operator()( T a, T b ) const { return a ^ b; }
// bitwiseOp_: row-by-row binary bitwise kernel over two Mats.  Per row it
// runs the vector functor OPV first (returns elements already done), then an
// int-at-a-time pass when all three row pointers are 4-byte aligned, then a
// byte-wise tail with OPB.  Operates on raw bytes, so element type/channels
// are irrelevant.
// NOTE(review): listing is elided -- braces and the stores following the
// 4-byte loop (`((int*)(dst+i))[0] = t;`) are not visible here.
289 template<class OPB, class OPI, class OPV> static void
290 bitwiseOp_( const Mat& srcmat1, const Mat& srcmat2, Mat& dstmat )
292 OPB opb; OPI opi; OPV opv;
293 const uchar* src1 = srcmat1.data;
294 const uchar* src2 = srcmat2.data;
295 uchar* dst = dstmat.data;
296 size_t step1 = srcmat1.step, step2 = srcmat2.step, step = dstmat.step;
// collapse to a single long row when all three matrices are continuous
297 Size size = getContinuousSize( srcmat1, srcmat2, dstmat, (int)srcmat1.elemSize() );
299 for( ; size.height--; src1 += step1, src2 += step2, dst += step )
301 int i = opv(src1, src2, dst, size.width);
303 if( (((size_t)src1 | (size_t)src2 | (size_t)dst) & 3) == 0 )
// 16 bytes (4 ints) per iteration
305 for( ; i <= size.width - 16; i += 16 )
307 int t0 = opi(((const int*)(src1+i))[0], ((const int*)(src2+i))[0]);
308 int t1 = opi(((const int*)(src1+i))[1], ((const int*)(src2+i))[1]);
310 ((int*)(dst+i))[0] = t0;
311 ((int*)(dst+i))[1] = t1;
313 t0 = opi(((const int*)(src1+i))[2], ((const int*)(src2+i))[2]);
314 t1 = opi(((const int*)(src1+i))[3], ((const int*)(src2+i))[3]);
316 ((int*)(dst+i))[2] = t0;
317 ((int*)(dst+i))[3] = t1;
320 for( ; i <= size.width - 4; i += 4 )
322 int t = opi(*(const int*)(src1+i), *(const int*)(src2+i));
// byte tail (and unaligned rows)
327 for( ; i < size.width; i++ )
328 dst[i] = opb(src1[i], src2[i]);
// bitwiseSOp_: bitwise op between a Mat and a scalar.  The scalar is
// rasterized once into a 96-byte pattern buffer (a whole number of elements)
// and each row is processed in 96-byte chunks against that pattern.
// NOTE(review): listing is elided -- the declarations of the `scalar` buffer
// and the per-row `dst` pointer, plus braces/else lines, are not visible.
333 template<class OPB, class OPI, class OPV> static void
334 bitwiseSOp_( const Mat& srcmat, Mat& dstmat, const Scalar& _scalar )
336 OPB opb; OPI opi; OPV opv;
337 const uchar* src0 = srcmat.data;
338 uchar* dst0 = dstmat.data;
339 size_t step1 = srcmat.step, step = dstmat.step;
340 Size size = getContinuousSize( srcmat, dstmat, (int)srcmat.elemSize() );
// chunk size in bytes; divisible by the raw size of any supported element
341 const int delta = 96;
343 scalarToRawData(_scalar, scalar, srcmat.type(), (int)(delta/srcmat.elemSize1()) );
345 for( ; size.height--; src0 += step1, dst0 += step )
347 const uchar* src = (const uchar*)src0;
349 int i, len = size.width;
351 if( (((size_t)src|(size_t)dst) & 3) == 0 )
// aligned path: vector functor first, then 16-byte int groups
353 while( (len -= delta) >= 0 )
355 i = opv(src, scalar, dst, delta);
356 for( ; i < delta; i += 16 )
358 int t0 = opi(((const int*)(src+i))[0], ((const int*)(scalar+i))[0]);
359 int t1 = opi(((const int*)(src+i))[1], ((const int*)(scalar+i))[1]);
360 ((int*)(dst+i))[0] = t0;
361 ((int*)(dst+i))[1] = t1;
363 t0 = opi(((const int*)(src+i))[2], ((const int*)(scalar+i))[2]);
364 t1 = opi(((const int*)(src+i))[3], ((const int*)(scalar+i))[3]);
365 ((int*)(dst+i))[2] = t0;
366 ((int*)(dst+i))[3] = t1;
// unaligned path: 4 bytes per iteration with the byte functor
374 while( (len -= delta) >= 0 )
376 for( i = 0; i < delta; i += 4 )
378 uchar t0 = opb(src[i], scalar[i]);
379 uchar t1 = opb(src[i+1], scalar[i+1]);
380 dst[i] = t0; dst[i+1] = t1;
382 t0 = opb(src[i+2], scalar[i+2]);
383 t1 = opb(src[i+3], scalar[i+3]);
384 dst[i+2] = t0; dst[i+3] = t1;
// tail: len went negative above, restore and finish byte-wise
391 for( len += delta, i = 0; i < len; i++ )
392 dst[i] = opb(src[i],scalar[i]);
// binaryMaskOp: applies `func` to the whole images when there is no mask;
// otherwise processes horizontal stripes of `dy` rows into a temporary
// buffer (bounded by CV_MAX_LOCAL_SIZE bytes) and copies only mask-selected
// pixels into dst.  NOTE(review): listing is elided -- the `static void`
// return line, the no-mask early-return test and the `int y, dy;`
// declarations are not visible here.
397 binaryMaskOp( const Mat& src1, const Mat& src2, Mat& dst,
398 const Mat& mask, BinaryFunc func )
400 CV_Assert( src1.size() == src2.size() && src1.type() == src2.type() && func != 0 );
401 dst.create( src1.size(), src1.type() );
// no-mask fast path: one full-image call
404 func(src1, src2, dst);
407 AutoBuffer<uchar> buf;
408 size_t esz = dst.elemSize(), buf_step = dst.cols*esz;
409 CopyMaskFunc copym_func = getCopyMaskFunc((int)esz);
412 CV_Assert(mask.type() == CV_8UC1 && mask.size() == dst.size());
// stripe height: as many rows as fit in the local buffer, at least one
413 dy = std::min(std::max((int)(CV_MAX_LOCAL_SIZE/buf_step), 1), dst.rows);
414 buf.allocate( buf_step*dy );
416 for( y = 0; y < dst.rows; y += dy )
418 dy = std::min(dy, dst.rows - y);
419 Mat dstpart = dst.rowRange(y, y + dy);
420 Mat temp(dy, dst.cols, dst.type(), (uchar*)buf );
421 func( src1.rowRange(y, y + dy), src2.rowRange(y, y + dy), temp );
422 copym_func( temp, dstpart, mask.rowRange(y, y + dy) );
// binarySMaskOp: Mat-with-Scalar counterpart of binaryMaskOp -- same striped
// temp-buffer + masked-copy scheme, but `func` takes (src, dst, scalar).
// NOTE(review): elided as above (return-type line, no-mask fast path and
// `int y, dy;` are not visible in this listing).
429 binarySMaskOp( const Mat& src1, const Scalar& s, Mat& dst,
430 const Mat& mask, BinarySFuncCn func )
432 CV_Assert( func != 0 );
433 dst.create( src1.size(), src1.type() );
439 AutoBuffer<uchar> buf;
440 size_t esz = dst.elemSize(), buf_step = dst.cols*esz;
441 CopyMaskFunc copym_func = getCopyMaskFunc((int)esz);
444 CV_Assert(mask.type() == CV_8UC1 && mask.size() == dst.size());
445 dy = std::min(std::max((int)(CV_MAX_LOCAL_SIZE/buf_step), 1), dst.rows);
446 buf.allocate( buf_step*dy );
448 for( y = 0; y < dst.rows; y += dy )
450 dy = std::min(dy, dst.rows - y);
451 Mat dstpart = dst.rowRange(y, y + dy);
452 Mat temp(dy, dst.cols, dst.type(), (uchar*)buf);
453 func( src1.rowRange(y, y + dy), temp, s );
454 copym_func( temp, dstpart, mask.rowRange(y, y + dy) );
// Public bitwise entry points: each instantiates the generic kernel with the
// matching byte/int/vector functor triple and routes through the masked
// drivers above.  (Function braces are elided from this listing.)
460 void bitwise_and(const Mat& a, const Mat& b, Mat& c, const Mat& mask)
462 binaryMaskOp(a, b, c, mask, bitwiseOp_<AndOp<uchar>, AndOp<int>, VAnd8u>);
465 void bitwise_or(const Mat& a, const Mat& b, Mat& c, const Mat& mask)
467 binaryMaskOp(a, b, c, mask, bitwiseOp_<OrOp<uchar>, OrOp<int>, VOr8u>);
470 void bitwise_xor(const Mat& a, const Mat& b, Mat& c, const Mat& mask)
472 binaryMaskOp(a, b, c, mask, bitwiseOp_<XorOp<uchar>, XorOp<int>, VXor8u>);
475 void bitwise_and(const Mat& a, const Scalar& s, Mat& c, const Mat& mask)
477 binarySMaskOp(a, s, c, mask,
478 bitwiseSOp_<AndOp<uchar>, AndOp<int>, VAnd8u>);
481 void bitwise_or(const Mat& a, const Scalar& s, Mat& c, const Mat& mask)
483 binarySMaskOp(a, s, c, mask,
484 bitwiseSOp_<OrOp<uchar>, OrOp<int>, VOr8u>);
487 void bitwise_xor(const Mat& a, const Scalar& s, Mat& c, const Mat& mask)
489 binarySMaskOp(a, s, c, mask,
490 bitwiseSOp_<XorOp<uchar>, XorOp<int>, VXor8u>);
494 void bitwise_not(const Mat& src, Mat& dst)
496 const uchar* sptr = src.data;
497 uchar* dptr = dst.data;
498 dst.create( src.size(), src.type() );
499 Size size = getContinuousSize( src, dst, (int)src.elemSize() );
501 for( ; size.height--; sptr += src.step, dptr += dst.step )
504 if( (((size_t)sptr | (size_t)dptr) & 3) == 0 )
506 for( ; i <= size.width - 16; i += 16 )
508 int t0 = ~((const int*)(sptr+i))[0];
509 int t1 = ~((const int*)(sptr+i))[1];
511 ((int*)(dptr+i))[0] = t0;
512 ((int*)(dptr+i))[1] = t1;
514 t0 = ~((const int*)(sptr+i))[2];
515 t1 = ~((const int*)(sptr+i))[3];
517 ((int*)(dptr+i))[2] = t0;
518 ((int*)(dptr+i))[3] = t1;
521 for( ; i <= size.width - 4; i += 4 )
522 *(int*)(dptr+i) = ~*(const int*)(sptr+i);
525 for( ; i < size.width; i++ )
527 dptr[i] = (uchar)(~sptr[i]);
532 /****************************************************************************************\
534 \****************************************************************************************/
// 8u specializations of OpAdd/OpSub use the CV_FAST_CAST_8U saturation
// macro instead of the generic saturate_cast, then the depth-indexed
// dispatch tables for add/subtract (index = CV_MAT_DEPTH; the 0 entries are
// unsupported depths, e.g. CV_8S).  Table braces are elided from this
// listing.
536 template<> inline uchar OpAdd<uchar>::operator ()(uchar a, uchar b) const
537 { return CV_FAST_CAST_8U(a + b); }
538 template<> inline uchar OpSub<uchar>::operator ()(uchar a, uchar b) const
539 { return CV_FAST_CAST_8U(a - b); }
541 static BinaryFunc addTab[] =
543 binaryOpC1_<OpAdd<uchar>,VAdd8u>, 0,
544 binaryOpC1_<OpAdd<ushort>,VAdd16u>,
545 binaryOpC1_<OpAdd<short>,VAdd16s>,
546 binaryOpC1_<OpAdd<int>,NoVec>,
547 binaryOpC1_<OpAdd<float>,VAdd32f>,
548 binaryOpC1_<OpAdd<double>,NoVec>, 0
551 static BinaryFunc subTab[] =
553 binaryOpC1_<OpSub<uchar>,VSub8u>, 0,
554 binaryOpC1_<OpSub<ushort>,VSub16u>,
555 binaryOpC1_<OpSub<short>,VSub16s>,
556 binaryOpC1_<OpSub<int>,NoVec>,
557 binaryOpC1_<OpSub<float>,VSub32f>,
558 binaryOpC1_<OpSub<double>,NoVec>, 0
562 void add( const Mat& src1, const Mat& src2, Mat& dst )
564 Size size = src1.size(); int type = src1.type();
565 BinaryFunc func = addTab[CV_MAT_DEPTH(type)];
566 CV_Assert( size == src2.size() && type == src2.type() && func != 0 );
567 dst.create( size, type );
568 func(src1, src2, dst);
571 void subtract( const Mat& src1, const Mat& src2, Mat& dst )
573 Size size = src1.size(); int type = src1.type();
574 BinaryFunc func = subTab[CV_MAT_DEPTH(type)];
575 CV_Assert( size == src2.size() && type == src2.type() && func != 0 );
576 dst.create( size, type );
577 func(src1, src2, dst);
// Masked add/subtract wrappers over binaryMaskOp with the depth-dispatched
// tables.  NOTE(review): the body of subtract(Mat, Scalar, ...) is entirely
// elided from this listing (only its signature is visible); presumably it
// forwards to the Scalar variants below -- confirm against the full source.
580 void subtract(const Mat& a, const Scalar& s, Mat& c, const Mat& mask)
585 void add(const Mat& src1, const Mat& src2, Mat& dst, const Mat& mask)
587 binaryMaskOp(src1, src2, dst, mask, addTab[src1.depth()] );
590 void subtract(const Mat& src1, const Mat& src2, Mat& dst, const Mat& mask)
592 binaryMaskOp(src1, src2, dst, mask, subTab[src1.depth()] );
// add(Mat, Scalar): per-channel scalar addition through binarySMaskOp; the
// <T, WT, DT> triples widen small integer types to int for the intermediate
// sum.  (Function/table braces are elided from this listing.)
595 void add(const Mat& src1, const Scalar& s, Mat& dst, const Mat& mask)
597 static BinarySFuncCn addSTab[] =
599 binarySOpCn_<OpAdd<uchar, int, uchar> >, 0,
600 binarySOpCn_<OpAdd<ushort, int, ushort> >,
601 binarySOpCn_<OpAdd<short, int, short> >,
602 binarySOpCn_<OpAdd<int> >,
603 binarySOpCn_<OpAdd<float> >,
604 binarySOpCn_<OpAdd<double> >, 0
606 int depth = src1.depth();
607 binarySMaskOp(src1, s, dst, mask, addSTab[depth]);
// subtract(Scalar, Mat): reversed subtraction dst = s - src1, using the
// OpRSub functor family; otherwise identical in structure to add(Mat,Scalar).
610 void subtract(const Scalar& s, const Mat& src1, Mat& dst, const Mat& mask)
612 static BinarySFuncCn rsubSTab[] =
614 binarySOpCn_<OpRSub<uchar, int, uchar> >, 0,
615 binarySOpCn_<OpRSub<ushort, int, ushort> >,
616 binarySOpCn_<OpRSub<short, int, short> >,
617 binarySOpCn_<OpRSub<int> >,
618 binarySOpCn_<OpRSub<float> >,
619 binarySOpCn_<OpRSub<double> >, 0
621 int depth = src1.depth();
622 binarySMaskOp(src1, s, dst, mask, rsubSTab[depth]);
625 /****************************************************************************************\
627 \****************************************************************************************/
// mul_: per-element product dst = saturate(scale * src1 * src2), with a fast
// branch that skips the scale multiplication when scale == 1.  WT is the
// widened working type (float or double).  Loops are 4x unrolled with a
// scalar tail.  (Braces and the `int i;` declarations are elided from this
// listing.)
629 template<typename T, typename WT> static void
630 mul_( const Mat& srcmat1, const Mat& srcmat2, Mat& dstmat, double _scale )
632 const T* src1 = (const T*)srcmat1.data;
633 const T* src2 = (const T*)srcmat2.data;
634 T* dst = (T*)dstmat.data;
// steps converted from bytes to elements
635 size_t step1 = srcmat1.step/sizeof(src1[0]);
636 size_t step2 = srcmat2.step/sizeof(src2[0]);
637 size_t step = dstmat.step/sizeof(dst[0]);
638 Size size = getContinuousSize( srcmat1, srcmat2, dstmat, dstmat.channels() );
// scale == 1 fast path: no widening multiply needed
640 if( fabs(_scale - 1.) < DBL_EPSILON )
642 for( ; size.height--; src1+=step1, src2+=step2, dst+=step )
645 for( i = 0; i <= size.width - 4; i += 4 )
647 T t0 = saturate_cast<T>(src1[i] * src2[i]);
648 T t1 = saturate_cast<T>(src1[i+1] * src2[i+1]);
649 dst[i] = t0; dst[i+1] = t1;
651 t0 = saturate_cast<T>(src1[i+2] * src2[i+2]);
652 t1 = saturate_cast<T>(src1[i+3] * src2[i+3]);
653 dst[i+2] = t0; dst[i+3] = t1;
656 for( ; i < size.width; i++ )
657 dst[i] = saturate_cast<T>(src1[i] * src2[i]);
// general path: widen to WT, multiply by scale, saturate back to T
662 WT scale = (WT)_scale;
663 for( ; size.height--; src1+=step1, src2+=step2, dst+=step )
666 for( i = 0; i <= size.width - 4; i += 4 )
668 T t0 = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
669 T t1 = saturate_cast<T>(scale*(WT)src1[i+1]*src2[i+1]);
670 dst[i] = t0; dst[i+1] = t1;
672 t0 = saturate_cast<T>(scale*(WT)src1[i+2]*src2[i+2]);
673 t1 = saturate_cast<T>(scale*(WT)src1[i+3]*src2[i+3]);
674 dst[i+2] = t0; dst[i+3] = t1;
677 for( ; i < size.width; i++ )
678 dst[i] = saturate_cast<T>(scale*(WT)src1[i]*src2[i]);
// MulDivFunc: common signature shared by the multiply/divide kernel tables.
683 typedef void (*MulDivFunc)( const Mat& src1, const Mat& src2,
684 Mat& dst, double scale );
// multiply: dst = saturate(scale * src1 * src2), depth-dispatched to mul_.
686 void multiply(const Mat& src1, const Mat& src2, Mat& dst, double scale)
688 static MulDivFunc tab[] =
690 mul_<uchar, float>, 0, mul_<ushort, float>, mul_<short, float>,
691 mul_<int, double>, mul_<float, float>, mul_<double, double>, 0
694 MulDivFunc func = tab[src1.depth()];
695 CV_Assert( src1.size() == src2.size() && src1.type() == src2.type() && func != 0 );
696 dst.create( src1.size(), src1.type() );
697 func( src1, src2, dst, scale );
// div_: per-element division dst = saturate(scale * src1 / src2), with
// division-by-zero mapped to 0.  The 4x-unrolled path replaces four divides
// with one: d = scale/(src2[i]*..*src2[i+3]) and per-lane products of the
// other three divisors.
// NOTE(review): the lines folding d into a and b (`b *= d; a *= d;` in the
// full source) are elided from this listing, which is why `d` appears unused
// below -- confirm against the original file before editing.
701 template<typename T> static void
702 div_( const Mat& srcmat1, const Mat& srcmat2, Mat& dstmat, double scale )
704 const T* src1 = (const T*)srcmat1.data;
705 const T* src2 = (const T*)srcmat2.data;
706 T* dst = (T*)dstmat.data;
707 size_t step1 = srcmat1.step/sizeof(src1[0]);
708 size_t step2 = srcmat2.step/sizeof(src2[0]);
709 size_t step = dstmat.step/sizeof(dst[0]);
710 Size size = getContinuousSize( srcmat1, srcmat2, dstmat, dstmat.channels() );
712 for( ; size.height--; src1+=step1, src2+=step2, dst+=step )
715 for( ; i <= size.width - 4; i += 4 )
// all four divisors nonzero: shared-reciprocal trick
717 if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
719 double a = (double)src2[i] * src2[i+1];
720 double b = (double)src2[i+2] * src2[i+3];
721 double d = scale/(a * b);
725 T z0 = saturate_cast<T>(src2[i+1] * src1[i] * b);
726 T z1 = saturate_cast<T>(src2[i] * src1[i+1] * b);
727 T z2 = saturate_cast<T>(src2[i+3] * src1[i+2] * a);
728 T z3 = saturate_cast<T>(src2[i+2] * src1[i+3] * a);
730 dst[i] = z0; dst[i+1] = z1;
731 dst[i+2] = z2; dst[i+3] = z3;
// at least one zero divisor: divide lane-by-lane, zero where undefined
735 T z0 = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
736 T z1 = src2[i+1] != 0 ? saturate_cast<T>(src1[i+1]*scale/src2[i+1]) : 0;
737 T z2 = src2[i+2] != 0 ? saturate_cast<T>(src1[i+2]*scale/src2[i+2]) : 0;
738 T z3 = src2[i+3] != 0 ? saturate_cast<T>(src1[i+3]*scale/src2[i+3]) : 0;
740 dst[i] = z0; dst[i+1] = z1;
741 dst[i+2] = z2; dst[i+3] = z3;
745 for( ; i < size.width; i++ )
746 dst[i] = src2[i] != 0 ? saturate_cast<T>(src1[i]*scale/src2[i]) : 0;
// divide: dst = saturate(scale * src1 / src2), depth-dispatched to div_;
// division by zero yields 0 per the kernel's convention.
751 void divide(const Mat& src1, const Mat& src2, Mat& dst, double scale)
753 static MulDivFunc tab[] =
755 div_<uchar>, 0, div_<ushort>, div_<short>,
756 div_<int>, div_<float>, div_<double>, 0
759 MulDivFunc func = tab[src1.depth()];
760 CV_Assert( src1.size() == src2.size() && src1.type() == src2.type() && func != 0 );
761 dst.create( src1.size(), src1.type() );
762 func( src1, src2, dst, scale );
// recip_: per-element reciprocal dst = saturate(scale / src2), zero where
// src2 is zero; same shared-reciprocal unrolling scheme as div_.
// NOTE(review): as in div_, the `b *= d; a *= d;` lines are elided from this
// listing, leaving `d` apparently unused -- confirm against the full source.
765 template<typename T> static void
766 recip_( double scale, const Mat& srcmat2, Mat& dstmat )
768 const T* src2 = (const T*)srcmat2.data;
769 T* dst = (T*)dstmat.data;
770 size_t step2 = srcmat2.step/sizeof(src2[0]);
771 size_t step = dstmat.step/sizeof(dst[0]);
772 Size size = getContinuousSize( srcmat2, dstmat, dstmat.channels() );
774 for( ; size.height--; src2+=step2, dst+=step )
777 for( ; i <= size.width - 4; i += 4 )
779 if( src2[i] != 0 && src2[i+1] != 0 && src2[i+2] != 0 && src2[i+3] != 0 )
781 double a = (double)src2[i] * src2[i+1];
782 double b = (double)src2[i+2] * src2[i+3];
783 double d = scale/(a * b);
787 T z0 = saturate_cast<T>(src2[i+1] * b);
788 T z1 = saturate_cast<T>(src2[i] * b);
789 T z2 = saturate_cast<T>(src2[i+3] * a);
790 T z3 = saturate_cast<T>(src2[i+2] * a);
792 dst[i] = z0; dst[i+1] = z1;
793 dst[i+2] = z2; dst[i+3] = z3;
// slow lane-by-lane path when any divisor is zero
797 T z0 = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
798 T z1 = src2[i+1] != 0 ? saturate_cast<T>(scale/src2[i+1]) : 0;
799 T z2 = src2[i+2] != 0 ? saturate_cast<T>(scale/src2[i+2]) : 0;
800 T z3 = src2[i+3] != 0 ? saturate_cast<T>(scale/src2[i+3]) : 0;
802 dst[i] = z0; dst[i+1] = z1;
803 dst[i+2] = z2; dst[i+3] = z3;
807 for( ; i < size.width; i++ )
808 dst[i] = src2[i] != 0 ? saturate_cast<T>(scale/src2[i]) : 0;
// divide(scale, src, dst): scalar-over-matrix division via the recip_
// kernels; depth-dispatched, asserts on unsupported depths.
812 typedef void (*RecipFunc)( double scale, const Mat& src, Mat& dst );
814 void divide(double scale, const Mat& src, Mat& dst)
816 static RecipFunc tab[] =
818 recip_<uchar>, 0, recip_<ushort>, recip_<short>,
819 recip_<int>, recip_<float>, recip_<double>, 0
822 RecipFunc func = tab[src.depth()];
823 CV_Assert( func != 0 );
824 dst.create( src.size(), src.type() );
825 func( scale, src, dst );
828 /****************************************************************************************\
830 \****************************************************************************************/
// addWeighted_: generic weighted sum
//   dst = saturate(src1*alpha + src2*beta + gamma)
// computed in the widened working type WT; 4x unrolled with scalar tail.
// (Braces and `int i;` are elided from this listing.)
832 template<typename T, typename WT> static void
833 addWeighted_( const Mat& srcmat1, double _alpha, const Mat& srcmat2,
834 double _beta, double _gamma, Mat& dstmat )
836 const T* src1 = (const T*)srcmat1.data;
837 const T* src2 = (const T*)srcmat2.data;
838 T* dst = (T*)dstmat.data;
839 size_t step1 = srcmat1.step/sizeof(src1[0]);
840 size_t step2 = srcmat2.step/sizeof(src2[0]);
841 size_t step = dstmat.step/sizeof(dst[0]);
842 Size size = getContinuousSize( srcmat1, srcmat2, dstmat, dstmat.channels() );
// narrow the weights once, outside the loops
843 WT alpha = (WT)_alpha, beta = (WT)_beta, gamma = (WT)_gamma;
845 for( ; size.height--; src1+=step1, src2+=step2, dst+=step )
848 for( ; i <= size.width - 4; i += 4 )
850 T t0 = saturate_cast<T>(src1[i]*alpha + src2[i]*beta + gamma);
851 T t1 = saturate_cast<T>(src1[i+1]*alpha + src2[i+1]*beta + gamma);
852 dst[i] = t0; dst[i+1] = t1;
854 t0 = saturate_cast<T>(src1[i+2]*alpha + src2[i+2]*beta + gamma);
855 t1 = saturate_cast<T>(src1[i+3]*alpha + src2[i+3]*beta + gamma);
856 dst[i+2] = t0; dst[i+3] = t1;
859 for( ; i < size.width; i++ )
860 dst[i] = saturate_cast<T>(src1[i]*alpha + src2[i]*beta + gamma);
// addWeighted8u: 8-bit specialization using two 256-entry fixed-point lookup
// tables (shift = 14 fractional bits): tab1[v] ~ v*alpha<<14, tab2[v] ~
// v*beta<<14 + gamma<<14 + rounding.  Falls back to the generic float path
// for tiny images or weights too large for the fixed-point range.
// NOTE(review): the `static void` line, braces, and the table-filling
// arithmetic (alpha/beta pre-scaling, the `t` accumulator updates) are
// elided from this listing -- the comments below describe only what is
// visible.
866 addWeighted8u( const Mat& srcmat1, double alpha,
867 const Mat& srcmat2, double beta,
868 double gamma, Mat& dstmat )
870 const int shift = 14;
// fallback guard: table setup isn't worth it for small images, and large
// weights would overflow the fixed-point tables
871 if( srcmat1.rows*srcmat1.cols*srcmat1.channels() <= 256 ||
872 fabs(alpha) > 256 || fabs(beta) > 256 || fabs(gamma) > 256*256 )
874 addWeighted_<uchar, float>(srcmat1, alpha, srcmat2, beta, gamma, dstmat);
877 const uchar* src1 = srcmat1.data;
878 const uchar* src2 = srcmat2.data;
879 uchar* dst = dstmat.data;
880 size_t step1 = srcmat1.step;
881 size_t step2 = srcmat2.step;
882 size_t step = dstmat.step;
883 Size size = getContinuousSize( srcmat1, srcmat2, dstmat, dstmat.channels() );
885 int tab1[256], tab2[256];
887 int j, t0, t1, t2, t3;
// fold gamma plus the rounding constant into the tab2 side
890 gamma = gamma*(1 << shift) + (1 << (shift - 1));
893 for( j = 0; j < 256; j++ )
895 tab1[j] = cvRound(t);
896 tab2[j] = cvRound(gamma);
// probe the four extreme table sums to see whether every possible result
// stays within [-256, 511], i.e. CV_FAST_CAST_8U's valid input range
901 t0 = (tab1[0] + tab2[0]) >> shift;
902 t1 = (tab1[0] + tab2[255]) >> shift;
903 t2 = (tab1[255] + tab2[0]) >> shift;
904 t3 = (tab1[255] + tab2[255]) >> shift;
906 if( (unsigned)(t0+256) < 768 && (unsigned)(t1+256) < 768 &&
907 (unsigned)(t2+256) < 768 && (unsigned)(t3+256) < 768 )
909 // use the faster table-based conversion back to 8u
910 for( ; size.height--; src1 += step1, src2 += step2, dst += step )
914 for( i = 0; i <= size.width - 4; i += 4 )
916 t0 = CV_FAST_CAST_8U((tab1[src1[i]] + tab2[src2[i]]) >> shift);
917 t1 = CV_FAST_CAST_8U((tab1[src1[i+1]] + tab2[src2[i+1]]) >> shift);
920 dst[i+1] = (uchar)t1;
922 t0 = CV_FAST_CAST_8U((tab1[src1[i+2]] + tab2[src2[i+2]]) >> shift);
923 t1 = CV_FAST_CAST_8U((tab1[src1[i+3]] + tab2[src2[i+3]]) >> shift);
925 dst[i+2] = (uchar)t0;
926 dst[i+3] = (uchar)t1;
929 for( ; i < size.width; i++ )
931 t0 = CV_FAST_CAST_8U((tab1[src1[i]] + tab2[src2[i]]) >> shift);
938 // use the universal saturation macro for conversion back to 8u
939 for( ; size.height--; src1 += step1, src2 += step2, dst += step )
943 for( i = 0; i <= size.width - 4; i += 4 )
945 t0 = (tab1[src1[i]] + tab2[src2[i]]) >> shift;
946 t1 = (tab1[src1[i+1]] + tab2[src2[i+1]]) >> shift;
948 dst[i] = CV_CAST_8U( t0 );
949 dst[i+1] = CV_CAST_8U( t1 );
951 t0 = (tab1[src1[i+2]] + tab2[src2[i+2]]) >> shift;
952 t1 = (tab1[src1[i+3]] + tab2[src2[i+3]]) >> shift;
954 dst[i+2] = CV_CAST_8U( t0 );
955 dst[i+3] = CV_CAST_8U( t1 );
958 for( ; i < size.width; i++ )
960 t0 = (tab1[src1[i]] + tab2[src2[i]]) >> shift;
961 dst[i] = CV_CAST_8U( t0 );
// addWeighted: public entry point, depth-dispatched; 8u gets the fixed-point
// specialization, everything else the generic template.
967 typedef void (*AddWeightedFunc)( const Mat& src1, double alpha, const Mat& src2,
968 double beta, double gamma, Mat& dst );
970 void addWeighted( const Mat& src1, double alpha, const Mat& src2,
971 double beta, double gamma, Mat& dst )
973 static AddWeightedFunc tab[]=
975 addWeighted8u, 0, addWeighted_<ushort, float>, addWeighted_<short, float>,
976 addWeighted_<int, double>, addWeighted_<float, float>, addWeighted_<double, double>, 0
979 AddWeightedFunc func = tab[src1.depth()];
980 CV_Assert( src1.size() == src2.size() && src1.type() == src2.type() && func != 0 );
981 dst.create( src1.size(), src1.type() );
982 func( src1, alpha, src2, beta, gamma, dst );
986 /****************************************************************************************\
988 \****************************************************************************************/
// Scalar absolute-difference functors.  The short specialization saturates
// because |a-b| for shorts can exceed SHRT_MAX; OpAbsDiffS widens via WT for
// the scalar operand.  (Struct braces/typedefs are elided from this listing.)
990 template<typename T> struct OpAbsDiff
995 T operator()(T a, T b) { return (T)std::abs(a - b); }
998 template<> inline short OpAbsDiff<short>::operator ()(short a, short b)
999 { return saturate_cast<short>(std::abs(a - b)); }
1001 template<typename T, typename WT=T> struct OpAbsDiffS
1006 T operator()(T a, WT b) { return saturate_cast<T>(std::abs(a - b)); }
1009 void absdiff( const Mat& src1, const Mat& src2, Mat& dst )
1011 static BinaryFunc tab[] =
1013 binaryOpC1_<OpAbsDiff<uchar>,VAbsDiff8u>, 0,
1014 binaryOpC1_<OpAbsDiff<ushort>,VAbsDiff16u>,
1015 binaryOpC1_<OpAbsDiff<short>,VAbsDiff16s>,
1016 binaryOpC1_<OpAbsDiff<int>,NoVec>,
1017 binaryOpC1_<OpAbsDiff<float>,VAbsDiff32f>,
1018 binaryOpC1_<OpAbsDiff<double>,NoVec>, 0
1021 dst.create(src1.size(), src1.type());
1022 BinaryFunc func = tab[src1.depth()];
1023 CV_Assert(src1.size() == src2.size() && src1.type() == src2.type() && func != 0);
1024 func( src1, src2, dst );
1028 void absdiff( const Mat& src1, const Scalar& s, Mat& dst )
1030 static BinarySFuncCn tab[] =
1032 binarySOpCn_<OpAbsDiffS<uchar, int> >, 0,
1033 binarySOpCn_<OpAbsDiffS<ushort, int> >,
1034 binarySOpCn_<OpAbsDiffS<short, int> >,
1035 binarySOpCn_<OpAbsDiffS<int> >,
1036 binarySOpCn_<OpAbsDiffS<float> >,
1037 binarySOpCn_<OpAbsDiffS<double> >, 0
1040 dst.create(src1.size(), src1.type());
1041 BinarySFuncCn func = tab[src1.depth()];
1042 CV_Assert(src1.channels() <= 4 && func != 0);
1043 func( src1, dst, s );
1046 /****************************************************************************************\
1048 \****************************************************************************************/
// InRangeC1..C4: per-pixel range predicates for 1-4 channel data.  Each
// returns 0xFF when every channel satisfies lower <= x < upper, else 0
// (the `(uchar)-bool` idiom).  xtype is the pixel type, btype the (possibly
// widened) bound type.  (InRangeC1's typedef lines and the struct braces are
// elided from this listing.)
1050 template<typename T, typename WT> struct InRangeC1
1054 uchar operator()(xtype x, btype a, btype b) const
1055 { return (uchar)-(a <= x && x < b); }
1058 template<typename T, typename WT> struct InRangeC2
1060 typedef Vec<T,2> xtype;
1061 typedef Vec<WT,2> btype;
1062 uchar operator()(const xtype& x, const btype& a, const btype& b) const
1064 return (uchar)-(a[0] <= x[0] && x[0] < b[0] &&
1065 a[1] <= x[1] && x[1] < b[1]);
1069 template<typename T, typename WT> struct InRangeC3
1071 typedef Vec<T,3> xtype;
1072 typedef Vec<WT,3> btype;
1073 uchar operator()(const xtype& x, const btype& a, const btype& b) const
1075 return (uchar)-(a[0] <= x[0] && x[0] < b[0] &&
1076 a[1] <= x[1] && x[1] < b[1] &&
1077 a[2] <= x[2] && x[2] < b[2]);
1081 template<typename T, typename WT> struct InRangeC4
1083 typedef Vec<T,4> xtype;
1084 typedef Vec<WT,4> btype;
1085 uchar operator()(const xtype& x, const btype& a, const btype& b) const
1087 return (uchar)-(a[0] <= x[0] && x[0] < b[0] &&
1088 a[1] <= x[1] && x[1] < b[1] &&
1089 a[2] <= x[2] && x[2] < b[2] &&
1090 a[3] <= x[3] && x[3] < b[3]);
// inRange_: Mat-bounds variant -- dst(x) = 0xFF where
// srcmat2(x) <= srcmat1(x) < srcmat3(x), channel-wise via Op.
// (The `Op op;` declaration and braces are elided from this listing.)
1094 template<class Op> static void
1095 inRange_( const Mat& srcmat1, const Mat& srcmat2, const Mat& srcmat3, Mat& dstmat )
1098 uchar* dst = dstmat.data;
1099 size_t dstep = dstmat.step;
1100 Size size = getContinuousSize( srcmat1, srcmat2, srcmat3, dstmat );
1102 for( int y = 0; y < size.height; y++, dst += dstep )
1104 const typename Op::xtype* src1 = (const typename Op::xtype*)(srcmat1.data + srcmat1.step*y);
1105 const typename Op::xtype* src2 = (const typename Op::xtype*)(srcmat2.data + srcmat2.step*y);
1106 const typename Op::xtype* src3 = (const typename Op::xtype*)(srcmat3.data + srcmat3.step*y);
1107 for( int x = 0; x < size.width; x++ )
1108 dst[x] = op( src1[x], src2[x], src3[x] );
// Row-wise driver for inRange with Scalar bounds: the two Scalars are
// converted once into the functor's (widened) bound type, then applied to
// every element: dst(x,y) = Op()(src1(x,y), a, b).
1112 template<class Op> static void
1113 inRangeS_( const Mat& srcmat1, const Scalar& _a, const Scalar& _b, Mat& dstmat )
1116 typedef typename Op::btype WT;
1117 typedef typename DataType<WT>::channel_type WT1;
1119 uchar* dst = dstmat.data;
1120 size_t dstep = dstmat.step;
1121 Size size = getContinuousSize( srcmat1, dstmat );
1122 int cn = srcmat1.channels();
// Pack the first cn components of each Scalar into a/b as WT1 channel values.
1123 _a.convertTo((WT1*)&a, cn);
1124 _b.convertTo((WT1*)&b, cn);
1126 for( int y = 0; y < size.height; y++, dst += dstep )
1128 const typename Op::xtype* src1 = (const typename Op::xtype*)(srcmat1.data + srcmat1.step*y);
1129 for( int x = 0; x < size.width; x++ )
1130 dst[x] = op( src1[x], a, b );
// Dispatch-table entry signatures for the Mat-bounds and Scalar-bounds
// inRange drivers below.
1134 typedef void (*InRangeFunc)( const Mat& src1, const Mat& src2, const Mat& src3, Mat& dst );
1135 typedef void (*InRangeSFunc)( const Mat& src1, const Scalar& a, const Scalar& b, Mat& dst );
// inRange with per-pixel bounds: dst(i) = 255 when
// lowerb(i) <= src(i) < upperb(i) (upper bound exclusive in this
// implementation — see the InRangeC* functors), else 0. dst becomes CV_8UC1.
1137 void inRange(const Mat& src, const Mat& lowerb,
1138 const Mat& upperb, Mat& dst)
1140 static InRangeFunc tab[] =
// Four groups of 8 entries, one group per channel count 1..4; within a group,
// indexed by depth with 0 in the unsupported CV_8S slot plus a trailing 0.
// Indexing by src.type() works because type encodes depth + 8*(channels()-1).
1142 inRange_<InRangeC1<uchar, uchar> >, 0,
1143 inRange_<InRangeC1<ushort, ushort> >,
1144 inRange_<InRangeC1<short, short> >,
1145 inRange_<InRangeC1<int, int> >,
1146 inRange_<InRangeC1<float, float> >,
1147 inRange_<InRangeC1<double, double> >, 0,
1149 inRange_<InRangeC2<uchar, uchar> >, 0,
1150 inRange_<InRangeC2<ushort, ushort> >,
1151 inRange_<InRangeC2<short, short> >,
1152 inRange_<InRangeC2<int, int> >,
1153 inRange_<InRangeC2<float, float> >,
1154 inRange_<InRangeC2<double, double> >, 0,
1156 inRange_<InRangeC3<uchar, uchar> >, 0,
1157 inRange_<InRangeC3<ushort, ushort> >,
1158 inRange_<InRangeC3<short, short> >,
1159 inRange_<InRangeC3<int, int> >,
1160 inRange_<InRangeC3<float, float> >,
1161 inRange_<InRangeC3<double, double> >, 0,
1163 inRange_<InRangeC4<uchar, uchar> >, 0,
1164 inRange_<InRangeC4<ushort, ushort> >,
1165 inRange_<InRangeC4<short, short> >,
1166 inRange_<InRangeC4<int, int> >,
1167 inRange_<InRangeC4<float, float> >,
1168 inRange_<InRangeC4<double, double> >, 0
// All three inputs must agree in size and type; at most 4 channels.
1171 CV_Assert( src.size() == lowerb.size() && src.size() == upperb.size() &&
1172 src.type() == lowerb.type() && src.type() == upperb.type() &&
1173 src.channels() <= 4 );
1175 InRangeFunc func = tab[src.type()];
1176 CV_Assert( func != 0 );
1178 dst.create(src.size(), CV_8U);
1179 func( src, lowerb, upperb, dst );
// inRange with Scalar bounds: dst(i) = 255 when lowerb <= src(i) < upperb
// per channel (upper bound exclusive), else 0. dst becomes CV_8UC1.
1182 void inRange(const Mat& src, const Scalar& lowerb,
1183 const Scalar& upperb, Mat& dst)
1185 static InRangeSFunc tab[] =
// Same 4x8 layout as the Mat-bounds table (indexed by src.type()); here the
// bound type is widened to int for the 8/16-bit depths so the converted
// Scalar bounds need not fit the source element type.
1187 inRangeS_<InRangeC1<uchar, int> >, 0,
1188 inRangeS_<InRangeC1<ushort, int> >,
1189 inRangeS_<InRangeC1<short, int> >,
1190 inRangeS_<InRangeC1<int, int> >,
1191 inRangeS_<InRangeC1<float, float> >,
1192 inRangeS_<InRangeC1<double, double> >, 0,
1194 inRangeS_<InRangeC2<uchar, int> >, 0,
1195 inRangeS_<InRangeC2<ushort, int> >,
1196 inRangeS_<InRangeC2<short, int> >,
1197 inRangeS_<InRangeC2<int, int> >,
1198 inRangeS_<InRangeC2<float, float> >,
1199 inRangeS_<InRangeC2<double, double> >, 0,
1201 inRangeS_<InRangeC3<uchar, int> >, 0,
1202 inRangeS_<InRangeC3<ushort, int> >,
1203 inRangeS_<InRangeC3<short, int> >,
1204 inRangeS_<InRangeC3<int, int> >,
1205 inRangeS_<InRangeC3<float, float> >,
1206 inRangeS_<InRangeC3<double, double> >, 0,
1208 inRangeS_<InRangeC4<uchar, int> >, 0,
1209 inRangeS_<InRangeC4<ushort, int> >,
1210 inRangeS_<InRangeC4<short, int> >,
1211 inRangeS_<InRangeC4<int, int> >,
1212 inRangeS_<InRangeC4<float, float> >,
1213 inRangeS_<InRangeC4<double, double> >, 0
1216 CV_Assert( src.channels() <= 4 );
1218 InRangeSFunc func = tab[src.type()];
1219 CV_Assert( func != 0 );
1221 dst.create(src.size(), CV_8U);
1222 func( src, lowerb, upperb, dst );
1225 /****************************************************************************************\
1227 \****************************************************************************************/
// Comparison functor: 255 when a == b, else 0 ((uchar)-(bool) trick).
// WT is the type of the second (possibly scalar-derived) operand; defaults to T.
1229 template<typename T, typename WT=T> struct CmpEQ
1233 typedef uchar rtype;
1234 uchar operator()(T a, WT b) const { return (uchar)-(a == b); }
// Comparison functor: 255 when a > b, else 0 ((uchar)-(bool) trick).
// WT is the type of the second (possibly scalar-derived) operand; defaults to T.
1237 template<typename T, typename WT=T> struct CmpGT
1241 typedef uchar rtype;
1242 uchar operator()(T a, WT b) const { return (uchar)-(a > b); }
// Comparison functor: 255 when a >= b, else 0 ((uchar)-(bool) trick).
// WT is the type of the second (possibly scalar-derived) operand; defaults to T.
1245 template<typename T, typename WT=T> struct CmpGE
1249 typedef uchar rtype;
1250 uchar operator()(T a, WT b) const { return (uchar)-(a >= b); }
// Element-wise comparison of two single-channel matrices of equal size/type;
// dst is CV_8U holding 255 where the relation holds, 0 elsewhere. Only GT and
// EQ kernels exist: the other relations are reduced to them by swapping
// operands and/or inverting the result (the normalizing switch over cmpOp is
// not visible in this view).
1253 void compare( const Mat& src1, const Mat& src2, Mat& dst, int cmpOp )
// tab[0]: "greater than" kernels, tab[1]: "equal" kernels; indexed by depth.
1255 static BinaryFunc tab[][8] =
1257 {binaryOpC1_<CmpGT<uchar>,VCmpGT8u>, 0,
1258 binaryOpC1_<CmpGT<ushort>,NoVec>,
1259 binaryOpC1_<CmpGT<short>,NoVec>,
1260 binaryOpC1_<CmpGT<int>,NoVec>,
1261 binaryOpC1_<CmpGT<float>,NoVec>,
1262 binaryOpC1_<CmpGT<double>,NoVec>, 0},
1264 {binaryOpC1_<CmpEQ<uchar>,VCmpEQ8u>, 0,
1265 binaryOpC1_<CmpEQ<ushort>,NoVec>,
// Equality on short reuses the ushort kernel: equal bit patterns compare
// equal regardless of signedness.
1266 binaryOpC1_<CmpEQ<ushort>,NoVec>, // same function as for ushort's
1267 binaryOpC1_<CmpEQ<int>,NoVec>,
1268 binaryOpC1_<CmpEQ<float>,NoVec>,
1269 binaryOpC1_<CmpEQ<double>,NoVec>, 0},
1272 dst.create(src1.rows, src1.cols, CV_8U);
1273 CV_Assert(src1.size() == src2.size() && src1.type() == src2.type() && src1.channels() == 1);
1275 int depth = src1.depth();
1276 const Mat *psrc1 = &src1, *psrc2 = &src2;
1277 bool invflag = false;
// Operand swaps used when normalizing cmpOp (e.g. a < b computed as b > a).
1285 std::swap( psrc1, psrc2 );
1289 std::swap( psrc1, psrc2 );
1299 CV_Error(CV_StsBadArg, "Unknown comparison method");
1302 BinaryFunc func = tab[cmpOp == CMP_EQ][depth];
1303 CV_Assert( func != 0 );
1304 func( *psrc1, *psrc2, dst );
// NOTE(review): presumably executed only when invflag is set (guard not
// visible in this view) — confirm.
1306 bitwise_not(dst, dst);
// Element-wise comparison of a single-channel matrix against a scalar value;
// dst is CV_8U with 255 where the relation holds. Only EQ/GT/GE kernels
// exist; other relations are reduced to them (the normalizing switch over
// cmpOp, lines between invflag and CV_Error, is not visible in this view).
1310 void compare( const Mat& src1, double value, Mat& dst, int cmpOp )
// Row order: tab[0] = EQ, tab[1] = GT, tab[2] = GE; indexed by depth.
// 8/16-bit rows widen the scalar operand to int.
1312 static BinarySFuncC1 tab[][8] =
1314 {binarySOpC1_<CmpEQ<uchar, int> >, 0,
1315 binarySOpC1_<CmpEQ<ushort, int> >,
1316 binarySOpC1_<CmpEQ<short, int> >,
1317 binarySOpC1_<CmpEQ<int> >,
1318 binarySOpC1_<CmpEQ<float> >,
1319 binarySOpC1_<CmpEQ<double> >, 0},
1321 {binarySOpC1_<CmpGT<uchar, int> >, 0,
1322 binarySOpC1_<CmpGT<ushort, int> >,
1323 binarySOpC1_<CmpGT<short, int> >,
1324 binarySOpC1_<CmpGT<int> >,
1325 binarySOpC1_<CmpGT<float> >,
1326 binarySOpC1_<CmpGT<double> >, 0},
1328 {binarySOpC1_<CmpGE<uchar, int> >, 0,
1329 binarySOpC1_<CmpGE<ushort, int> >,
1330 binarySOpC1_<CmpGE<short, int> >,
1331 binarySOpC1_<CmpGE<int> >,
1332 binarySOpC1_<CmpGE<float> >,
1333 binarySOpC1_<CmpGE<double> >, 0},
1336 dst.create(src1.rows, src1.cols, CV_8U);
1337 CV_Assert(src1.channels() == 1);
1338 int depth = src1.depth();
1339 bool invflag = false;
1360 CV_Error(CV_StsBadArg, "Unknown comparison method");
// Select the kernel row matching the (normalized) comparison.
1363 BinarySFuncC1 func = tab[cmpOp == CMP_EQ ? 0 : cmpOp == CMP_GT ? 1 : 2][depth];
1364 CV_Assert( func != 0 );
1365 func( src1, dst, value );
// NOTE(review): presumably executed only when invflag is set (guard not
// visible in this view) — confirm.
1367 bitwise_not(dst, dst);
1370 /****************************************************************************************\
1372 \****************************************************************************************/
// Binary functor yielding the smaller of two values; specialized for uchar
// below.
1374 template<typename T> struct MinOp
1379 T operator ()(T a, T b) const { return std::min(a, b); }
// Binary functor yielding the larger of two values; specialized for uchar
// below.
1382 template<typename T> struct MaxOp
1387 T operator ()(T a, T b) const { return std::max(a, b); }
// uchar fast paths via the CV_MIN_8U / CV_MAX_8U macros (defined elsewhere;
// presumably branch-free 8-bit variants — confirm at the macro definitions).
1390 template<> inline uchar MinOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MIN_8U(a, b); }
1391 template<> inline uchar MaxOp<uchar>::operator ()(uchar a, uchar b) const { return CV_MAX_8U(a, b); }
// Per-element minimum of two matrices of identical size and type.
// Depth-indexed dispatch; VMin* entries supply vectorized kernels where
// available, NoVec falls back to the scalar functor.
1393 void min( const Mat& src1, const Mat& src2, Mat& dst )
1395 static BinaryFunc tab[] =
1397 binaryOpC1_<MinOp<uchar>,VMin8u>, 0, binaryOpC1_<MinOp<ushort>,VMin16u>,
1398 binaryOpC1_<MinOp<short>,VMin16s>, binaryOpC1_<MinOp<int>,NoVec>,
1399 binaryOpC1_<MinOp<float>,VMin32f>, binaryOpC1_<MinOp<double>,NoVec>, 0
1402 BinaryFunc func = tab[src1.depth()];
1403 CV_Assert(src1.size() == src2.size() && src1.type() == src2.type() && func != 0);
1404 dst.create(src1.size(), src1.type());
// Returning a void call from a void function is legal; call-then-return.
1406 return func( src1, src2, dst );
// Per-element maximum of two matrices of identical size and type.
// Depth-indexed dispatch; VMax* entries supply vectorized kernels where
// available, NoVec falls back to the scalar functor.
1409 void max( const Mat& src1, const Mat& src2, Mat& dst )
1411 static BinaryFunc tab[] =
1413 binaryOpC1_<MaxOp<uchar>,VMax8u>, 0, binaryOpC1_<MaxOp<ushort>,VMax16u>,
1414 binaryOpC1_<MaxOp<short>,VMax16s>, binaryOpC1_<MaxOp<int>,NoVec>,
1415 binaryOpC1_<MaxOp<float>,VMax32f>, binaryOpC1_<MaxOp<double>,NoVec>, 0
1418 BinaryFunc func = tab[src1.depth()];
1419 CV_Assert(src1.size() == src2.size() && src1.type() == src2.type() && func != 0);
1420 dst.create(src1.size(), src1.type());
// Returning a void call from a void function is legal; call-then-return.
1422 return func( src1, src2, dst );
// Per-element minimum of a matrix and a scalar: dst(i) = min(src1(i), value).
// Depth-indexed dispatch (0 entries for unsupported CV_8S and the sentinel).
1425 void min( const Mat& src1, double value, Mat& dst )
1427 static BinarySFuncC1 tab[] =
1429 binarySOpC1_<MinOp<uchar> >, 0,
1430 binarySOpC1_<MinOp<ushort> >,
1431 binarySOpC1_<MinOp<short> >,
1432 binarySOpC1_<MinOp<int> >,
1433 binarySOpC1_<MinOp<float> >,
1434 binarySOpC1_<MinOp<double> >, 0
1437 BinarySFuncC1 func = tab[src1.depth()];
1438 CV_Assert(func != 0);
1439 dst.create(src1.size(), src1.type());
// Returning a void call from a void function is legal; call-then-return.
1440 return func( src1, dst, value );
// Per-element maximum of a matrix and a scalar: dst(i) = max(src1(i), value).
// Depth-indexed dispatch (0 entries for unsupported CV_8S and the sentinel).
1443 void max( const Mat& src1, double value, Mat& dst )
1445 static BinarySFuncC1 tab[] =
1447 binarySOpC1_<MaxOp<uchar> >, 0,
1448 binarySOpC1_<MaxOp<ushort> >,
1449 binarySOpC1_<MaxOp<short> >,
1450 binarySOpC1_<MaxOp<int> >,
1451 binarySOpC1_<MaxOp<float> >,
1452 binarySOpC1_<MaxOp<double> >, 0
1455 BinarySFuncC1 func = tab[src1.depth()];
1456 CV_Assert(func != 0);
1457 dst.create(src1.size(), src1.type());
// Returning a void call from a void function is legal; call-then-return.
1458 return func( src1, dst, value );
1463 /****************************************************************************************\
1464 * Earlier API: cvAdd etc. *
1465 \****************************************************************************************/
// C-API wrapper forwarding to cv::bitwise_not. Unlike the C++ API, dst must
// already be allocated with the same size and type as src.
1468 cvNot( const CvArr* srcarr, CvArr* dstarr )
1470 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
1471 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
1472 cv::bitwise_not( src, dst );
// C-API wrapper forwarding to cv::bitwise_and; dst must be preallocated
// with src1's size/type.
1477 cvAnd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
1479 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1480 dst = cv::cvarrToMat(dstarr), mask;
1481 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm; an empty mask means "no mask" to the cv:: call.
1483 mask = cv::cvarrToMat(maskarr);
1484 cv::bitwise_and( src1, src2, dst, mask );
// C-API wrapper forwarding to cv::bitwise_or; dst must be preallocated
// with src1's size/type.
1488 cvOr( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
1490 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1491 dst = cv::cvarrToMat(dstarr), mask;
1492 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm; an empty mask means "no mask" to the cv:: call.
1494 mask = cv::cvarrToMat(maskarr);
1495 cv::bitwise_or( src1, src2, dst, mask );
// C-API wrapper forwarding to cv::bitwise_xor; dst must be preallocated
// with src1's size/type.
1500 cvXor( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
1502 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1503 dst = cv::cvarrToMat(dstarr), mask;
1504 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm; an empty mask means "no mask" to the cv:: call.
1506 mask = cv::cvarrToMat(maskarr);
1507 cv::bitwise_xor( src1, src2, dst, mask );
// C-API wrapper: bitwise AND of an array with a scalar via cv::bitwise_and;
// dst must be preallocated with src's size/type.
1512 cvAndS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
1514 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
1515 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1517 mask = cv::cvarrToMat(maskarr);
1518 cv::bitwise_and( src, s, dst, mask );
// C-API wrapper: bitwise OR of an array with a scalar via cv::bitwise_or;
// dst must be preallocated with src's size/type.
1523 cvOrS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
1525 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
1526 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1528 mask = cv::cvarrToMat(maskarr);
1529 cv::bitwise_or( src, s, dst, mask );
// C-API wrapper: bitwise XOR of an array with a scalar via cv::bitwise_xor;
// dst must be preallocated with src's size/type.
1534 cvXorS( const CvArr* srcarr, CvScalar s, CvArr* dstarr, const CvArr* maskarr )
1536 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), mask;
1537 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1539 mask = cv::cvarrToMat(maskarr);
1540 cv::bitwise_xor( src, s, dst, mask );
// C-API wrapper forwarding to cv::add (saturating element-wise addition);
// dst must be preallocated with src1's size/type.
1543 CV_IMPL void cvAdd( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
1545 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1546 dst = cv::cvarrToMat(dstarr), mask;
1547 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1549 mask = cv::cvarrToMat(maskarr);
1550 cv::add( src1, src2, dst, mask );
// C-API wrapper forwarding to cv::subtract (dst = src1 - src2, saturating);
// dst must be preallocated with src1's size/type.
1553 CV_IMPL void cvSub( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr, const CvArr* maskarr )
1555 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1556 dst = cv::cvarrToMat(dstarr), mask;
1557 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1559 mask = cv::cvarrToMat(maskarr);
1560 cv::subtract( src1, src2, dst, mask );
// C-API wrapper: dst = src1 + value (scalar), via cv::add; dst must be
// preallocated with src1's size/type.
1563 CV_IMPL void cvAddS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
1565 cv::Mat src1 = cv::cvarrToMat(srcarr1),
1566 dst = cv::cvarrToMat(dstarr), mask;
1567 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1569 mask = cv::cvarrToMat(maskarr);
1570 cv::add( src1, value, dst, mask );
// C-API wrapper for reversed scalar subtraction: dst = value - src1, via
// cv::subtract with the scalar as the first operand. dst must be
// preallocated with src1's size/type.
1573 CV_IMPL void cvSubRS( const CvArr* srcarr1, CvScalar value, CvArr* dstarr, const CvArr* maskarr )
1575 cv::Mat src1 = cv::cvarrToMat(srcarr1),
1576 dst = cv::cvarrToMat(dstarr), mask;
1577 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
// NOTE(review): presumably guarded by a maskarr != 0 check (not visible
// here) — confirm.
1579 mask = cv::cvarrToMat(maskarr);
1580 cv::subtract( value, src1, dst, mask );
// C-API wrapper: dst = scale * src1 .* src2 via cv::multiply; dst must be
// preallocated with src1's size/type.
1583 CV_IMPL void cvMul( const CvArr* srcarr1, const CvArr* srcarr2,
1584 CvArr* dstarr, double scale )
1586 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1587 dst = cv::cvarrToMat(dstarr);
1588 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1589 cv::multiply( src1, src2, dst, scale );
// C-API wrapper for element-wise division. Two paths: dst = scale*src1/src2,
// or dst = scale/src2 (scaled reciprocal). The condition selecting between
// them — presumably srcarr1 != 0 — is not visible in this view; confirm.
1592 CV_IMPL void cvDiv( const CvArr* srcarr1, const CvArr* srcarr2,
1593 CvArr* dstarr, double scale )
// Only src2/dst are converted up front; 'mask' is declared but unused in the
// visible code.
1595 cv::Mat src2 = cv::cvarrToMat(srcarr2),
1596 dst = cv::cvarrToMat(dstarr), mask;
1597 CV_Assert( src2.size() == dst.size() && src2.type() == dst.type() );
1600 cv::divide( cv::cvarrToMat(srcarr1), src2, dst, scale );
1602 cv::divide( scale, src2, dst );
// C-API wrapper: dst = alpha*src1 + beta*src2 + gamma via cv::addWeighted;
// dst must be preallocated with src1's size/type.
1607 cvAddWeighted( const CvArr* srcarr1, double alpha,
1608 const CvArr* srcarr2, double beta,
1609 double gamma, CvArr* dstarr )
1611 cv::Mat src1 = cv::cvarrToMat(srcarr1), src2 = cv::cvarrToMat(srcarr2),
1612 dst = cv::cvarrToMat(dstarr);
1613 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1614 cv::addWeighted( src1, alpha, src2, beta, gamma, dst );
// C-API wrapper: dst = |src1 - src2| via cv::absdiff; dst must be
// preallocated with src1's size/type.
1619 cvAbsDiff( const CvArr* srcarr1, const CvArr* srcarr2, CvArr* dstarr )
1621 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1622 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1624 cv::absdiff( src1, cv::cvarrToMat(srcarr2), dst );
// C-API wrapper: dst = |src1 - scalar| via cv::absdiff; dst must be
// preallocated with src1's size/type. Note the C API's argument order
// (dst before the scalar) differs from the C++ overload's.
1629 cvAbsDiffS( const CvArr* srcarr1, CvArr* dstarr, CvScalar scalar )
1631 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1632 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1634 cv::absdiff( src1, scalar, dst );
// C-API wrapper forwarding to the per-pixel-bounds cv::inRange; dst must be
// preallocated as CV_8U with src1's size.
1638 cvInRange( const void* srcarr1, const void* srcarr2,
1639 const void* srcarr3, void* dstarr )
1641 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1642 CV_Assert( src1.size() == dst.size() && dst.type() == CV_8U );
1644 cv::inRange( src1, cv::cvarrToMat(srcarr2), cv::cvarrToMat(srcarr3), dst );
// C-API wrapper forwarding to the scalar-bounds cv::inRange; dst must be
// preallocated as CV_8U with src1's size.
1648 cvInRangeS( const void* srcarr1, CvScalar lowerb, CvScalar upperb, void* dstarr )
1650 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1651 CV_Assert( src1.size() == dst.size() && dst.type() == CV_8U );
1653 cv::inRange( src1, lowerb, upperb, dst );
// C-API wrapper forwarding to cv::compare (two arrays); dst must be
// preallocated as CV_8U with src1's size.
1658 cvCmp( const void* srcarr1, const void* srcarr2, void* dstarr, int cmp_op )
1660 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1661 CV_Assert( src1.size() == dst.size() && dst.type() == CV_8U );
1663 cv::compare( src1, cv::cvarrToMat(srcarr2), dst, cmp_op );
// C-API wrapper forwarding to cv::compare (array vs. scalar); dst must be
// preallocated as CV_8U with src1's size.
1668 cvCmpS( const void* srcarr1, double value, void* dstarr, int cmp_op )
1670 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1671 CV_Assert( src1.size() == dst.size() && dst.type() == CV_8U );
1673 cv::compare( src1, value, dst, cmp_op );
// C-API wrapper forwarding to cv::min (two arrays); dst must be preallocated
// with src1's size/type.
1678 cvMin( const void* srcarr1, const void* srcarr2, void* dstarr )
1680 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1681 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1683 cv::min( src1, cv::cvarrToMat(srcarr2), dst );
// C-API wrapper forwarding to cv::max (two arrays); dst must be preallocated
// with src1's size/type.
1688 cvMax( const void* srcarr1, const void* srcarr2, void* dstarr )
1690 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1691 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1693 cv::max( src1, cv::cvarrToMat(srcarr2), dst );
// C-API wrapper forwarding to cv::min (array vs. scalar); dst must be
// preallocated with src1's size/type.
1697 cvMinS( const void* srcarr1, double value, void* dstarr )
1699 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1700 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1702 cv::min( src1, value, dst );
// C-API wrapper forwarding to cv::max (array vs. scalar); dst must be
// preallocated with src1's size/type.
1707 cvMaxS( const void* srcarr1, double value, void* dstarr )
1709 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
1710 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
1712 cv::max( src1, value, dst );