/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                           License Agreement
//                For Open Source Computer Vision Library
//
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/

#include "precomp.hpp"

namespace cv
{

inline float sqr(uchar a) { return CV_8TO32F_SQR(a); }
inline float sqr(float a) { return a*a; }
inline double sqr(double a) { return a*a; }

inline Vec3f sqr(const Vec3b& a)
{
    return Vec3f(CV_8TO32F_SQR(a[0]), CV_8TO32F_SQR(a[1]), CV_8TO32F_SQR(a[2]));
}

inline Vec3f sqr(const Vec3f& a)
{
    return Vec3f(a[0]*a[0], a[1]*a[1], a[2]*a[2]);
}

inline Vec3d sqr(const Vec3d& a)
{
    return Vec3d(a[0]*a[0], a[1]*a[1], a[2]*a[2]);
}
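
// The sqr() overloads above (and the multiply()/addw() families below) let
// the accumulation templates stay type-agnostic: 8-bit operands are widened
// to float through the CV_8TO32F* conversion macros, while float/double
// operands are computed directly. For example, sqr(Vec3b(3,4,5)) evaluates
// to Vec3f(9.f, 16.f, 25.f).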

inline float multiply(uchar a, uchar b) { return CV_8TO32F(a)*CV_8TO32F(b); }
inline float multiply(float a, float b) { return a*b; }
inline double multiply(double a, double b) { return a*b; }

inline Vec3f multiply(const Vec3b& a, const Vec3b& b)
{
    return Vec3f(
        CV_8TO32F(a[0])*CV_8TO32F(b[0]),
        CV_8TO32F(a[1])*CV_8TO32F(b[1]),
        CV_8TO32F(a[2])*CV_8TO32F(b[2]));
}

inline Vec3f multiply(const Vec3f& a, const Vec3f& b)
{
    return Vec3f(a[0]*b[0], a[1]*b[1], a[2]*b[2]);
}

inline Vec3d multiply(const Vec3d& a, const Vec3d& b)
{
    return Vec3d(a[0]*b[0], a[1]*b[1], a[2]*b[2]);
}
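
// multiply() backs accumulateProduct(): each overload returns the per-channel
// product widened to the accumulator type. E.g. multiply((uchar)200, (uchar)200)
// returns 40000.f, far outside the 8-bit range of the inputs.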

inline float addw(uchar a, float alpha, float b, float beta)
{
    return b*beta + CV_8TO32F(a)*alpha;
}
inline float addw(float a, float alpha, float b, float beta)
{
    return b*beta + a*alpha;
}
inline double addw(uchar a, double alpha, double b, double beta)
{
    return b*beta + CV_8TO32F(a)*alpha;
}
inline double addw(float a, double alpha, double b, double beta)
{
    return b*beta + a*alpha;
}
inline double addw(double a, double alpha, double b, double beta)
{
    return b*beta + a*alpha;
}

inline Vec3f addw(const Vec3b& a, float alpha, const Vec3f& b, float beta)
{
    return Vec3f(b[0]*beta + CV_8TO32F(a[0])*alpha,
                 b[1]*beta + CV_8TO32F(a[1])*alpha,
                 b[2]*beta + CV_8TO32F(a[2])*alpha);
}
inline Vec3f addw(const Vec3f& a, float alpha, const Vec3f& b, float beta)
{
    return Vec3f(b[0]*beta + a[0]*alpha, b[1]*beta + a[1]*alpha, b[2]*beta + a[2]*alpha);
}
inline Vec3d addw(const Vec3b& a, double alpha, const Vec3d& b, double beta)
{
    return Vec3d(b[0]*beta + CV_8TO32F(a[0])*alpha,
                 b[1]*beta + CV_8TO32F(a[1])*alpha,
                 b[2]*beta + CV_8TO32F(a[2])*alpha);
}
inline Vec3d addw(const Vec3f& a, double alpha, const Vec3d& b, double beta)
{
    return Vec3d(b[0]*beta + a[0]*alpha, b[1]*beta + a[1]*alpha, b[2]*beta + a[2]*alpha);
}
inline Vec3d addw(const Vec3d& a, double alpha, const Vec3d& b, double beta)
{
    return Vec3d(b[0]*beta + a[0]*alpha, b[1]*beta + a[1]*alpha, b[2]*beta + a[2]*alpha);
}
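
// addw() is the running-average kernel: it returns a*alpha + b*beta. The
// callers below always pass beta = 1 - alpha, so the result is a convex blend
// of the new sample a and the accumulator value b.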

template<typename T, typename AT> void
acc_( const Mat& _src, Mat& _dst )
{
    Size size = _src.size();
    size.width *= _src.channels();

    if( _src.isContinuous() && _dst.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);

        for( j = 0; j <= size.width - 4; j += 4 )
        {
            AT t0 = dst[j] + src[j], t1 = dst[j+1] + src[j+1];
            dst[j] = t0; dst[j+1] = t1;
            t0 = dst[j+2] + src[j+2]; t1 = dst[j+3] + src[j+3];
            dst[j+2] = t0; dst[j+3] = t1;
        }

        for( ; j < size.width; j++ )
            dst[j] += src[j];
    }
}
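
// The layout of acc_() above is shared by all the unmasked kernels in this
// file: when both matrices are continuous, the whole image is processed as a
// single long row; the main loop is manually unrolled four elements at a
// time, and a scalar tail loop handles the remaining 0-3 elements.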

template<typename T, typename AT> void
accSqr_( const Mat& _src, Mat& _dst )
{
    Size size = _src.size();
    size.width *= _src.channels();

    if( _src.isContinuous() && _dst.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);

        for( j = 0; j <= size.width - 4; j += 4 )
        {
            AT t0 = dst[j] + sqr(src[j]), t1 = dst[j+1] + sqr(src[j+1]);
            dst[j] = t0; dst[j+1] = t1;
            t0 = dst[j+2] + sqr(src[j+2]); t1 = dst[j+3] + sqr(src[j+3]);
            dst[j+2] = t0; dst[j+3] = t1;
        }

        for( ; j < size.width; j++ )
            dst[j] += sqr(src[j]);
    }
}

template<typename T, typename AT> void
accProd_( const Mat& _src1, const Mat& _src2, Mat& _dst )
{
    Size size = _src1.size();
    size.width *= _src1.channels();

    if( _src1.isContinuous() && _src2.isContinuous() && _dst.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src1 = (const T*)(_src1.data + _src1.step*i);
        const T* src2 = (const T*)(_src2.data + _src2.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);

        for( j = 0; j <= size.width - 4; j += 4 )
        {
            AT t0, t1;
            t0 = dst[j] + multiply(src1[j], src2[j]);
            t1 = dst[j+1] + multiply(src1[j+1], src2[j+1]);
            dst[j] = t0; dst[j+1] = t1;
            t0 = dst[j+2] + multiply(src1[j+2], src2[j+2]);
            t1 = dst[j+3] + multiply(src1[j+3], src2[j+3]);
            dst[j+2] = t0; dst[j+3] = t1;
        }

        for( ; j < size.width; j++ )
            dst[j] += multiply(src1[j], src2[j]);
    }
}

template<typename T, typename AT> void
accW_( const Mat& _src, Mat& _dst, double _alpha )
{
    AT alpha = (AT)_alpha, beta = (AT)(1 - _alpha);
    Size size = _src.size();
    size.width *= _src.channels();

    if( _src.isContinuous() && _dst.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);

        for( j = 0; j <= size.width - 4; j += 4 )
        {
            AT t0, t1;
            t0 = addw(src[j], alpha, dst[j], beta);
            t1 = addw(src[j+1], alpha, dst[j+1], beta);
            dst[j] = t0; dst[j+1] = t1;
            t0 = addw(src[j+2], alpha, dst[j+2], beta);
            t1 = addw(src[j+3], alpha, dst[j+3], beta);
            dst[j+2] = t0; dst[j+3] = t1;
        }

        for( ; j < size.width; j++ )
            dst[j] = addw(src[j], alpha, dst[j], beta);
    }
}

template<typename T, typename AT> void
accMask_( const Mat& _src, Mat& _dst, const Mat& _mask )
{
    Size size = _src.size();

    if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);
        const uchar* mask = _mask.data + _mask.step*i;

        for( j = 0; j < size.width; j++ )
            if( mask[j] )
                dst[j] += src[j];
    }
}
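
// In the masked kernels T covers a whole pixel (e.g. Vec3b rather than uchar;
// see the dispatch tables below), so a single mask byte gates all channels of
// that pixel, and the inner loops are left unrolled-free for simplicity.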

template<typename T, typename AT> void
accSqrMask_( const Mat& _src, Mat& _dst, const Mat& _mask )
{
    Size size = _src.size();

    if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);
        const uchar* mask = _mask.data + _mask.step*i;

        for( j = 0; j < size.width; j++ )
            if( mask[j] )
                dst[j] += sqr(src[j]);
    }
}

template<typename T, typename AT> void
accProdMask_( const Mat& _src1, const Mat& _src2, Mat& _dst, const Mat& _mask )
{
    Size size = _src1.size();

    if( _src1.isContinuous() && _src2.isContinuous() &&
        _dst.isContinuous() && _mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src1 = (const T*)(_src1.data + _src1.step*i);
        const T* src2 = (const T*)(_src2.data + _src2.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);
        const uchar* mask = _mask.data + _mask.step*i;

        for( j = 0; j < size.width; j++ )
            if( mask[j] )
                dst[j] += multiply(src1[j], src2[j]);
    }
}

template<typename T, typename AT> void
accWMask_( const Mat& _src, Mat& _dst, double _alpha, const Mat& _mask )
{
    typedef typename DataType<AT>::channel_type AT1;
    AT1 alpha = (AT1)_alpha, beta = (AT1)(1 - _alpha);
    Size size = _src.size();

    if( _src.isContinuous() && _dst.isContinuous() && _mask.isContinuous() )
    {
        size.width *= size.height;
        size.height = 1;
    }

    int i, j;
    for( i = 0; i < size.height; i++ )
    {
        const T* src = (const T*)(_src.data + _src.step*i);
        AT* dst = (AT*)(_dst.data + _dst.step*i);
        const uchar* mask = _mask.data + _mask.step*i;

        for( j = 0; j < size.width; j++ )
            if( mask[j] )
                dst[j] = addw(src[j], alpha, dst[j], beta);
    }
}

typedef void (*AccFunc)(const Mat&, Mat&);
typedef void (*AccMaskFunc)(const Mat&, Mat&, const Mat&);
typedef void (*AccProdFunc)(const Mat&, const Mat&, Mat&);
typedef void (*AccProdMaskFunc)(const Mat&, const Mat&, Mat&, const Mat&);
typedef void (*AccWFunc)(const Mat&, Mat&, double);
typedef void (*AccWMaskFunc)(const Mat&, Mat&, double, const Mat&);
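
// The public functions below all follow the same dispatch pattern: select a
// template instantiation from the (source, destination) type pair via an
// if/else chain, store it in one of the function pointers typedef'ed above,
// and forward the call. Unmasked variants dispatch on depth only; masked
// variants dispatch on the full type (depth plus channel count).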

void accumulate( const Mat& src, Mat& dst, const Mat& mask )
{
    CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );

    if( !mask.data )
    {
        AccFunc func = 0;
        if( src.depth() == CV_8U && dst.depth() == CV_32F )
            func = acc_<uchar, float>;
        else if( src.depth() == CV_8U && dst.depth() == CV_64F )
            func = acc_<uchar, double>;
        else if( src.depth() == CV_32F && dst.depth() == CV_32F )
            func = acc_<float, float>;
        else if( src.depth() == CV_32F && dst.depth() == CV_64F )
            func = acc_<float, double>;
        else if( src.depth() == CV_64F && dst.depth() == CV_64F )
            func = acc_<double, double>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst );
    }
    else
    {
        CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );

        AccMaskFunc func = 0;
        if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
            func = accMask_<uchar, float>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
            func = accMask_<Vec3b, Vec3f>;
        else if( src.type() == CV_8UC1 && dst.type() == CV_64FC1 )
            func = accMask_<uchar, double>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_64FC3 )
            func = accMask_<Vec3b, Vec3d>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
            func = accMask_<float, float>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
            func = accMask_<Vec3f, Vec3f>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_64FC1 )
            func = accMask_<float, double>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_64FC3 )
            func = accMask_<Vec3f, Vec3d>;
        else if( src.type() == CV_64FC1 && dst.type() == CV_64FC1 )
            func = accMask_<double, double>;
        else if( src.type() == CV_64FC3 && dst.type() == CV_64FC3 )
            func = accMask_<Vec3d, Vec3d>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, mask );
    }
}
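
// Usage sketch for accumulate() (illustrative only; `frames` and its element
// type are assumptions, not part of this file): summing a sequence of 8-bit
// frames into a float buffer to compute a mean image.
//
//     Mat sum = Mat::zeros(frames[0].size(), CV_32FC3);
//     for( size_t k = 0; k < frames.size(); k++ )
//         accumulate(frames[k], sum);           // sum += frames[k] (CV_8UC3)
//     Mat mean = sum * (1.0/frames.size());     // per-pixel average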

void accumulateSquare( const Mat& src, Mat& dst, const Mat& mask )
{
    CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );

    if( !mask.data )
    {
        AccFunc func = 0;
        if( src.depth() == CV_8U && dst.depth() == CV_32F )
            func = accSqr_<uchar, float>;
        else if( src.depth() == CV_8U && dst.depth() == CV_64F )
            func = accSqr_<uchar, double>;
        else if( src.depth() == CV_32F && dst.depth() == CV_32F )
            func = accSqr_<float, float>;
        else if( src.depth() == CV_32F && dst.depth() == CV_64F )
            func = accSqr_<float, double>;
        else if( src.depth() == CV_64F && dst.depth() == CV_64F )
            func = accSqr_<double, double>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst );
    }
    else
    {
        CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );

        AccMaskFunc func = 0;
        if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
            func = accSqrMask_<uchar, float>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
            func = accSqrMask_<Vec3b, Vec3f>;
        else if( src.type() == CV_8UC1 && dst.type() == CV_64FC1 )
            func = accSqrMask_<uchar, double>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_64FC3 )
            func = accSqrMask_<Vec3b, Vec3d>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
            func = accSqrMask_<float, float>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
            func = accSqrMask_<Vec3f, Vec3f>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_64FC1 )
            func = accSqrMask_<float, double>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_64FC3 )
            func = accSqrMask_<Vec3f, Vec3d>;
        else if( src.type() == CV_64FC1 && dst.type() == CV_64FC1 )
            func = accSqrMask_<double, double>;
        else if( src.type() == CV_64FC3 && dst.type() == CV_64FC3 )
            func = accSqrMask_<Vec3d, Vec3d>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, mask );
    }
}
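
// Usage sketch for accumulateSquare() (illustrative): combined with
// accumulate(), it yields a per-pixel variance estimate, since
// var = E[x^2] - (E[x])^2.
//
//     accumulate(frame, sum);           // sum   += frame
//     accumulateSquare(frame, sqsum);   // sqsum += frame*frame, per element
//     // after n frames: mean = sum/n; variance = sqsum/n - mean*mean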

void accumulateProduct( const Mat& src1, const Mat& src2, Mat& dst, const Mat& mask )
{
    CV_Assert( dst.size() == src1.size() && dst.channels() == src1.channels() &&
               src1.size() == src2.size() && src1.type() == src2.type() );

    if( !mask.data )
    {
        AccProdFunc func = 0;
        if( src1.depth() == CV_8U && dst.depth() == CV_32F )
            func = accProd_<uchar, float>;
        else if( src1.depth() == CV_8U && dst.depth() == CV_64F )
            func = accProd_<uchar, double>;
        else if( src1.depth() == CV_32F && dst.depth() == CV_32F )
            func = accProd_<float, float>;
        else if( src1.depth() == CV_32F && dst.depth() == CV_64F )
            func = accProd_<float, double>;
        else if( src1.depth() == CV_64F && dst.depth() == CV_64F )
            func = accProd_<double, double>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src1, src2, dst );
    }
    else
    {
        CV_Assert( mask.size() == src1.size() && mask.type() == CV_8UC1 );

        AccProdMaskFunc func = 0;
        if( src1.type() == CV_8UC1 && dst.type() == CV_32FC1 )
            func = accProdMask_<uchar, float>;
        else if( src1.type() == CV_8UC3 && dst.type() == CV_32FC3 )
            func = accProdMask_<Vec3b, Vec3f>;
        else if( src1.type() == CV_8UC1 && dst.type() == CV_64FC1 )
            func = accProdMask_<uchar, double>;
        else if( src1.type() == CV_8UC3 && dst.type() == CV_64FC3 )
            func = accProdMask_<Vec3b, Vec3d>;
        else if( src1.type() == CV_32FC1 && dst.type() == CV_32FC1 )
            func = accProdMask_<float, float>;
        else if( src1.type() == CV_32FC3 && dst.type() == CV_32FC3 )
            func = accProdMask_<Vec3f, Vec3f>;
        else if( src1.type() == CV_32FC1 && dst.type() == CV_64FC1 )
            func = accProdMask_<float, double>;
        else if( src1.type() == CV_32FC3 && dst.type() == CV_64FC3 )
            func = accProdMask_<Vec3f, Vec3d>;
        else if( src1.type() == CV_64FC1 && dst.type() == CV_64FC1 )
            func = accProdMask_<double, double>;
        else if( src1.type() == CV_64FC3 && dst.type() == CV_64FC3 )
            func = accProdMask_<Vec3d, Vec3d>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src1, src2, dst, mask );
    }
}
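
// Usage sketch for accumulateProduct() (illustrative; `prev` and `curr` are
// assumed same-type frames from the caller): accumulating per-pixel products
// of two aligned streams, e.g. toward a covariance estimate between
// consecutive frames (together with per-stream sums).
//
//     accumulateProduct(prev, curr, crossSum);  // crossSum += prev*curr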

void accumulateWeighted( const Mat& src, Mat& dst, double alpha, const Mat& mask )
{
    CV_Assert( dst.size() == src.size() && dst.channels() == src.channels() );

    if( !mask.data )
    {
        AccWFunc func = 0;
        if( src.depth() == CV_8U && dst.depth() == CV_32F )
            func = accW_<uchar, float>;
        else if( src.depth() == CV_8U && dst.depth() == CV_64F )
            func = accW_<uchar, double>;
        else if( src.depth() == CV_32F && dst.depth() == CV_32F )
            func = accW_<float, float>;
        else if( src.depth() == CV_32F && dst.depth() == CV_64F )
            func = accW_<float, double>;
        else if( src.depth() == CV_64F && dst.depth() == CV_64F )
            func = accW_<double, double>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, alpha );
    }
    else
    {
        CV_Assert( mask.size() == src.size() && mask.type() == CV_8UC1 );

        AccWMaskFunc func = 0;
        if( src.type() == CV_8UC1 && dst.type() == CV_32FC1 )
            func = accWMask_<uchar, float>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_32FC3 )
            func = accWMask_<Vec3b, Vec3f>;
        else if( src.type() == CV_8UC1 && dst.type() == CV_64FC1 )
            func = accWMask_<uchar, double>;
        else if( src.type() == CV_8UC3 && dst.type() == CV_64FC3 )
            func = accWMask_<Vec3b, Vec3d>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_32FC1 )
            func = accWMask_<float, float>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_32FC3 )
            func = accWMask_<Vec3f, Vec3f>;
        else if( src.type() == CV_32FC1 && dst.type() == CV_64FC1 )
            func = accWMask_<float, double>;
        else if( src.type() == CV_32FC3 && dst.type() == CV_64FC3 )
            func = accWMask_<Vec3f, Vec3d>;
        else if( src.type() == CV_64FC1 && dst.type() == CV_64FC1 )
            func = accWMask_<double, double>;
        else if( src.type() == CV_64FC3 && dst.type() == CV_64FC3 )
            func = accWMask_<Vec3d, Vec3d>;
        else
            CV_Error( CV_StsUnsupportedFormat, "" );

        func( src, dst, alpha, mask );
    }
}

}
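
// Usage sketch for accumulateWeighted() (illustrative; `frame` is assumed to
// come from a capture loop): a classic running-average background model.
// With alpha = 0.05 each update computes bg = 0.95*bg + 0.05*frame, so the
// model adapts slowly to scene changes.
//
//     cv::Mat bg;
//     frame.convertTo(bg, CV_32F);             // seed with the first frame
//     // ... per frame:
//     cv::accumulateWeighted(frame, bg, 0.05);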

CV_IMPL void
cvAcc( const void* arr, void* sumarr, const void* maskarr )
{
    cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::accumulate( src, dst, mask );
}

CV_IMPL void
cvSquareAcc( const void* arr, void* sumarr, const void* maskarr )
{
    cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::accumulateSquare( src, dst, mask );
}

CV_IMPL void
cvMultiplyAcc( const void* arr1, const void* arr2,
               void* sumarr, const void* maskarr )
{
    cv::Mat src1 = cv::cvarrToMat(arr1), src2 = cv::cvarrToMat(arr2);
    cv::Mat dst = cv::cvarrToMat(sumarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::accumulateProduct( src1, src2, dst, mask );
}

CV_IMPL void
cvRunningAvg( const void* arr, void* sumarr, double alpha, const void* maskarr )
{
    cv::Mat src = cv::cvarrToMat(arr), dst = cv::cvarrToMat(sumarr), mask;
    if( maskarr )
        mask = cv::cvarrToMat(maskarr);
    cv::accumulateWeighted( src, dst, alpha, mask );
}
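
// Usage sketch for the legacy C API above (illustrative; `img` and `acc` are
// assumed to be IplImage* created by the caller, `acc` with IPL_DEPTH_32F):
//
//     cvAcc( img, acc, 0 );               // acc += img, no mask
//     cvRunningAvg( img, acc, 0.05, 0 );  // acc = 0.95*acc + 0.05*img
//
// Each wrapper converts the CvArr* headers to cv::Mat with cv::cvarrToMat
// (no pixel data is copied) and forwards to the C++ implementation.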