/*M///////////////////////////////////////////////////////////////////////////////////////
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//                        For Open Source Computer Vision Library
// Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
// Copyright (C) 2009, Willow Garage Inc., all rights reserved.
// Third party copyrights are property of their respective owners.
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//   * The name of the copyright holders may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
/****************************************************************************************\
*                        [scaled] Identity matrix initialization                         *
\****************************************************************************************/
Mat::Mat(const IplImage* img, bool copyData)
    : flags(0), rows(0), cols(0), step(0), data(0),
      refcount(0), datastart(0), dataend(0)
    cvGetMat( img, &m, &coi );
    create( m.rows, m.cols, CV_MAT_TYPE(m.type) );
    create( m.rows, m.cols, CV_MAT_DEPTH(m.type) );
    const int pairs[] = { coi-1, 0 };
    cvMixChannels( (const CvArr**)&img, 1, (CvArr**)&pdst, 1, pairs, 1 );
    CV_Error(CV_BadCOI, "When copyData=false, COI must not be set");*/
    *this = Mat(m.rows, m.cols, CV_MAT_TYPE(m.type), m.data.ptr, m.step);
    datastart = (uchar*)img->imageData;
    dataend = datastart + img->imageSize;

Mat cvarrToMat(const CvArr* arr, bool copyData, bool allowND, int coiMode)
    m = Mat((const CvMat*)arr, copyData );
    else if( CV_IS_IMAGE(arr) )
        const IplImage* iplimg = (const IplImage*)arr;
        m = Mat(iplimg, copyData );
        if( coiMode == 0 && cvGetImageCOI(iplimg) > 0 )
            CV_Error(CV_BadCOI, "COI is not supported by the function");
    CvMat hdr, *cvmat = cvGetMat( arr, &hdr, 0, allowND ? 1 : 0 );
    m = Mat(cvmat, copyData);
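
/*
    Usage sketch (illustrative, not part of the library): wrapping a legacy
    CvMat/IplImage in a cv::Mat header. The image variable and file name below
    are invented for the example; cvLoadImage comes from highgui.

        IplImage* ipl = cvLoadImage("input.jpg", 1);   // hypothetical input
        cv::Mat m  = cv::cvarrToMat(ipl);          // no copy; m shares ipl's pixels
        cv::Mat m2 = cv::cvarrToMat(ipl, true);    // deep copy; m2 owns its data
*/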
void extractImageCOI(const CvArr* arr, Mat& ch, int coi)
    Mat mat = cvarrToMat(arr, false, true, 1);
    ch.create(mat.size(), mat.depth());
    CV_Assert( CV_IS_IMAGE(arr) && (coi = cvGetImageCOI((const IplImage*)arr)-1) >= 0 );
    CV_Assert(0 <= coi && coi < mat.channels());
    int _pairs[] = { coi, 0 };
    mixChannels( &mat, 1, &ch, 1, _pairs, 1 );
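
/*
    Usage sketch (illustrative): pulling the channel selected by an IplImage's
    COI into a single-channel cv::Mat. Assumes a 3-channel 8-bit image; the
    variable `ipl` is invented for the example.

        cvSetImageCOI(ipl, 2);            // select the 2nd channel
        cv::Mat ch;
        cv::extractImageCOI(ipl, ch);     // ch is CV_8UC1 holding that channel
*/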
void insertImageCOI(const Mat& ch, CvArr* arr, int coi)
    Mat mat = cvarrToMat(arr, false, true, 1);
    CV_Assert( CV_IS_IMAGE(arr) && (coi = cvGetImageCOI((const IplImage*)arr)-1) >= 0 );
    CV_Assert(ch.size() == mat.size() && ch.depth() == mat.depth() && 0 <= coi && coi < mat.channels());
    int _pairs[] = { 0, coi };
    mixChannels( &ch, 1, &mat, 1, _pairs, 1 );
Mat Mat::reshape(int new_cn, int new_rows) const
    int total_width = cols * cn;

    if( (new_cn > total_width || total_width % new_cn != 0) && new_rows == 0 )
        new_rows = rows * total_width / new_cn;

    if( new_rows != 0 && new_rows != rows )
        int total_size = total_width * rows;
        if( !isContinuous() )
            CV_Error( CV_BadStep,
                "The matrix is not continuous, thus its number of rows can not be changed" );

        if( (unsigned)new_rows > (unsigned)total_size )
            CV_Error( CV_StsOutOfRange, "Bad new number of rows" );

        total_width = total_size / new_rows;

        if( total_width * new_rows != total_size )
            CV_Error( CV_StsBadArg, "The total number of matrix elements "
                "is not divisible by the new number of rows" );

        hdr.step = total_width * elemSize1();

    int new_width = total_width / new_cn;

    if( new_width * new_cn != total_width )
        CV_Error( CV_BadNumChannels,
            "The total width is not divisible by the new number of channels" );

    hdr.cols = new_width;
    hdr.flags = (hdr.flags & ~CV_MAT_CN_MASK) | ((new_cn-1) << CV_CN_SHIFT);
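
/*
    Usage sketch (illustrative): reshape only rewrites the header, so it is
    O(1) and shares the data. A continuous 100x3 CV_32FC1 point list can be
    viewed as a 100x1 3-channel matrix, or flattened into one row.

        cv::Mat pts(100, 3, CV_32FC1);
        cv::Mat pts3c = pts.reshape(3, 0);   // 100x1, CV_32FC3, same data
        cv::Mat row   = pts.reshape(1, 1);   // 1x300, CV_32FC1, same data
*/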
setIdentity( Mat& m, const Scalar& s )
    int i, j, rows = m.rows, cols = m.cols, type = m.type();

    if( type == CV_32FC1 )
        float* data = (float*)m.data;
        float val = (float)s[0];
        size_t step = m.step/sizeof(data[0]);

        for( i = 0; i < rows; i++, data += step )
            for( j = 0; j < cols; j++ )

    else if( type == CV_64FC1 )
        double* data = (double*)m.data;
        size_t step = m.step/sizeof(data[0]);

        for( i = 0; i < rows; i++, data += step )
            for( j = 0; j < cols; j++ )

Scalar trace( const Mat& m )
    int i, type = m.type();
    int nm = std::min(m.rows, m.cols);

    if( type == CV_32FC1 )
        const float* ptr = (const float*)m.data;
        size_t step = m.step/sizeof(ptr[0]) + 1;

        for( i = 0; i < nm; i++ )

    if( type == CV_64FC1 )
        const double* ptr = (const double*)m.data;
        size_t step = m.step/sizeof(ptr[0]) + 1;

        for( i = 0; i < nm; i++ )

    return cv::sum(m.diag());
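
/*
    Usage sketch (illustrative): the CV_32FC1/CV_64FC1 fast paths above are hit
    by the most common calls; every other type falls through to the generic code.

        cv::Mat m(3, 3, CV_32FC1);
        cv::setIdentity(m, cv::Scalar(5));   // m = 5*I
        cv::Scalar t = cv::trace(m);         // t[0] == 15
*/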
/****************************************************************************************\
*                                    Matrix transpose                                    *
\****************************************************************************************/
template<typename T> static void
transposeI_( Mat& mat )
    int rows = mat.rows, cols = mat.cols;
    uchar* data = mat.data;
    size_t step = mat.step;

    for( int i = 0; i < rows; i++ )
        T* row = (T*)(data + step*i);
        uchar* data1 = data + i*sizeof(T);
        for( int j = i+1; j < cols; j++ )
            std::swap( row[j], *(T*)(data1 + step*j) );

template<typename T> static void
transpose_( const Mat& src, Mat& dst )
    int rows = dst.rows, cols = dst.cols;
    uchar* data = src.data;
    size_t step = src.step;

    for( int i = 0; i < rows; i++ )
        T* row = (T*)(dst.data + dst.step*i);
        uchar* data1 = data + i*sizeof(T);
        for( int j = 0; j < cols; j++ )
            row[j] = *(T*)(data1 + step*j);

typedef void (*TransposeInplaceFunc)( Mat& mat );
typedef void (*TransposeFunc)( const Mat& src, Mat& dst );

void transpose( const Mat& src, Mat& dst )
    TransposeInplaceFunc itab[] =
        transposeI_<uchar>,            // 1
        transposeI_<ushort>,           // 2
        transposeI_<Vec<uchar,3> >,    // 3
        transposeI_<int>,              // 4
        transposeI_<Vec<ushort,3> >,   // 6
        transposeI_<int64>,            // 8
        transposeI_<Vec<int,3> >,      // 12
        transposeI_<Vec<int64,2> >,    // 16
        transposeI_<Vec<int64,3> >,    // 24
        transposeI_<Vec<int64,4> >     // 32

    TransposeFunc tab[] =
        transpose_<uchar>,             // 1
        transpose_<ushort>,            // 2
        transpose_<Vec<uchar,3> >,     // 3
        transpose_<int>,               // 4
        transpose_<Vec<ushort,3> >,    // 6
        transpose_<int64>,             // 8
        transpose_<Vec<int,3> >,       // 12
        transpose_<Vec<int64,2> >,     // 16
        transpose_<Vec<int64,3> >,     // 24
        transpose_<Vec<int64,4> >      // 32

    size_t esz = src.elemSize();
    CV_Assert( esz <= (size_t)32 );

    if( dst.data == src.data && dst.cols == dst.rows )
        TransposeInplaceFunc func = itab[esz];
        CV_Assert( func != 0 );

    dst.create( src.cols, src.rows, src.type() );
    TransposeFunc func = tab[esz];
    CV_Assert( func != 0 );
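
/*
    Usage sketch (illustrative): transposition dispatches on element size, so
    any type/channel combination up to 32-byte elements works. The in-place
    path (itab) is taken only when src and dst share data and are square.

        cv::Mat a(4, 7, CV_8UC3), b;
        cv::transpose(a, b);         // b becomes 7x4, CV_8UC3
        cv::Mat s(5, 5, CV_64FC1);
        cv::transpose(s, s);         // square => in-place transposition
*/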
void completeSymm( Mat& matrix, bool LtoR )
    int i, j, nrows = matrix.rows, type = matrix.type();
    int j0 = 0, j1 = nrows;
    CV_Assert( matrix.rows == matrix.cols );

    if( type == CV_32FC1 || type == CV_32SC1 )
        int* data = (int*)matrix.data;
        size_t step = matrix.step/sizeof(data[0]);
        for( i = 0; i < nrows; i++ )
            if( !LtoR ) j1 = i; else j0 = i+1;
            for( j = j0; j < j1; j++ )
                data[i*step + j] = data[j*step + i];
    else if( type == CV_64FC1 )
        double* data = (double*)matrix.data;
        size_t step = matrix.step/sizeof(data[0]);
        for( i = 0; i < nrows; i++ )
            if( !LtoR ) j1 = i; else j0 = i+1;
            for( j = j0; j < j1; j++ )
                data[i*step + j] = data[j*step + i];
    CV_Error( CV_StsUnsupportedFormat, "" );
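
/*
    Usage sketch (illustrative): making a square matrix symmetric by mirroring
    one triangle onto the other; only single-channel CV_32F/CV_32S/CV_64F
    square matrices are handled.

        cv::Mat a(3, 3, CV_64FC1);
        // ... fill the lower triangle of a ...
        cv::completeSymm(a, true);    // LtoR=true: lower triangle -> upper
*/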
Mat Mat::cross(const Mat& m) const
    int t = type(), d = CV_MAT_DEPTH(t);
    CV_Assert( size() == m.size() && t == m.type() &&
        ((rows == 3 && cols == 1) || (cols*channels() == 3 && rows == 1)));
    Mat result(rows, cols, t);

    const float *a = (const float*)data, *b = (const float*)m.data;
    float* c = (float*)result.data;
    size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
    size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;

    c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
    c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
    c[2] = a[0] * b[ldb] - a[lda] * b[0];
    else if( d == CV_64F )
        const double *a = (const double*)data, *b = (const double*)m.data;
        double* c = (double*)result.data;
        size_t lda = rows > 1 ? step/sizeof(a[0]) : 1;
        size_t ldb = rows > 1 ? m.step/sizeof(b[0]) : 1;

        c[0] = a[lda] * b[ldb*2] - a[lda*2] * b[ldb];
        c[1] = a[lda*2] * b[0] - a[0] * b[ldb*2];
        c[2] = a[0] * b[ldb] - a[lda] * b[0];
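
/*
    Usage sketch (illustrative): cross product of two 3-element floating-point
    vectors (3x1 or 1x3, CV_32F or CV_64F).

        cv::Mat a = (cv::Mat_<double>(3,1) << 1, 0, 0);
        cv::Mat b = (cv::Mat_<double>(3,1) << 0, 1, 0);
        cv::Mat c = a.cross(b);    // (0, 0, 1)
*/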
/****************************************************************************************\
*                                 Reduce Mat to vector                                   *
\****************************************************************************************/
template<typename T, typename ST, class Op> static void
reduceR_( const Mat& srcmat, Mat& dstmat )
    typedef typename Op::rtype WT;
    Size size = srcmat.size();
    size.width *= srcmat.channels();
    AutoBuffer<WT> buffer(size.width);
    ST* dst = (ST*)dstmat.data;
    const T* src = (const T*)srcmat.data;
    size_t srcstep = srcmat.step/sizeof(src[0]);

    for( i = 0; i < size.width; i++ )

    for( ; --size.height; )
        for( i = 0; i <= size.width - 4; i += 4 )
            s0 = op(buf[i], (WT)src[i]);
            s1 = op(buf[i+1], (WT)src[i+1]);
            buf[i] = s0; buf[i+1] = s1;

            s0 = op(buf[i+2], (WT)src[i+2]);
            s1 = op(buf[i+3], (WT)src[i+3]);
            buf[i+2] = s0; buf[i+3] = s1;

        for( ; i < size.width; i++ )
            buf[i] = op(buf[i], (WT)src[i]);

    for( i = 0; i < size.width; i++ )

template<typename T, typename ST, class Op> static void
reduceC_( const Mat& srcmat, Mat& dstmat )
    typedef typename Op::rtype WT;
    Size size = srcmat.size();
    int i, k, cn = srcmat.channels();

    for( int y = 0; y < size.height; y++ )
        const T* src = (const T*)(srcmat.data + srcmat.step*y);
        ST* dst = (ST*)(dstmat.data + dstmat.step*y);
        if( size.width == cn )
            for( k = 0; k < cn; k++ )

        for( k = 0; k < cn; k++ )
            WT a0 = src[k], a1 = src[k+cn];
            for( i = 2*cn; i <= size.width - 4*cn; i += 4*cn )
                a0 = op(a0, (WT)src[i+k]);
                a1 = op(a1, (WT)src[i+k+cn]);
                a0 = op(a0, (WT)src[i+k+cn*2]);
                a1 = op(a1, (WT)src[i+k+cn*3]);

            for( ; i < size.width; i += cn )
                a0 = op(a0, (WT)src[i]);

typedef void (*ReduceFunc)( const Mat& src, Mat& dst );
void reduce(const Mat& src, Mat& dst, int dim, int op, int dtype)
    int stype = src.type(), sdepth = src.depth();
    int ddepth = CV_MAT_DEPTH(dtype);
    dst.create(dim == 0 ? 1 : src.rows, dim == 0 ? src.cols : 1, dtype >= 0 ? dtype : stype);

    CV_Assert( op == CV_REDUCE_SUM || op == CV_REDUCE_MAX ||
        op == CV_REDUCE_MIN || op == CV_REDUCE_AVG );
    CV_Assert( src.channels() == dst.channels() );

    if( op == CV_REDUCE_AVG )
        if( sdepth < CV_32S && ddepth < CV_32S )
            temp.create(dst.rows, dst.cols, CV_32SC(src.channels()));

    if( op == CV_REDUCE_SUM )
        if(sdepth == CV_8U && ddepth == CV_32S)
            func = reduceR_<uchar,int,OpAdd<int> >;
        if(sdepth == CV_8U && ddepth == CV_32F)
            func = reduceR_<uchar,float,OpAdd<int> >;
        if(sdepth == CV_8U && ddepth == CV_64F)
            func = reduceR_<uchar,double,OpAdd<int> >;
        if(sdepth == CV_16U && ddepth == CV_32F)
            func = reduceR_<ushort,float,OpAdd<float> >;
        if(sdepth == CV_16U && ddepth == CV_64F)
            func = reduceR_<ushort,double,OpAdd<double> >;
        if(sdepth == CV_16S && ddepth == CV_32F)
            func = reduceR_<short,float,OpAdd<float> >;
        if(sdepth == CV_16S && ddepth == CV_64F)
            func = reduceR_<short,double,OpAdd<double> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceR_<float,float,OpAdd<float> >;
        if(sdepth == CV_32F && ddepth == CV_64F)
            func = reduceR_<float,double,OpAdd<double> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceR_<double,double,OpAdd<double> >;
    else if(op == CV_REDUCE_MAX)
        if(sdepth == CV_8U && ddepth == CV_8U)
            func = reduceR_<uchar, uchar, OpMax<uchar> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceR_<float, float, OpMax<float> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceR_<double, double, OpMax<double> >;
    else if(op == CV_REDUCE_MIN)
        if(sdepth == CV_8U && ddepth == CV_8U)
            func = reduceR_<uchar, uchar, OpMin<uchar> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceR_<float, float, OpMin<float> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceR_<double, double, OpMin<double> >;

    if(op == CV_REDUCE_SUM)
        if(sdepth == CV_8U && ddepth == CV_32S)
            func = reduceC_<uchar,int,OpAdd<int> >;
        if(sdepth == CV_8U && ddepth == CV_32F)
            func = reduceC_<uchar,float,OpAdd<int> >;
        if(sdepth == CV_8U && ddepth == CV_64F)
            func = reduceC_<uchar,double,OpAdd<int> >;
        if(sdepth == CV_16U && ddepth == CV_32F)
            func = reduceC_<ushort,float,OpAdd<float> >;
        if(sdepth == CV_16U && ddepth == CV_64F)
            func = reduceC_<ushort,double,OpAdd<double> >;
        if(sdepth == CV_16S && ddepth == CV_32F)
            func = reduceC_<short,float,OpAdd<float> >;
        if(sdepth == CV_16S && ddepth == CV_64F)
            func = reduceC_<short,double,OpAdd<double> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceC_<float,float,OpAdd<float> >;
        if(sdepth == CV_32F && ddepth == CV_64F)
            func = reduceC_<float,double,OpAdd<double> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceC_<double,double,OpAdd<double> >;
    else if(op == CV_REDUCE_MAX)
        if(sdepth == CV_8U && ddepth == CV_8U)
            func = reduceC_<uchar, uchar, OpMax<uchar> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceC_<float, float, OpMax<float> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceC_<double, double, OpMax<double> >;
    else if(op == CV_REDUCE_MIN)
        if(sdepth == CV_8U && ddepth == CV_8U)
            func = reduceC_<uchar, uchar, OpMin<uchar> >;
        if(sdepth == CV_32F && ddepth == CV_32F)
            func = reduceC_<float, float, OpMin<float> >;
        if(sdepth == CV_64F && ddepth == CV_64F)
            func = reduceC_<double, double, OpMin<double> >;

    CV_Error( CV_StsUnsupportedFormat,
        "Unsupported combination of input and output array formats" );

    if( op0 == CV_REDUCE_AVG )
        temp.convertTo(dst, dst.type(), 1./(dim == 0 ? src.rows : src.cols));
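
/*
    Usage sketch (illustrative): reducing an 8-bit image to per-column sums and
    per-row averages. For CV_REDUCE_SUM/CV_REDUCE_AVG the destination depth may
    be wider than the source (here CV_32S/CV_32F) to avoid saturation, while
    CV_REDUCE_MIN/MAX require matching depths.

        cv::Mat img(480, 640, CV_8UC1), colSums, rowAvg;
        cv::reduce(img, colSums, 0, CV_REDUCE_SUM, CV_32S);   // 1x640, CV_32SC1
        cv::reduce(img, rowAvg,  1, CV_REDUCE_AVG, CV_32F);   // 480x1, CV_32FC1
*/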
template<typename T> static void sort_( const Mat& src, Mat& dst, int flags )
    bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
    bool inplace = src.data == dst.data;
    bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;

    n = src.rows, len = src.cols;
    n = src.cols, len = src.rows;

    for( i = 0; i < n; i++ )
        T* dptr = (T*)(dst.data + dst.step*i);
        const T* sptr = (const T*)(src.data + src.step*i);
        for( j = 0; j < len; j++ )

        for( j = 0; j < len; j++ )
            ptr[j] = ((const T*)(src.data + src.step*j))[i];

        std::sort( ptr, ptr + len, LessThan<T>() );
        for( j = 0; j < len/2; j++ )
            std::swap(ptr[j], ptr[len-1-j]);

        for( j = 0; j < len; j++ )
            ((T*)(dst.data + dst.step*j))[i] = ptr[j];

template<typename T> static void sortIdx_( const Mat& src, Mat& dst, int flags )
    AutoBuffer<int> ibuf;
    bool sortRows = (flags & 1) == CV_SORT_EVERY_ROW;
    bool sortDescending = (flags & CV_SORT_DESCENDING) != 0;

    CV_Assert( src.data != dst.data );

    n = src.rows, len = src.cols;
    n = src.cols, len = src.rows;

    for( i = 0; i < n; i++ )
        ptr = (T*)(src.data + src.step*i);
        iptr = (int*)(dst.data + dst.step*i);

        for( j = 0; j < len; j++ )
            ptr[j] = ((const T*)(src.data + src.step*j))[i];
        for( j = 0; j < len; j++ )

        std::sort( iptr, iptr + len, LessThanIdx<T>(ptr) );
        for( j = 0; j < len/2; j++ )
            std::swap(iptr[j], iptr[len-1-j]);

        for( j = 0; j < len; j++ )
            ((int*)(dst.data + dst.step*j))[i] = iptr[j];

typedef void (*SortFunc)(const Mat& src, Mat& dst, int flags);

void sort( const Mat& src, Mat& dst, int flags )
    static SortFunc tab[] =
        sort_<uchar>, sort_<schar>, sort_<ushort>, sort_<short>,
        sort_<int>, sort_<float>, sort_<double>, 0
    SortFunc func = tab[src.depth()];
    CV_Assert( src.channels() == 1 && func != 0 );
    dst.create( src.size(), src.type() );
    func( src, dst, flags );

void sortIdx( const Mat& src, Mat& dst, int flags )
    static SortFunc tab[] =
        sortIdx_<uchar>, sortIdx_<schar>, sortIdx_<ushort>, sortIdx_<short>,
        sortIdx_<int>, sortIdx_<float>, sortIdx_<double>, 0
    SortFunc func = tab[src.depth()];
    CV_Assert( src.channels() == 1 && func != 0 );
    if( dst.data == src.data )
    dst.create( src.size(), CV_32S );
    func( src, dst, flags );
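
/*
    Usage sketch (illustrative): sorting each row of a single-channel matrix,
    or obtaining the permutation indices instead of the sorted values.

        cv::Mat a(10, 5, CV_32FC1), sorted, order;
        cv::sort(a, sorted, CV_SORT_EVERY_ROW + CV_SORT_ASCENDING);
        cv::sortIdx(a, order, CV_SORT_EVERY_ROW + CV_SORT_DESCENDING);
        // order.at<int>(i, 0) is the column of the largest element of row i
*/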
static void generateRandomCenter(const vector<Vec2f>& box, float* center, RNG& rng)
    size_t j, dims = box.size();
    float margin = 1.f/dims;
    for( j = 0; j < dims; j++ )
        center[j] = ((float)rng*(1.f+margin*2.f)-margin)*(box[j][1] - box[j][0]) + box[j][0];
static inline float distance(const float* a, const float* b, int n)
    int j = 0; float d = 0.f;
    float CV_DECL_ALIGNED(16) buf[4];
    __m128 d0 = _mm_setzero_ps(), d1 = _mm_setzero_ps();

    for( ; j <= n - 8; j += 8 )
        __m128 t0 = _mm_sub_ps(_mm_loadu_ps(a + j), _mm_loadu_ps(b + j));
        __m128 t1 = _mm_sub_ps(_mm_loadu_ps(a + j + 4), _mm_loadu_ps(b + j + 4));
        d0 = _mm_add_ps(d0, _mm_mul_ps(t0, t0));
        d1 = _mm_add_ps(d1, _mm_mul_ps(t1, t1));
    _mm_store_ps(buf, _mm_add_ps(d0, d1));
    d = buf[0] + buf[1] + buf[2] + buf[3];

    for( ; j <= n - 4; j += 4 )
        float t0 = a[j] - b[j], t1 = a[j+1] - b[j+1], t2 = a[j+2] - b[j+2], t3 = a[j+3] - b[j+3];
        d += t0*t0 + t1*t1 + t2*t2 + t3*t3;

    float t = a[j] - b[j];
/*
k-means center initialization using the following algorithm:
Arthur & Vassilvitskii (2007) k-means++: The Advantages of Careful Seeding
*/
static void generateCentersPP(const Mat& _data, Mat& _out_centers,
                              int K, RNG& rng, int trials)
    int i, j, k, dims = _data.cols, N = _data.rows;
    const float* data = _data.ptr<float>(0);
    int step = _data.step/sizeof(data[0]);
    vector<int> _centers(K);
    int* centers = &_centers[0];
    vector<float> _dist(N*3);
    float* dist = &_dist[0], *tdist = dist + N, *tdist2 = tdist + N;

    centers[0] = (unsigned)rng % N;

    for( i = 0; i < N; i++ )
        dist[i] = distance(data + step*i, data + step*centers[0], dims);

    for( k = 1; k < K; k++ )
        double bestSum = DBL_MAX;

        for( j = 0; j < trials; j++ )
            double p = (double)rng*sum0, s = 0;
            for( i = 0; i < N-1; i++ )
                if( (p -= dist[i]) <= 0 )

            for( i = 0; i < N; i++ )
                tdist2[i] = std::min(distance(data + step*i, data + step*ci, dims), dist[i]);

            std::swap(tdist, tdist2);

        centers[k] = bestCenter;
        std::swap(dist, tdist);

    for( k = 0; k < K; k++ )
        const float* src = data + step*centers[k];
        float* dst = _out_centers.ptr<float>(k);
        for( j = 0; j < dims; j++ )
double kmeans( const Mat& data, int K, Mat& best_labels,
               TermCriteria criteria, int attempts,
               int flags, Mat* _centers )
    const int SPP_TRIALS = 3;
    int N = data.rows > 1 ? data.rows : data.cols;
    int dims = (data.rows > 1 ? data.cols : 1)*data.channels();
    int type = data.depth();

    attempts = std::max(attempts, 1);
    CV_Assert( type == CV_32F && K > 0 );

    if( flags & CV_KMEANS_USE_INITIAL_LABELS )
        CV_Assert( (best_labels.cols == 1 || best_labels.rows == 1) &&
            best_labels.cols*best_labels.rows == N &&
            best_labels.type() == CV_32S &&
            best_labels.isContinuous());
        best_labels.copyTo(_labels);

    if( !((best_labels.cols == 1 || best_labels.rows == 1) &&
        best_labels.cols*best_labels.rows == N &&
        best_labels.type() == CV_32S &&
        best_labels.isContinuous()))
        best_labels.create(N, 1, CV_32S);
    _labels.create(best_labels.size(), best_labels.type());

    int* labels = _labels.ptr<int>();

    Mat centers(K, dims, type), old_centers(K, dims, type);
    vector<int> counters(K);
    vector<Vec2f> _box(dims);
    Vec2f* box = &_box[0];

    double best_compactness = DBL_MAX, compactness = 0;
    int a, iter, i, j, k;

    if( criteria.type & TermCriteria::EPS )
        criteria.epsilon = std::max(criteria.epsilon, 0.);
        criteria.epsilon = FLT_EPSILON;
    criteria.epsilon *= criteria.epsilon;

    if( criteria.type & TermCriteria::COUNT )
        criteria.maxCount = std::min(std::max(criteria.maxCount, 2), 100);
        criteria.maxCount = 100;

        criteria.maxCount = 2;

    const float* sample = data.ptr<float>(0);
    for( j = 0; j < dims; j++ )
        box[j] = Vec2f(sample[j], sample[j]);

    for( i = 1; i < N; i++ )
        sample = data.ptr<float>(i);
        for( j = 0; j < dims; j++ )
            box[j][0] = std::min(box[j][0], v);
            box[j][1] = std::max(box[j][1], v);
    for( a = 0; a < attempts; a++ )
        double max_center_shift = DBL_MAX;
        for( iter = 0; iter < criteria.maxCount && max_center_shift > criteria.epsilon; iter++ )
            swap(centers, old_centers);

            if( iter == 0 && (a > 0 || !(flags & KMEANS_USE_INITIAL_LABELS)) )
                if( flags & KMEANS_PP_CENTERS )
                    generateCentersPP(data, centers, K, rng, SPP_TRIALS);
                for( k = 0; k < K; k++ )
                    generateRandomCenter(_box, centers.ptr<float>(k), rng);

            if( iter == 0 && a == 0 && (flags & KMEANS_USE_INITIAL_LABELS) )
                for( i = 0; i < N; i++ )
                    CV_Assert( (unsigned)labels[i] < (unsigned)K );

            for( k = 0; k < K; k++ )

            for( i = 0; i < N; i++ )
                sample = data.ptr<float>(i);
                float* center = centers.ptr<float>(k);
                for( j = 0; j <= dims - 4; j += 4 )
                    float t0 = center[j] + sample[j];
                    float t1 = center[j+1] + sample[j+1];

                    t0 = center[j+2] + sample[j+2];
                    t1 = center[j+3] + sample[j+3];

                for( ; j < dims; j++ )
                    center[j] += sample[j];

            max_center_shift = 0;

            for( k = 0; k < K; k++ )
                float* center = centers.ptr<float>(k);
                if( counters[k] != 0 )
                    float scale = 1.f/counters[k];
                    for( j = 0; j < dims; j++ )
                    generateRandomCenter(_box, center, rng);

                const float* old_center = old_centers.ptr<float>(k);
                for( j = 0; j < dims; j++ )
                    double t = center[j] - old_center[j];
                max_center_shift = std::max(max_center_shift, dist);

            for( i = 0; i < N; i++ )
                sample = data.ptr<float>(i);
                double min_dist = DBL_MAX;

                for( k = 0; k < K; k++ )
                    const float* center = centers.ptr<float>(k);
                    double dist = distance(sample, center, dims);

                    if( min_dist > dist )

                compactness += min_dist;

        if( compactness < best_compactness )
            best_compactness = compactness;
            centers.copyTo(*_centers);
            _labels.copyTo(best_labels);

    return best_compactness;
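
/*
    Usage sketch (illustrative): clustering 1000 2-D points into 5 clusters with
    k-means++ seeding and 3 restarts. The data must be CV_32F with one sample
    per row; the sample matrix here is an invented placeholder.

        cv::Mat points(1000, 2, CV_32F);     // fill with samples first
        cv::Mat labels, centers;
        double compactness = cv::kmeans(points, 5, labels,
            cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 20, 1e-4),
            3, cv::KMEANS_PP_CENTERS, &centers);
        // labels: 1000x1 CV_32S, centers: 5x2 CV_32F
*/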
CV_IMPL void cvSetIdentity( CvArr* arr, CvScalar value )
    cv::Mat m = cv::cvarrToMat(arr);
    cv::setIdentity(m, value);

CV_IMPL CvScalar cvTrace( const CvArr* arr )
    return cv::trace(cv::cvarrToMat(arr));

CV_IMPL void cvTranspose( const CvArr* srcarr, CvArr* dstarr )
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( src.rows == dst.cols && src.cols == dst.rows && src.type() == dst.type() );
    transpose( src, dst );

CV_IMPL void cvCompleteSymm( CvMat* matrix, int LtoR )
    cv::completeSymm( m, LtoR != 0 );

CV_IMPL void cvCrossProduct( const CvArr* srcAarr, const CvArr* srcBarr, CvArr* dstarr )
    cv::Mat srcA = cv::cvarrToMat(srcAarr), dst = cv::cvarrToMat(dstarr);

    CV_Assert( srcA.size() == dst.size() && srcA.type() == dst.type() );
    srcA.cross(cv::cvarrToMat(srcBarr)).copyTo(dst);

cvReduce( const CvArr* srcarr, CvArr* dstarr, int dim, int op )
    cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);

    dim = src.rows > dst.rows ? 0 : src.cols > dst.cols ? 1 : dst.cols == 1;
    CV_Error( CV_StsOutOfRange, "The reduced dimensionality index is out of range" );

    if( (dim == 0 && (dst.cols != src.cols || dst.rows != 1)) ||
        (dim == 1 && (dst.rows != src.rows || dst.cols != 1)) )
        CV_Error( CV_StsBadSize, "The output array size is incorrect" );

    if( src.channels() != dst.channels() )
        CV_Error( CV_StsUnmatchedFormats, "Input and output arrays must have the same number of channels" );

    cv::reduce(src, dst, dim, op, dst.type());

cvRange( CvArr* arr, double start, double end )
    CvMat stub, *mat = (CvMat*)arr;

    if( !CV_IS_MAT(mat) )
        mat = cvGetMat( mat, &stub);

    type = CV_MAT_TYPE(mat->type);
    delta = (end-start)/(rows*cols);

    if( CV_IS_MAT_CONT(mat->type) )
        step = mat->step / CV_ELEM_SIZE(type);

    if( type == CV_32SC1 )
        int* idata = mat->data.i;
        int ival = cvRound(val), idelta = cvRound(delta);

        if( fabs(val - ival) < DBL_EPSILON &&
            fabs(delta - idelta) < DBL_EPSILON )
            for( i = 0; i < rows; i++, idata += step )
                for( j = 0; j < cols; j++, ival += idelta )

            for( i = 0; i < rows; i++, idata += step )
                for( j = 0; j < cols; j++, val += delta )
                    idata[j] = cvRound(val);
    else if( type == CV_32FC1 )
        float* fdata = mat->data.fl;
        for( i = 0; i < rows; i++, fdata += step )
            for( j = 0; j < cols; j++, val += delta )
                fdata[j] = (float)val;
        CV_Error( CV_StsUnsupportedFormat, "The function only supports 32sC1 and 32fC1 datatypes" );

    return ok ? arr : 0;

cvSort( const CvArr* _src, CvArr* _dst, CvArr* _idx, int flags )
    cv::Mat src = cv::cvarrToMat(_src), dst, idx;

    cv::Mat idx0 = cv::cvarrToMat(_idx), idx = idx0;
    CV_Assert( src.size() == idx.size() && idx.type() == CV_32S && src.data != idx.data );
    cv::sortIdx( src, idx, flags );
    CV_Assert( idx0.data == idx.data );

    cv::Mat dst0 = cv::cvarrToMat(_dst), dst = dst0;
    CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
    cv::sort( src, dst, flags );
    CV_Assert( dst0.data == dst.data );

cvKMeans2( const CvArr* _samples, int cluster_count, CvArr* _labels,
           CvTermCriteria termcrit, int attempts, CvRNG*,
           int flags, CvArr* _centers, double* _compactness )
    cv::Mat data = cv::cvarrToMat(_samples), labels = cv::cvarrToMat(_labels), centers;
    centers = cv::cvarrToMat(_centers);
    CV_Assert( labels.isContinuous() && labels.type() == CV_32S &&
        (labels.cols == 1 || labels.rows == 1) &&
        labels.cols + labels.rows - 1 == data.rows );
    double compactness = cv::kmeans(data, cluster_count, labels, termcrit, attempts,
        flags, _centers ? &centers : 0 );
    *_compactness = compactness;
///////////////////////////// n-dimensional matrices ////////////////////////////

//////////////////////////////// MatND ///////////////////////////////////
MatND::MatND(const MatND& m, const Range* ranges)
    : flags(MAGIC_VAL), dims(0), refcount(0), data(0), datastart(0), dataend(0)
    int i, j, d = m.dims;

    for( i = 0; i < d; i++ )
        Range r = ranges[i];
        CV_Assert( r == Range::all() ||
            (0 <= r.start && r.start < r.end && r.end <= m.size[i]) );

    for( i = 0; i < d; i++ )
        Range r = ranges[i];
        if( r != Range::all() )
            size[i] = r.end - r.start;
            data += r.start*step[i];

    for( i = 0; i < d; i++ )

    CV_Assert( step[d-1] == elemSize() );
    for( j = d-1; j > i; j-- )
        if( step[j]*size[j] < step[j-1] )
    flags = (flags & ~CONTINUOUS_FLAG) | (j <= i ? CONTINUOUS_FLAG : 0);
void MatND::create(int d, const int* _sizes, int _type)
    CV_Assert(d > 0 && _sizes);
    _type = CV_MAT_TYPE(_type);
    if( data && d == dims && _type == type() )
        for( i = 0; i < d; i++ )
            if( size[i] != _sizes[i] )

    flags = (_type & CV_MAT_TYPE_MASK) | MAGIC_VAL | CONTINUOUS_FLAG;
    size_t total = elemSize();
    for( i = d-1; i >= 0; i-- )
        total1 = (int64)total*sz;
        CV_Assert( sz > 0 );
        if( (uint64)total1 != (size_t)total1 )
            CV_Error( CV_StsOutOfRange, "The total matrix size does not fit to \"size_t\" type" );
        total = (size_t)total1;
    total = alignSize(total, (int)sizeof(*refcount));
    data = datastart = (uchar*)fastMalloc(total + (int)sizeof(*refcount));
    dataend = datastart + step[0]*size[0];
    refcount = (int*)(data + total);
void MatND::copyTo( MatND& m ) const
    m.create( dims, size, type() );
    NAryMatNDIterator it(*this, m);

    for( int i = 0; i < it.nplanes; i++, ++it )
        it.planes[0].copyTo(it.planes[1]);

void MatND::copyTo( MatND& m, const MatND& mask ) const
    m.create( dims, size, type() );
    NAryMatNDIterator it(*this, m, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        it.planes[0].copyTo(it.planes[1], it.planes[2]);

void MatND::convertTo( MatND& m, int rtype, double alpha, double beta ) const
    rtype = rtype < 0 ? type() : CV_MAKETYPE(CV_MAT_DEPTH(rtype), channels());
    m.create( dims, size, rtype );
    NAryMatNDIterator it(*this, m);

    for( int i = 0; i < it.nplanes; i++, ++it )
        it.planes[0].convertTo(it.planes[1], rtype, alpha, beta);

MatND& MatND::operator = (const Scalar& s)
    NAryMatNDIterator it(*this);
    for( int i = 0; i < it.nplanes; i++, ++it )

MatND& MatND::setTo(const Scalar& s, const MatND& mask)
    NAryMatNDIterator it(*this, mask);
    for( int i = 0; i < it.nplanes; i++, ++it )
        it.planes[0].setTo(s, it.planes[1]);

MatND MatND::reshape(int, int, const int*) const
    CV_Error(CV_StsNotImplemented, "");

MatND::operator Mat() const
    int i, d = dims, d1, rows, cols;
    size_t _step = Mat::AUTO_STEP;
    cols = d == 2 ? size[1] : 1;

    for( d1 = 0; d1 < d; d1++ )

    for( i = d-1; i > d1; i-- )
        int64 cols1 = (int64)cols*size[i-1];
        if( cols1 != (int)cols1 || size[i]*step[i] != step[i-1] )

    for( ; i > d1; i-- )
        int64 rows1 = (int64)rows*size[i-1];
        if( rows1 != (int)rows1 || size[i]*step[i] != step[i-1] )

    CV_Error( CV_StsBadArg,
        "The nD matrix can not be represented as 2D matrix due "
        "to its layout in memory; you may use (Mat)the_matnd.clone() instead" );

    Mat m(rows, cols, type(), data, _step);
    m.datastart = datastart;
    m.dataend = dataend;
    m.refcount = refcount;

MatND::operator CvMatND() const
    cvInitMatNDHeader( &mat, dims, size, type(), data );
    for( i = 0; i < d; i++ )
        mat.dim[i].step = (int)step[i];
    mat.type |= flags & CONTINUOUS_FLAG;
NAryMatNDIterator::NAryMatNDIterator(const MatND** _arrays, size_t count)
    init(_arrays, count);

NAryMatNDIterator::NAryMatNDIterator(const MatND* _arrays, size_t count)
    AutoBuffer<const MatND*, 32> buf(count);
    for( size_t i = 0; i < count; i++ )
        buf[i] = _arrays + i;

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1)
    const MatND* mm[] = {&m1};

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1, const MatND& m2)
    const MatND* mm[] = {&m1, &m2};

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1, const MatND& m2, const MatND& m3)
    const MatND* mm[] = {&m1, &m2, &m3};

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1, const MatND& m2,
                                     const MatND& m3, const MatND& m4)
    const MatND* mm[] = {&m1, &m2, &m3, &m4};

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1, const MatND& m2,
                                     const MatND& m3, const MatND& m4,
    const MatND* mm[] = {&m1, &m2, &m3, &m4, &m5};

NAryMatNDIterator::NAryMatNDIterator(const MatND& m1, const MatND& m2,
                                     const MatND& m3, const MatND& m4,
                                     const MatND& m5, const MatND& m6)
    const MatND* mm[] = {&m1, &m2, &m3, &m4, &m5, &m6};

void NAryMatNDIterator::init(const MatND** _arrays, size_t count)
    CV_Assert( _arrays && count > 0 );
    arrays.resize(count);
    int i, j, d1=0, i0 = -1, d = -1, n = (int)count;

    for( i = 0; i < n; i++ )
        if( !_arrays[i] || !_arrays[i]->data )
            arrays[i] = MatND();

        const MatND& A = arrays[i] = *_arrays[i];

        // find the first dimensionality which is different from 1;
        // in any of the arrays the first "d1" steps do not affect the continuity
        for( d1 = 0; d1 < d; d1++ )
            if( A.size[d1] > 1 )

        CV_Assert( A.dims == d );
        for( j = 0; j < d; j++ )
            CV_Assert( A.size[j] == arrays[i0].size[j] );

        if( !A.isContinuous() )
            CV_Assert( A.step[d-1] == esz );
            for( j = d-1; j > d1; j-- )
                if( A.step[j]*A.size[j] < A.step[j-1] )
                    iterdepth = std::max(iterdepth, j);

        CV_Error( CV_StsBadArg, "All the input arrays are empty" );

    int total = arrays[i0].size[d-1];
    for( j = d-1; j > iterdepth; j-- )
        int64 total1 = (int64)total*arrays[i0].size[j-1];
        if( total1 != (int)total1 )
        total = (int)total1;

    if( iterdepth == d1 )

    for( i = 0; i < n; i++ )
        if( !arrays[i].data )

        planes[i] = Mat( 1, total, arrays[i].type(), arrays[i].data );
        planes[i].datastart = arrays[i].datastart;
        planes[i].dataend = arrays[i].dataend;
        planes[i].refcount = arrays[i].refcount;

    for( j = iterdepth-1; j >= 0; j-- )
        nplanes *= arrays[i0].size[j];
NAryMatNDIterator& NAryMatNDIterator::operator ++()
    if( idx >= nplanes-1 )

    for( size_t i = 0; i < arrays.size(); i++ )
        const MatND& A = arrays[i];
        uchar* data = A.data;
        for( int j = iterdepth-1; j >= 0 && _idx > 0; j-- )
            int szi = A.size[j], t = _idx/szi;
            data += (_idx - t * szi)*A.step[j];

NAryMatNDIterator NAryMatNDIterator::operator ++(int)
    NAryMatNDIterator it = *this;
void add(const MatND& a, const MatND& b, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        add( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void subtract(const MatND& a, const MatND& b, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        subtract( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void add(const MatND& a, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        add( it.planes[0], it.planes[1], it.planes[2] );

void subtract(const MatND& a, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        subtract( it.planes[0], it.planes[1], it.planes[2] );

void add(const MatND& a, const Scalar& s, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        add( it.planes[0], s, it.planes[1], it.planes[2] );

void subtract(const Scalar& s, const MatND& a, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        subtract( s, it.planes[0], it.planes[1], it.planes[2] );

void multiply(const MatND& a, const MatND& b, MatND& c, double scale)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        multiply( it.planes[0], it.planes[1], it.planes[2], scale );

void divide(const MatND& a, const MatND& b, MatND& c, double scale)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        divide( it.planes[0], it.planes[1], it.planes[2], scale );

void divide(double scale, const MatND& b, MatND& c)
    c.create(b.dims, b.size, b.type());
    NAryMatNDIterator it(b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        divide( scale, it.planes[0], it.planes[1] );

void scaleAdd(const MatND& a, double alpha, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        scaleAdd( it.planes[0], alpha, it.planes[1], it.planes[2] );

void addWeighted(const MatND& a, double alpha, const MatND& b,
                 double beta, double gamma, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        addWeighted( it.planes[0], alpha, it.planes[1], beta, gamma, it.planes[2] );
Scalar sum(const MatND& m)
    NAryMatNDIterator it(m);

    for( int i = 0; i < it.nplanes; i++, ++it )
        s += sum(it.planes[0]);

int countNonZero( const MatND& m )
    NAryMatNDIterator it(m);

    for( int i = 0; i < it.nplanes; i++, ++it )
        nz += countNonZero(it.planes[0]);

Scalar mean(const MatND& m)
    NAryMatNDIterator it(m);
    for( int i = 0; i < m.dims; i++ )
    return sum(m)*(1./total);

Scalar mean(const MatND& m, const MatND& mask)
    NAryMatNDIterator it(m, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        s += sum(it.planes[0]);
        total += countNonZero(it.planes[1]);
    return (s *= 1./std::max(total, 1.));
void meanStdDev(const MatND& m, Scalar& mean, Scalar& stddev, const MatND& mask)
    NAryMatNDIterator it(m, mask);
    int k, cn = m.channels();

    for( int i = 0; i < it.nplanes; i++, ++it )
        Scalar _mean, _stddev;
        meanStdDev(it.planes[0], _mean, _stddev, it.planes[1]);
        double nz = mask.data ? countNonZero(it.planes[1]) :
            (double)it.planes[0].rows*it.planes[0].cols;
        for( k = 0; k < cn; k++ )
            s[k] += _mean[k]*nz;
            sq[k] += (_stddev[k]*_stddev[k] + _mean[k]*_mean[k])*nz;

    mean = stddev = Scalar();
    total = 1./std::max(total, 1.);
    for( k = 0; k < cn; k++ )
        mean[k] = s[k]*total;
        stddev[k] = std::sqrt(std::max(sq[k]*total - mean[k]*mean[k], 0.));

double norm(const MatND& a, int normType, const MatND& mask)
    NAryMatNDIterator it(a, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        double n = norm(it.planes[0], normType, it.planes[1]);
        if( normType == NORM_INF )
            total = std::max(total, n);
        else if( normType == NORM_L1 )

    return normType != NORM_L2 ? total : std::sqrt(total);
double norm(const MatND& a, const MatND& b,
            int normType, const MatND& mask)
    bool isRelative = (normType & NORM_RELATIVE) != 0;
    NAryMatNDIterator it(a, b, mask);
    double num = 0, denom = 0;

    for( int i = 0; i < it.nplanes; i++, ++it )
        double n = norm(it.planes[0], it.planes[1], normType, it.planes[2]);
        double d = !isRelative ? 0 : norm(it.planes[1], normType, it.planes[2]);
        if( normType == NORM_INF )
            num = std::max(num, n);
            denom = std::max(denom, d);
        else if( normType == NORM_L1 )

    if( normType == NORM_L2 )
        num = std::sqrt(num);
        denom = std::sqrt(denom);

    return !isRelative ? num : num/std::max(denom,DBL_EPSILON);

void normalize( const MatND& src, MatND& dst, double a, double b,
                int norm_type, int rtype, const MatND& mask )
    double scale = 1, shift = 0;
    if( norm_type == CV_MINMAX )
        double smin = 0, smax = 0;
        double dmin = std::min( a, b ), dmax = std::max( a, b );
        minMaxLoc( src, &smin, &smax, 0, 0, mask );
        scale = (dmax - dmin)*(smax - smin > DBL_EPSILON ? 1./(smax - smin) : 0);
        shift = dmin - smin*scale;
    else if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
        scale = norm( src, norm_type, mask );
        scale = scale > DBL_EPSILON ? a/scale : 0.;
        CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );

    src.convertTo( dst, rtype, scale, shift );
    src.convertTo( temp, rtype, scale, shift );
    temp.copyTo( dst, mask );
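
/*
    Usage sketch (illustrative): stretching an n-dimensional array to [0,255]
    (CV_MINMAX), or scaling it to unit L2 norm. The volume below is an invented
    placeholder.

        int sz[] = {10, 10, 10};
        cv::MatND vol(3, sz, CV_32F), out;   // fill vol with data first
        cv::normalize(vol, out, 0, 255, CV_MINMAX, CV_8U);
        cv::normalize(vol, out, 1, 0, CV_L2);   // afterwards norm(out, CV_L2) == 1
*/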
static void ofs2idx(const MatND& a, size_t ofs, int* idx)
    for( i = 0; i < d; i++ )
        idx[i] = (int)(ofs / a.step[i]);

void minMaxLoc(const MatND& a, double* minVal,
               double* maxVal, int* minLoc, int* maxLoc,
    NAryMatNDIterator it(a, mask);
    double minval = DBL_MAX, maxval = -DBL_MAX;
    size_t minofs = 0, maxofs = 0, esz = a.elemSize();

    for( int i = 0; i < it.nplanes; i++, ++it )
        double val0 = 0, val1 = 0;
        minMaxLoc( it.planes[0], &val0, &val1, &pt0, &pt1, it.planes[1] );
        minofs = (it.planes[0].data - a.data) + pt0.x*esz;
        maxofs = (it.planes[0].data - a.data) + pt1.x*esz;

    ofs2idx(a, minofs, minLoc);
    ofs2idx(a, maxofs, maxLoc);
void merge(const MatND* mv, size_t n, MatND& dst)
    vector<MatND> v(n + 1);

    for( k = 0; k < n; k++ )
        total_cn += mv[k].channels();

    dst.create( mv[0].dims, mv[0].size, CV_MAKETYPE(mv[0].depth(), total_cn) );
    NAryMatNDIterator it(&v[0], v.size());

    for( int i = 0; i < it.nplanes; i++, ++it )
        merge( &it.planes[0], n, it.planes[n] );

void split(const MatND& m, MatND* mv)
    size_t k, n = m.channels();
    vector<MatND> v(n + 1);
    for( k = 0; k < n; k++ )
        mv[k].create( m.dims, m.size, CV_MAKETYPE(m.depth(), 1) );

    NAryMatNDIterator it(&v[0], v.size());

    for( int i = 0; i < it.nplanes; i++, ++it )
        split( it.planes[n], &it.planes[0] );
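
/*
    Usage sketch (illustrative): splitting an n-dimensional multi-channel array
    into single-channel planes and merging them back.

        int sz[] = {4, 5, 6};
        cv::MatND rgb(3, sz, CV_8UC3), ch[3], merged;
        cv::split(rgb, ch);          // each ch[k] is CV_8UC1 with the same sizes
        cv::merge(ch, 3, merged);    // merged is CV_8UC3 again
*/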
void mixChannels(const MatND* src, int nsrcs, MatND* dst, int ndsts,
                 const int* fromTo, size_t npairs)
    size_t k, m = nsrcs, n = ndsts;
    CV_Assert( n > 0 && m > 0 );
    vector<MatND> v(m + n);
    for( k = 0; k < m; k++ )
    for( k = 0; k < n; k++ )
    NAryMatNDIterator it(&v[0], v.size());

    for( int i = 0; i < it.nplanes; i++, ++it )
        Mat* pptr = &it.planes[0];
        mixChannels( pptr, m, pptr + m, n, fromTo, npairs );
void bitwise_and(const MatND& a, const MatND& b, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_and( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void bitwise_or(const MatND& a, const MatND& b, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_or( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void bitwise_xor(const MatND& a, const MatND& b, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_xor( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void bitwise_and(const MatND& a, const Scalar& s, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_and( it.planes[0], s, it.planes[1], it.planes[2] );

void bitwise_or(const MatND& a, const Scalar& s, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_or( it.planes[0], s, it.planes[1], it.planes[2] );

void bitwise_xor(const MatND& a, const Scalar& s, MatND& c, const MatND& mask)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c, mask);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_xor( it.planes[0], s, it.planes[1], it.planes[2] );

void bitwise_not(const MatND& a, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        bitwise_not( it.planes[0], it.planes[1] );

void absdiff(const MatND& a, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        absdiff( it.planes[0], it.planes[1], it.planes[2] );

void absdiff(const MatND& a, const Scalar& s, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        absdiff( it.planes[0], s, it.planes[1] );

void inRange(const MatND& src, const MatND& lowerb,
             const MatND& upperb, MatND& dst)
    dst.create(src.dims, src.size, CV_8UC1);
    NAryMatNDIterator it(src, lowerb, upperb, dst);

    for( int i = 0; i < it.nplanes; i++, ++it )
        inRange( it.planes[0], it.planes[1], it.planes[2], it.planes[3] );

void inRange(const MatND& src, const Scalar& lowerb,
             const Scalar& upperb, MatND& dst)
    dst.create(src.dims, src.size, CV_8UC1);
    NAryMatNDIterator it(src, dst);

    for( int i = 0; i < it.nplanes; i++, ++it )
        inRange( it.planes[0], lowerb, upperb, it.planes[1] );
void compare(const MatND& a, const MatND& b, MatND& c, int cmpop)
    c.create(a.dims, a.size, CV_8UC1);
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        compare( it.planes[0], it.planes[1], it.planes[2], cmpop );

void compare(const MatND& a, double s, MatND& c, int cmpop)
    c.create(a.dims, a.size, CV_8UC1);
    NAryMatNDIterator it(a, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        compare( it.planes[0], s, it.planes[1], cmpop );

void min(const MatND& a, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        min( it.planes[0], it.planes[1], it.planes[2] );

void min(const MatND& a, double alpha, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        min( it.planes[0], alpha, it.planes[1] );

void max(const MatND& a, const MatND& b, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        max( it.planes[0], it.planes[1], it.planes[2] );

void max(const MatND& a, double alpha, MatND& c)
    c.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, c);

    for( int i = 0; i < it.nplanes; i++, ++it )
        max( it.planes[0], alpha, it.planes[1] );

void sqrt(const MatND& a, MatND& b)
    b.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b);

    for( int i = 0; i < it.nplanes; i++, ++it )
        sqrt( it.planes[0], it.planes[1] );

void pow(const MatND& a, double power, MatND& b)
    b.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b);

    for( int i = 0; i < it.nplanes; i++, ++it )
        pow( it.planes[0], power, it.planes[1] );

void exp(const MatND& a, MatND& b)
    b.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b);

    for( int i = 0; i < it.nplanes; i++, ++it )
        exp( it.planes[0], it.planes[1] );

void log(const MatND& a, MatND& b)
    b.create(a.dims, a.size, a.type());
    NAryMatNDIterator it(a, b);

    for( int i = 0; i < it.nplanes; i++, ++it )
        log( it.planes[0], it.planes[1] );

bool checkRange(const MatND& a, bool quiet, int*,
                double minVal, double maxVal)
    NAryMatNDIterator it(a);

    for( int i = 0; i < it.nplanes; i++, ++it )
        if( !checkRange( it.planes[0], quiet, &pt, minVal, maxVal ))
            // todo: set index properly
//////////////////////////////// SparseMat ////////////////////////////////

template<typename T1, typename T2> void
convertData_(const void* _from, void* _to, int cn)
    const T1* from = (const T1*)_from;
    *to = saturate_cast<T2>(*from);
    for( int i = 0; i < cn; i++ )
        to[i] = saturate_cast<T2>(from[i]);

template<typename T1, typename T2> void
convertScaleData_(const void* _from, void* _to, int cn, double alpha, double beta)
    const T1* from = (const T1*)_from;
    *to = saturate_cast<T2>(*from*alpha + beta);
    for( int i = 0; i < cn; i++ )
        to[i] = saturate_cast<T2>(from[i]*alpha + beta);

ConvertData getConvertData(int fromType, int toType)
    static ConvertData tab[][8] =
    {{ convertData_<uchar, uchar>, convertData_<uchar, schar>,
       convertData_<uchar, ushort>, convertData_<uchar, short>,
       convertData_<uchar, int>, convertData_<uchar, float>,
       convertData_<uchar, double>, 0 },

     { convertData_<schar, uchar>, convertData_<schar, schar>,
       convertData_<schar, ushort>, convertData_<schar, short>,
       convertData_<schar, int>, convertData_<schar, float>,
       convertData_<schar, double>, 0 },

     { convertData_<ushort, uchar>, convertData_<ushort, schar>,
       convertData_<ushort, ushort>, convertData_<ushort, short>,
       convertData_<ushort, int>, convertData_<ushort, float>,
       convertData_<ushort, double>, 0 },

     { convertData_<short, uchar>, convertData_<short, schar>,
       convertData_<short, ushort>, convertData_<short, short>,
       convertData_<short, int>, convertData_<short, float>,
       convertData_<short, double>, 0 },

     { convertData_<int, uchar>, convertData_<int, schar>,
       convertData_<int, ushort>, convertData_<int, short>,
       convertData_<int, int>, convertData_<int, float>,
       convertData_<int, double>, 0 },

     { convertData_<float, uchar>, convertData_<float, schar>,
       convertData_<float, ushort>, convertData_<float, short>,
       convertData_<float, int>, convertData_<float, float>,
       convertData_<float, double>, 0 },

     { convertData_<double, uchar>, convertData_<double, schar>,
       convertData_<double, ushort>, convertData_<double, short>,
       convertData_<double, int>, convertData_<double, float>,
       convertData_<double, double>, 0 },

     { 0, 0, 0, 0, 0, 0, 0, 0 }};

    ConvertData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
    CV_Assert( func != 0 );
ConvertScaleData getConvertScaleData(int fromType, int toType)
    static ConvertScaleData tab[][8] =
    {{ convertScaleData_<uchar, uchar>, convertScaleData_<uchar, schar>,
       convertScaleData_<uchar, ushort>, convertScaleData_<uchar, short>,
       convertScaleData_<uchar, int>, convertScaleData_<uchar, float>,
       convertScaleData_<uchar, double>, 0 },

     { convertScaleData_<schar, uchar>, convertScaleData_<schar, schar>,
       convertScaleData_<schar, ushort>, convertScaleData_<schar, short>,
       convertScaleData_<schar, int>, convertScaleData_<schar, float>,
       convertScaleData_<schar, double>, 0 },

     { convertScaleData_<ushort, uchar>, convertScaleData_<ushort, schar>,
       convertScaleData_<ushort, ushort>, convertScaleData_<ushort, short>,
       convertScaleData_<ushort, int>, convertScaleData_<ushort, float>,
       convertScaleData_<ushort, double>, 0 },

     { convertScaleData_<short, uchar>, convertScaleData_<short, schar>,
       convertScaleData_<short, ushort>, convertScaleData_<short, short>,
       convertScaleData_<short, int>, convertScaleData_<short, float>,
       convertScaleData_<short, double>, 0 },

     { convertScaleData_<int, uchar>, convertScaleData_<int, schar>,
       convertScaleData_<int, ushort>, convertScaleData_<int, short>,
       convertScaleData_<int, int>, convertScaleData_<int, float>,
       convertScaleData_<int, double>, 0 },

     { convertScaleData_<float, uchar>, convertScaleData_<float, schar>,
       convertScaleData_<float, ushort>, convertScaleData_<float, short>,
       convertScaleData_<float, int>, convertScaleData_<float, float>,
       convertScaleData_<float, double>, 0 },

     { convertScaleData_<double, uchar>, convertScaleData_<double, schar>,
       convertScaleData_<double, ushort>, convertScaleData_<double, short>,
       convertScaleData_<double, int>, convertScaleData_<double, float>,
       convertScaleData_<double, double>, 0 },

     { 0, 0, 0, 0, 0, 0, 0, 0 }};

    ConvertScaleData func = tab[CV_MAT_DEPTH(fromType)][CV_MAT_DEPTH(toType)];
    CV_Assert( func != 0 );
enum { HASH_SIZE0 = 8 };

static inline void copyElem(const uchar* from, uchar* to, size_t elemSize)
    for( i = 0; i <= elemSize - sizeof(int); i += sizeof(int) )
        *(int*)(to + i) = *(const int*)(from + i);
    for( ; i < elemSize; i++ )

static inline bool isZeroElem(const uchar* data, size_t elemSize)
    for( i = 0; i <= elemSize - sizeof(int); i += sizeof(int) )
        if( *(int*)(data + i) != 0 )
    for( ; i < elemSize; i++ )

SparseMat::Hdr::Hdr( int _dims, const int* _sizes, int _type )
    valueOffset = (int)alignSize(sizeof(SparseMat::Node) +
        sizeof(int)*(dims - CV_MAX_DIM), CV_ELEM_SIZE1(_type));
    nodeSize = alignSize(valueOffset +
        CV_ELEM_SIZE(_type), (int)sizeof(size_t));

    for( i = 0; i < dims; i++ )
        size[i] = _sizes[i];
    for( ; i < CV_MAX_DIM; i++ )

void SparseMat::Hdr::clear()
    hashtab.resize(HASH_SIZE0);
    pool.resize(nodeSize);
    nodeCount = freeList = 0;
SparseMat::SparseMat(const Mat& m, bool try1d)
    : flags(MAGIC_VAL), hdr(0)
    bool is1d = try1d && m.cols == 1;

    const uchar* data = m.data;
    size_t step = m.step, esz = m.elemSize();
    create( 1, &M, m.type() );
    for( i = 0; i < M; i++ )
        const uchar* from = data + step*i;
        if( isZeroElem(from, esz) )
        uchar* to = newNode(&i, hash(i));
        copyElem(from, to, esz);

    int i, j, size[] = {m.rows, m.cols};
    const uchar* data = m.data;
    size_t step = m.step, esz = m.elemSize();
    create( 2, size, m.type() );
    for( i = 0; i < m.rows; i++ )
        for( j = 0; j < m.cols; j++ )
            const uchar* from = data + step*i + esz*j;
            if( isZeroElem(from, esz) )
            uchar* to = newNode(idx, hash(i, j));
            copyElem(from, to, esz);

SparseMat::SparseMat(const MatND& m)
    : flags(MAGIC_VAL), hdr(0)
    create( m.dims, m.size, m.type() );

    int i, idx[CV_MAX_DIM] = {0}, d = m.dims, lastSize = m.size[d - 1];
    size_t esz = m.elemSize();
    uchar* ptr = m.data;

    for( i = 0; i < lastSize; i++, ptr += esz )
        if( isZeroElem(ptr, esz) )
        uchar* to = newNode(idx, hash(idx));
        copyElem( ptr, to, esz );

    for( i = d - 2; i >= 0; i-- )
        ptr += m.step[i] - m.size[i+1]*m.step[i+1];
        if( ++idx[i] < m.size[i] )

SparseMat::SparseMat(const CvSparseMat* m)
    : flags(MAGIC_VAL), hdr(0)
    create( m->dims, &m->size[0], m->type );

    CvSparseMatIterator it;
    CvSparseNode* n = cvInitSparseMatIterator(m, &it);
    size_t esz = elemSize();

    for( ; n != 0; n = cvGetNextSparseNode(&it) )
        const int* idx = CV_NODE_IDX(m, n);
        uchar* to = newNode(idx, hash(idx));
        copyElem((const uchar*)CV_NODE_VAL(m, n), to, esz);
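
/*
    Usage sketch (illustrative): building a SparseMat from a mostly-zero dense
    matrix and iterating over the stored (non-zero) elements. The sizes and
    values are invented for the example.

        cv::Mat dense = cv::Mat::zeros(1000, 1000, CV_32F);
        dense.at<float>(3, 7) = 1.f;
        cv::SparseMat sm(dense);                 // only non-zero elements are kept
        for( cv::SparseMatConstIterator it = sm.begin(); it != sm.end(); ++it )
        {
            const cv::SparseMat::Node* n = it.node();
            printf("(%d,%d) = %g\n", n->idx[0], n->idx[1], it.value<float>());
        }
*/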
2426 void SparseMat::create(int d, const int* _sizes, int _type)
2429 CV_Assert( _sizes && 0 < d && d <= CV_MAX_DIM );
2430 for( i = 0; i < d; i++ )
2431 CV_Assert( _sizes[i] > 0 );
2432 _type = CV_MAT_TYPE(_type);
2433 if( hdr && _type == type() && hdr->dims == d && hdr->refcount == 1 )
2435 for( i = 0; i < d; i++ )
2436 if( _sizes[i] != hdr->size[i] )
2445 flags = MAGIC_VAL | _type;
2446 hdr = new Hdr(d, _sizes, _type);
2449 void SparseMat::copyTo( SparseMat& m ) const
2458 m.create( m.hdr->dims, m.hdr->size, type() );
2459 SparseMatConstIterator from = begin();
2460 size_t i, N = nzcount(), esz = elemSize();
2462 for( i = 0; i < N; i++, ++from )
2464 const Node* n = from.node();
2465 uchar* to = m.newNode(n->idx, n->hashval);
2466 copyElem( from.ptr, to, esz );
2470 void SparseMat::copyTo( Mat& m ) const
2472 CV_Assert( hdr && hdr->dims <= 2 );
2473 m.create( hdr->size[0], hdr->dims == 2 ? hdr->size[1] : 1, type() );
2476 SparseMatConstIterator from = begin();
2477 size_t i, N = nzcount(), esz = elemSize();
2479 if( hdr->dims == 2 )
2481 for( i = 0; i < N; i++, ++from )
2483 const Node* n = from.node();
2484 uchar* to = m.data + m.step*n->idx[0] + esz*n->idx[1];
2485 copyElem( from.ptr, to, esz );
2490 for( i = 0; i < N; i++, ++from )
2492 const Node* n = from.node();
2493 uchar* to = m.data + esz*n->idx[0];
2494 copyElem( from.ptr, to, esz );
void SparseMat::copyTo( MatND& m ) const
{
    CV_Assert( hdr );
    m.create( dims(), hdr->size, type() );
    m = Scalar(0);

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = elemSize();

    for( i = 0; i < N; i++, ++from )
    {
        const Node* n = from.node();
        copyElem( from.ptr, m.ptr(n->idx), esz);
    }
}
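
/* Usage sketch (illustrative): round trip between dense and sparse storage.
   Elements absent from the SparseMat come out as zeros in the dense copy,
   and zeros of a dense matrix are skipped when converting back:

       int sz[] = { 100, 100 };
       cv::SparseMat s(2, sz, CV_64F);
       s.ref<double>(5, 7) = 3.14;
       cv::Mat dense;
       s.copyTo(dense);            // 100x100 CV_64F, zero except at (5,7)
       cv::SparseMat s2(dense);    // sparse again; s2.nzcount() == 1
*/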
void SparseMat::convertTo( SparseMat& m, int rtype, double alpha ) const
{
    int cn = channels();
    if( rtype < 0 )
        rtype = type();
    rtype = CV_MAKETYPE(rtype, cn);
    if( this == &m && rtype != type() )
    {
        SparseMat temp;
        convertTo(temp, rtype, alpha);
        m = temp;
        return;
    }

    CV_Assert(hdr != 0);
    m.create( hdr->dims, hdr->size, rtype );
    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    if( alpha == 1 )
    {
        ConvertData cvtfunc = getConvertData(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.newNode(n->idx, n->hashval);
            cvtfunc( from.ptr, to, cn );
        }
    }
    else
    {
        ConvertScaleData cvtfunc = getConvertScaleData(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.newNode(n->idx, n->hashval);
            cvtfunc( from.ptr, to, cn, alpha, 0 );
        }
    }
}
void SparseMat::convertTo( Mat& m, int rtype, double alpha, double beta ) const
{
    int cn = channels();
    if( rtype < 0 )
        rtype = type();
    rtype = CV_MAKETYPE(rtype, cn);

    CV_Assert( hdr && hdr->dims <= 2 );
    // the dense output gets the requested type; implicit zeros become beta
    m.create( hdr->size[0], hdr->dims == 2 ? hdr->size[1] : 1, rtype );
    m = Scalar(beta);

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = CV_ELEM_SIZE(rtype);

    if( alpha == 1 && beta == 0 )
    {
        ConvertData cvtfunc = getConvertData(type(), rtype);

        if( hdr->dims == 2 )
        {
            for( i = 0; i < N; i++, ++from )
            {
                const Node* n = from.node();
                uchar* to = m.data + m.step*n->idx[0] + esz*n->idx[1];
                cvtfunc( from.ptr, to, cn );
            }
        }
        else
        {
            for( i = 0; i < N; i++, ++from )
            {
                const Node* n = from.node();
                uchar* to = m.data + esz*n->idx[0];
                cvtfunc( from.ptr, to, cn );
            }
        }
    }
    else
    {
        ConvertScaleData cvtfunc = getConvertScaleData(type(), rtype);

        if( hdr->dims == 2 )
        {
            for( i = 0; i < N; i++, ++from )
            {
                const Node* n = from.node();
                uchar* to = m.data + m.step*n->idx[0] + esz*n->idx[1];
                cvtfunc( from.ptr, to, cn, alpha, beta );
            }
        }
        else
        {
            for( i = 0; i < N; i++, ++from )
            {
                const Node* n = from.node();
                uchar* to = m.data + esz*n->idx[0];
                cvtfunc( from.ptr, to, cn, alpha, beta );
            }
        }
    }
}
void SparseMat::convertTo( MatND& m, int rtype, double alpha, double beta ) const
{
    int cn = channels();
    if( rtype < 0 )
        rtype = type();
    rtype = CV_MAKETYPE(rtype, cn);

    CV_Assert( hdr );
    m.create( dims(), hdr->size, rtype );
    m = Scalar(beta);

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount();

    if( alpha == 1 && beta == 0 )
    {
        ConvertData cvtfunc = getConvertData(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn );
        }
    }
    else
    {
        ConvertScaleData cvtfunc = getConvertScaleData(type(), rtype);
        for( i = 0; i < N; i++, ++from )
        {
            const Node* n = from.node();
            uchar* to = m.ptr(n->idx);
            cvtfunc( from.ptr, to, cn, alpha, beta );
        }
    }
}
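
/* Usage sketch (illustrative): convertTo() changes the depth and applies an
   optional affine transform. The sparse->sparse overload deliberately has no
   beta: adding a constant to every implicit zero would make the result dense.

       int sz[] = { 8, 8 };
       cv::SparseMat s64(2, sz, CV_64F), s32;
       s64.ref<double>(0, 0) = 2.0;
       s64.convertTo(s32, CV_32F, 0.5);   // sparse: dst(idx) = 0.5*src(idx)
       cv::Mat m;
       s64.convertTo(m, CV_32F, 1, -1);   // dense: absent elements become -1
*/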
void SparseMat::clear()
{
    if( hdr )
        hdr->clear();
}
SparseMat::operator CvSparseMat*() const
{
    if( !hdr )
        return 0;
    CvSparseMat* m = cvCreateSparseMat(hdr->dims, hdr->size, type());

    SparseMatConstIterator from = begin();
    size_t i, N = nzcount(), esz = elemSize();

    for( i = 0; i < N; i++, ++from )
    {
        const Node* n = from.node();
        uchar* to = cvPtrND(m, n->idx, 0, -2, 0);
        copyElem(from.ptr, to, esz);
    }
    return m;
}
uchar* SparseMat::ptr(int i0, int i1, bool createMissing, size_t* hashval)
{
    CV_Assert( hdr && hdr->dims == 2 );
    size_t h = hashval ? *hashval : hash(i0, i1);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
            return &value<uchar>(elem);
        nidx = elem->next;
    }

    if( !createMissing )
        return 0;

    int idx[] = { i0, i1 };
    return newNode( idx, h );
}
uchar* SparseMat::ptr(int i0, int i1, int i2, bool createMissing, size_t* hashval)
{
    CV_Assert( hdr && hdr->dims == 3 );
    size_t h = hashval ? *hashval : hash(i0, i1, i2);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 &&
            elem->idx[1] == i1 && elem->idx[2] == i2 )
            return &value<uchar>(elem);
        nidx = elem->next;
    }

    if( !createMissing )
        return 0;

    int idx[] = { i0, i1, i2 };
    return newNode( idx, h );
}
uchar* SparseMat::ptr(const int* idx, bool createMissing, size_t* hashval)
{
    CV_Assert( hdr );
    int i, d = hdr->dims;
    size_t h = hashval ? *hashval : hash(idx);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx];
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h )
        {
            for( i = 0; i < d; i++ )
                if( elem->idx[i] != idx[i] )
                    break;
            if( i == d )
                return &value<uchar>(elem);
        }
        nidx = elem->next;
    }

    return createMissing ? newNode(idx, h) : 0;
}
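
/* Usage sketch (illustrative): ptr() is the low-level accessor behind
   ref<>(), value<>() and find<>(). ref<>() calls it with createMissing=true
   and may allocate; value<>()/find<>() never modify the matrix. A hash value
   computed once can be reused across several probes of the same index:

       int sz[] = { 16, 16 };
       cv::SparseMat s(2, sz, CV_32F);
       size_t h = s.hash(3, 4);
       s.ref<float>(3, 4, &h) = 1.f;              // insert, reusing the hash
       const float* p = s.find<float>(3, 4, &h);  // lookup, no allocation
*/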
void SparseMat::erase(int i0, int i1, size_t* hashval)
{
    CV_Assert( hdr && hdr->dims == 2 );
    size_t h = hashval ? *hashval : hash(i0, i1);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 && elem->idx[1] == i1 )
            break;
        previdx = nidx;
        nidx = elem->next;
    }

    if( nidx )
        removeNode(hidx, nidx, previdx);
}
void SparseMat::erase(int i0, int i1, int i2, size_t* hashval)
{
    CV_Assert( hdr && hdr->dims == 3 );
    size_t h = hashval ? *hashval : hash(i0, i1, i2);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h && elem->idx[0] == i0 &&
            elem->idx[1] == i1 && elem->idx[2] == i2 )
            break;
        previdx = nidx;
        nidx = elem->next;
    }

    if( nidx )
        removeNode(hidx, nidx, previdx);
}
void SparseMat::erase(const int* idx, size_t* hashval)
{
    CV_Assert( hdr );
    int i, d = hdr->dims;
    size_t h = hashval ? *hashval : hash(idx);
    size_t hidx = h & (hdr->hashtab.size() - 1), nidx = hdr->hashtab[hidx], previdx=0;
    uchar* pool = &hdr->pool[0];
    while( nidx != 0 )
    {
        Node* elem = (Node*)(pool + nidx);
        if( elem->hashval == h )
        {
            for( i = 0; i < d; i++ )
                if( elem->idx[i] != idx[i] )
                    break;
            if( i == d )
                break;
        }
        previdx = nidx;
        nidx = elem->next;
    }

    if( nidx )
        removeNode(hidx, nidx, previdx);
}
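
/* Usage sketch (illustrative): erase() removes the node, so the element
   reverts to an implicit zero; writing 0 through ref<>() instead keeps
   the node allocated and counted by nzcount():

       int sz[] = { 16, 16 };
       cv::SparseMat s(2, sz, CV_32F);
       s.ref<float>(3, 4) = 1.f;   // nzcount() == 1
       s.erase(3, 4);              // nzcount() == 0 again
*/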
void SparseMat::resizeHashTab(size_t newsize)
{
    newsize = std::max(newsize, (size_t)8);
    if((newsize & (newsize-1)) != 0)
        newsize = (size_t)1 << cvCeil(std::log((double)newsize)/CV_LOG2);

    size_t i, hsize = hdr->hashtab.size();
    vector<size_t> _newh(newsize);
    size_t* newh = &_newh[0];
    for( i = 0; i < newsize; i++ )
        newh[i] = 0;
    uchar* pool = &hdr->pool[0];
    // rehash every chained node into the new table
    for( i = 0; i < hsize; i++ )
    {
        size_t nidx = hdr->hashtab[i];
        while( nidx )
        {
            Node* elem = (Node*)(pool + nidx);
            size_t next = elem->next;
            size_t newhidx = elem->hashval & (newsize - 1);
            elem->next = newh[newhidx];
            newh[newhidx] = nidx;
            nidx = next;
        }
    }
    hdr->hashtab = _newh;
}
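
// The table size is kept a power of two so a bucket index can be computed
// with a mask instead of a modulo; this is the idiom used throughout:
//
//     size_t hidx = h & (hdr->hashtab.size() - 1);   // == h % 2^k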
uchar* SparseMat::newNode(const int* idx, size_t hashval)
{
    const int HASH_MAX_FILL_FACTOR=3;
    CV_Assert(hdr);
    size_t hsize = hdr->hashtab.size();
    if( ++hdr->nodeCount > hsize*HASH_MAX_FILL_FACTOR )
    {
        resizeHashTab(std::max(hsize*2, (size_t)8));
        hsize = hdr->hashtab.size();
    }

    if( !hdr->freeList )
    {
        // grow the node pool and thread the new chunk onto the free list
        size_t i, nsz = hdr->nodeSize, psize = hdr->pool.size(),
            newpsize = std::max(psize*2, 8*nsz);
        hdr->pool.resize(newpsize);
        uchar* pool = &hdr->pool[0];
        hdr->freeList = std::max(psize, nsz);
        for( i = hdr->freeList; i < newpsize - nsz; i += nsz )
            ((Node*)(pool + i))->next = i + nsz;
        ((Node*)(pool + i))->next = 0;
    }
    size_t nidx = hdr->freeList;
    Node* elem = (Node*)&hdr->pool[nidx];
    hdr->freeList = elem->next;
    elem->hashval = hashval;
    size_t hidx = hashval & (hsize - 1);
    elem->next = hdr->hashtab[hidx];
    hdr->hashtab[hidx] = nidx;

    size_t i, d = hdr->dims;
    for( i = 0; i < d; i++ )
        elem->idx[i] = idx[i];

    // zero-initialize the value part so a freshly created element reads as 0
    uchar* p = &value<uchar>(elem);
    memset( p, 0, elemSize() );
    return p;
}
void SparseMat::removeNode(size_t hidx, size_t nidx, size_t previdx)
{
    Node* n = node(nidx);
    if( previdx )
    {
        Node* prev = node(previdx);
        prev->next = n->next;
    }
    else
        hdr->hashtab[hidx] = n->next;
    // return the node to the free list and update the element count
    n->next = hdr->freeList;
    hdr->freeList = nidx;
    --hdr->nodeCount;
}
SparseMatConstIterator::SparseMatConstIterator(const SparseMat* _m)
: m((SparseMat*)_m), hashidx(0), ptr(0)
{
    if( !_m || !_m->hdr )
        return;
    SparseMat::Hdr& hdr = *m->hdr;
    const vector<size_t>& htab = hdr.hashtab;
    size_t i, hsize = htab.size();
    // position the iterator on the first node of the first non-empty bucket
    for( i = 0; i < hsize; i++ )
    {
        size_t nidx = htab[i];
        if( nidx )
        {
            hashidx = i;
            ptr = &hdr.pool[nidx] + hdr.valueOffset;
            break;
        }
    }
}
SparseMatConstIterator& SparseMatConstIterator::operator ++()
{
    if( !ptr || !m || !m->hdr )
        return *this;
    SparseMat::Hdr& hdr = *m->hdr;
    size_t next = ((const SparseMat::Node*)(ptr - hdr.valueOffset))->next;
    if( next )
    {
        // more nodes in the current bucket chain
        ptr = &hdr.pool[next] + hdr.valueOffset;
        return *this;
    }
    // otherwise, scan for the next non-empty bucket
    size_t i = hashidx + 1, sz = hdr.hashtab.size();
    for( ; i < sz; i++ )
    {
        size_t nidx = hdr.hashtab[i];
        if( nidx )
        {
            hashidx = i;
            ptr = &hdr.pool[nidx] + hdr.valueOffset;
            return *this;
        }
    }
    hashidx = sz;
    ptr = 0;
    return *this;
}
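
/* Usage sketch (illustrative): the iterator first follows the chain within a
   bucket, then scans for the next non-empty bucket, so elements come back in
   storage order, not in index order:

       int sz[] = { 10, 10 };
       cv::SparseMat s(2, sz, CV_32F);
       s.ref<float>(1, 1) = 2.f;
       s.ref<float>(9, 0) = 3.f;
       double sum = 0;
       cv::SparseMatConstIterator it = s.begin(), it_end = s.end();
       for( ; it != it_end; ++it )
           sum += *(const float*)it.ptr;  // it.node()->idx gives the indices
*/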
double norm( const SparseMat& src, int normType )
{
    SparseMatConstIterator it = src.begin();

    size_t i, N = src.nzcount();
    normType &= NORM_TYPE_MASK;
    int type = src.type();
    double result = 0;

    CV_Assert( normType == NORM_INF || normType == NORM_L1 || normType == NORM_L2 );

    if( type == CV_32F )
    {
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs((double)*(const float*)it.ptr));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(*(const float*)it.ptr);
        else
            for( i = 0; i < N; i++, ++it )
            {
                double v = *(const float*)it.ptr;
                result += v*v;
            }
    }
    else if( type == CV_64F )
    {
        if( normType == NORM_INF )
            for( i = 0; i < N; i++, ++it )
                result = std::max(result, std::abs(*(const double*)it.ptr));
        else if( normType == NORM_L1 )
            for( i = 0; i < N; i++, ++it )
                result += std::abs(*(const double*)it.ptr);
        else
            for( i = 0; i < N; i++, ++it )
            {
                double v = *(const double*)it.ptr;
                result += v*v;
            }
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );

    if( normType == NORM_L2 )
        result = std::sqrt(result);
    return result;
}
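
/* Usage sketch (illustrative): only single-channel CV_32F/CV_64F matrices
   pass the checks above; the norm is computed over the stored elements:

       int sz[] = { 1000, 1000 };
       cv::SparseMat s(2, sz, CV_32F);
       s.ref<float>(0, 0) = 3.f;
       s.ref<float>(999, 999) = -4.f;
       double linf = cv::norm(s, cv::NORM_INF);   // 4
       double l1   = cv::norm(s, cv::NORM_L1);    // 7
       double l2   = cv::norm(s, cv::NORM_L2);    // 5
*/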
void minMaxLoc( const SparseMat& src, double* _minval, double* _maxval, int* _minidx, int* _maxidx )
{
    SparseMatConstIterator it = src.begin();
    size_t i, N = src.nzcount(), d = src.hdr ? src.hdr->dims : 0;
    int type = src.type();
    const int *minidx = 0, *maxidx = 0;

    if( type == CV_32F )
    {
        float minval = FLT_MAX, maxval = -FLT_MAX;
        for( i = 0; i < N; i++, ++it )
        {
            float v = *(const float*)it.ptr;
            if( v < minval )
            {
                minval = v;
                minidx = it.node()->idx;
            }
            if( v > maxval )
            {
                maxval = v;
                maxidx = it.node()->idx;
            }
        }
        if( _minval )
            *_minval = minval;
        if( _maxval )
            *_maxval = maxval;
    }
    else if( type == CV_64F )
    {
        double minval = DBL_MAX, maxval = -DBL_MAX;
        for( i = 0; i < N; i++, ++it )
        {
            double v = *(const double*)it.ptr;
            if( v < minval )
            {
                minval = v;
                minidx = it.node()->idx;
            }
            if( v > maxval )
            {
                maxval = v;
                maxidx = it.node()->idx;
            }
        }
        if( _minval )
            *_minval = minval;
        if( _maxval )
            *_maxval = maxval;
    }
    else
        CV_Error( CV_StsUnsupportedFormat, "Only 32f and 64f are supported" );

    // the idx pointers stay 0 when the matrix has no non-zero elements
    if( _minidx && minidx )
        for( i = 0; i < d; i++ )
            _minidx[i] = minidx[i];
    if( _maxidx && maxidx )
        for( i = 0; i < d; i++ )
            _maxidx[i] = maxidx[i];
}
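
/* Usage sketch (illustrative): only the stored (non-zero) elements are
   scanned. Any output pointer may be NULL; the index arrays, when given,
   must hold at least src.dims() integers:

       int sz[] = { 10, 10 };
       cv::SparseMat s(2, sz, CV_64F);
       s.ref<double>(2, 3) = -1;
       s.ref<double>(7, 8) = 5;
       double mn, mx;
       int mnidx[2], mxidx[2];
       cv::minMaxLoc(s, &mn, &mx, mnidx, mxidx); // mn=-1 @(2,3), mx=5 @(7,8)
*/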
void normalize( const SparseMat& src, SparseMat& dst, double a, int norm_type )
{
    double scale = 1;
    if( norm_type == CV_L2 || norm_type == CV_L1 || norm_type == CV_C )
    {
        scale = norm( src, norm_type );
        scale = scale > DBL_EPSILON ? a/scale : 0.;
    }
    else
        CV_Error( CV_StsBadArg, "Unknown/unsupported norm type" );

    src.convertTo( dst, -1, scale );
}
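
/* Usage sketch (illustrative): dst is scaled so that its chosen norm equals
   a; a zero-norm input yields an all-zero result:

       int sz[] = { 10, 10 };
       cv::SparseMat s(2, sz, CV_32F), dst;
       s.ref<float>(0, 0) = 3.f;
       s.ref<float>(1, 1) = 4.f;
       cv::normalize(s, dst, 1, cv::NORM_L2);   // elements become 0.6, 0.8
*/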