1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
10 // Intel License Agreement
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000, Intel Corporation, all rights reserved.
14 // Third party copyrights are property of their respective owners.
16 // Redistribution and use in source and binary forms, with or without modification,
17 // are permitted provided that the following conditions are met:
19 // * Redistribution's of source code must retain the above copyright notice,
20 // this list of conditions and the following disclaimer.
22 // * Redistribution's in binary form must reproduce the above copyright notice,
23 // this list of conditions and the following disclaimer in the documentation
24 // and/or other materials provided with the distribution.
26 // * The name of Intel Corporation may not be used to endorse or promote products
27 // derived from this software without specific prior written permission.
29 // This software is provided by the copyright holders and contributors "as is" and
30 // any express or implied warranties, including, but not limited to, the implied
31 // warranties of merchantability and fitness for a particular purpose are disclaimed.
32 // In no event shall the Intel Corporation or contributors be liable for any direct,
33 // indirect, incidental, special, exemplary, or consequential damages
34 // (including, but not limited to, procurement of substitute goods or services;
35 // loss of use, data, or profits; or business interruption) however caused
36 // and on any theory of liability, whether in contract, strict liability,
37 // or tort (including negligence or otherwise) arising in any way out of
38 // the use of this software, even if advised of the possibility of such damage.
44 /****************************************************************************************\
46 \****************************************************************************************/
48 const char* cvTsGetTypeName( int type )
50 static const char* type_names[] = { "8u", "8s", "16u", "16s", "32s", "32f", "64f", "ptr" };
51 return type_names[CV_MAT_DEPTH(type)];
55 int cvTsTypeByName( const char* name )
58 for( i = 0; i < CV_DEPTH_MAX; i++ )
59 if( strcmp(name, cvTsGetTypeName(i)) == 0 )
// Fills matrix 'a' with per-channel uniformly distributed random values,
// with the range for channel k presumably [param0.val[k], param1.val[k])
// — TODO confirm against the missing range-normalization lines.
// NOTE(review): several original lines (braces, case labels, clamping of
// the per-channel range) are absent from this chunk; comments annotate
// only the surviving statements.
65 void cvTsRandUni( CvRNG* rng, CvMat* a, CvScalar param0, CvScalar param1 )
67 int i, j, k, cn, ncols;
68 CvScalar scale = param0;
69 CvScalar delta = param1;
// 2^-32: rescales a 32-bit random integer into [0,1) for the 64f branch.
70 double C = 1./(65536.*65536.);
72 cn = CV_MAT_CN(a->type);
// Convert each channel's (low,high) pair into (scale,delta) coefficients.
75 for( k = 0; k < 4; k++ )
77 double s = scale.val[k] - delta.val[k];
82 delta.val[k] = scale.val[k];
// Row-by-row fill; one branch per matrix depth.
88 for( i = 0; i < a->rows; i++ )
90 uchar* data = a->data.ptr + i*a->step;
92 switch( CV_MAT_DEPTH(a->type) )
// 8u: scale the raw RNG output and saturate to [0,255].
95 for( j = 0; j < ncols; j += cn )
96 for( k = 0; k < cn; k++ )
98 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
99 ((uchar*)data)[j + k] = CV_CAST_8U(val);
103 for( j = 0; j < ncols; j += cn )
104 for( k = 0; k < cn; k++ )
106 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
107 ((char*)data)[j + k] = CV_CAST_8S(val);
111 for( j = 0; j < ncols; j += cn )
112 for( k = 0; k < cn; k++ )
114 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
115 ((ushort*)data)[j + k] = CV_CAST_16U(val);
119 for( j = 0; j < ncols; j += cn )
120 for( k = 0; k < cn; k++ )
122 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
123 ((short*)data)[j + k] = CV_CAST_16S(val);
// 32s: no saturation cast needed, plain int store.
127 for( j = 0; j < ncols; j += cn )
128 for( k = 0; k < cn; k++ )
130 int val = cvFloor( cvTsRandInt(rng)*scale.val[k] + delta.val[k] );
131 ((int*)data)[j + k] = val;
135 for( j = 0; j < ncols; j += cn )
136 for( k = 0; k < cn; k++ )
138 double val = cvTsRandInt(rng)*scale.val[k] + delta.val[k];
139 ((float*)data)[j + k] = (float)val;
// 64f: combine two 32-bit random draws for extra mantissa precision.
143 for( j = 0; j < ncols; j += cn )
144 for( k = 0; k < cn; k++ )
146 double val = cvTsRandInt(rng);
147 val = (val + cvTsRandInt(rng)*C)*scale.val[k] + delta.val[k];
148 ((double*)data)[j + k] = val;
159 void cvTsZero( CvMat* c )
162 width = c->cols*CV_ELEM_SIZE(c->type);
163 for( i = 0; i < c->rows; i++ )
164 memset( c->data.ptr + i*c->step, 0, width );
168 // initializes scaled identity matrix
169 void cvTsSetIdentity( CvMat* c, CvScalar diag_value )
173 width = MIN(c->rows, c->cols);
174 for( i = 0; i < width; i++ )
175 cvSet2D( c, i, i, diag_value );
179 // copies selected region of one array to another array
// Copies 'a' into 'b'. When 'mask' is non-NULL, elements are presumably
// copied only where the mask byte is non-zero — the branch condition lines
// are missing from this chunk; TODO confirm.
180 void cvTsCopy( const CvMat* a, CvMat* b, const CvMat* mask )
185 el_size = CV_ELEM_SIZE(a->type);
// A mask must match 'a' in size and be single-channel 8-bit (u or s).
189 assert( CV_ARE_SIZES_EQ(a,mask) &&
190 (CV_MAT_TYPE(mask->type) == CV_8UC1 ||
191 CV_MAT_TYPE(mask->type) == CV_8SC1 ));
194 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) );
199 for( i = 0; i < a->rows; i++ )
201 uchar* a_data = a->data.ptr + a->step*i;
202 uchar* b_data = b->data.ptr + b->step*i;
// Unmasked path: bulk-copy the whole row in one call.
205 memcpy( b_data, a_data, ncols );
// Masked path: walk the row element by element (el_size bytes each).
208 uchar* m_data = mask->data.ptr + mask->step*i;
210 for( j = 0; j < ncols; j++, b_data += el_size, a_data += el_size )
214 for( k = 0; k < el_size; k++ )
215 b_data[k] = a_data[k];
// Converts matrix 'a' into matrix 'b' element-wise (depths may differ,
// channel counts and sizes must match). Each row is widened to double in
// a temporary buffer, then narrowed to b's depth with saturating casts.
223 void cvTsConvert( const CvMat* a, CvMat* b )
225 int i, j, ncols = b->cols*CV_MAT_CN(b->type);
228 assert( CV_ARE_SIZES_EQ(a,b) && CV_ARE_CNS_EQ(a,b) );
// Row-sized intermediate buffer of doubles (stack-allocated).
229 buf = (double*)cvStackAlloc(ncols*sizeof(buf[0]));
231 for( i = 0; i < b->rows; i++ )
233 uchar* a_data = a->data.ptr + i*a->step;
234 uchar* b_data = b->data.ptr + i*b->step;
// Load pass: read the row from 'a' at its native depth into doubles.
236 switch( CV_MAT_DEPTH(a->type) )
239 for( j = 0; j < ncols; j++ )
240 buf[j] = ((uchar*)a_data)[j];
243 for( j = 0; j < ncols; j++ )
244 buf[j] = ((char*)a_data)[j];
247 for( j = 0; j < ncols; j++ )
248 buf[j] = ((ushort*)a_data)[j];
251 for( j = 0; j < ncols; j++ )
252 buf[j] = ((short*)a_data)[j];
255 for( j = 0; j < ncols; j++ )
256 buf[j] = ((int*)a_data)[j];
259 for( j = 0; j < ncols; j++ )
260 buf[j] = ((float*)a_data)[j];
263 for( j = 0; j < ncols; j++ )
264 buf[j] = ((double*)a_data)[j];
// Store pass: round and saturate the doubles into b's depth.
271 switch( CV_MAT_DEPTH(b->type) )
274 for( j = 0; j < ncols; j++ )
276 int val = cvRound(buf[j]);
277 ((uchar*)b_data)[j] = CV_CAST_8U(val);
281 for( j = 0; j < ncols; j++ )
283 int val = cvRound(buf[j]);
284 ((char*)b_data)[j] = CV_CAST_8S(val);
288 for( j = 0; j < ncols; j++ )
290 int val = cvRound(buf[j]);
291 ((ushort*)b_data)[j] = CV_CAST_16U(val);
295 for( j = 0; j < ncols; j++ )
297 int val = cvRound(buf[j]);
298 ((short*)b_data)[j] = CV_CAST_16S(val);
302 for( j = 0; j < ncols; j++ )
304 int val = cvRound(buf[j]);
305 ((int*)b_data)[j] = CV_CAST_32S(val);
309 for( j = 0; j < ncols; j++ )
310 ((float*)b_data)[j] = CV_CAST_32F(buf[j]);
313 for( j = 0; j < ncols; j++ )
314 ((double*)b_data)[j] = CV_CAST_64F(buf[j]);
323 // extracts a single channel from a multi-channel array
// Copies channel 'coi' of 'a' into single-channel 'b' byte-by-byte;
// depths must match, so one element of 'b' is el_size1 bytes.
324 void cvTsExtract( const CvMat* a, CvMat* b, int coi )
327 int el_size, el_size1, ncols;
// el_size: full multi-channel element of 'a'; el_size1: one channel.
329 el_size = CV_ELEM_SIZE(a->type);
330 el_size1 = CV_ELEM_SIZE(b->type);
// 'coi' must be a valid channel index and 'b' must be single-channel.
333 assert( CV_ARE_DEPTHS_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) &&
334 (unsigned)coi < (unsigned)CV_MAT_CN(a->type) &&
335 CV_MAT_CN(b->type) == 1 );
337 for( i = 0; i < a->rows; i++ )
339 uchar* a_data = a->data.ptr + a->step*i;
340 uchar* b_data = b->data.ptr + b->step*i;
// Offset the source pointer to the requested channel within each element.
341 a_data += el_size1*coi;
342 for( j = 0; j < ncols; j++, b_data += el_size1, a_data += el_size )
344 for( k = 0; k < el_size1; k++ )
345 b_data[k] = a_data[k];
350 // replaces a single channel in a multi-channel array
// Mirror of cvTsExtract: writes single-channel 'a' into channel 'coi'
// of multi-channel 'b', byte-by-byte.
351 void cvTsInsert( const CvMat* a, CvMat* b, int coi )
354 int el_size, el_size1, ncols;
// el_size: full multi-channel element of 'b'; el_size1: one channel of 'a'.
356 el_size = CV_ELEM_SIZE(b->type);
357 el_size1 = CV_ELEM_SIZE(a->type);
360 assert( CV_ARE_DEPTHS_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) &&
361 (unsigned)coi < (unsigned)CV_MAT_CN(b->type) &&
362 CV_MAT_CN(a->type) == 1 );
364 for( i = 0; i < a->rows; i++ )
366 uchar* a_data = a->data.ptr + a->step*i;
367 uchar* b_data = b->data.ptr + b->step*i;
// Offset the destination pointer to the requested channel slot.
368 b_data += el_size1*coi;
369 for( j = 0; j < ncols; j++, b_data += el_size, a_data += el_size1 )
371 for( k = 0; k < el_size1; k++ )
372 b_data[k] = a_data[k];
378 // c = alpha*a + beta*b + gamma
// Weighted element-wise sum with per-channel scalar coefficients.
// 'a' and/or 'b' may be NULL (the corresponding term is dropped);
// calc_abs takes the absolute value of the result before storing.
// All arithmetic is done in double precision via row buffers, then
// saturated into c's depth.
// NOTE(review): allocation-failure handling, case labels and the
// matching free() calls are among the lines missing from this chunk.
379 void cvTsAdd( const CvMat* a, CvScalar alpha, const CvMat* b, CvScalar beta,
380 CvScalar gamma, CvMat* c, int calc_abs )
382 int i, j, k, cn, ncols;
384 double* alpha_buf = 0;
385 double* beta_buf = 0;
386 double* gamma_buf = 0;
394 cn = CV_MAT_CN(c->type);
// Per-row coefficient buffers, one double per (column, channel) slot.
407 assert( CV_ARE_SIZES_EQ(a,c) && CV_MAT_CN(a->type) == cn );
408 buf = (double*)malloc( a->cols * cn * sizeof(buf[0]) );
409 alpha_buf = (double*)malloc( a->cols * cn * sizeof(alpha_buf[0]) );
414 assert( CV_ARE_SIZES_EQ(b,c) && CV_MAT_CN(b->type) == cn );
415 beta_buf = (double*)malloc( b->cols * cn * sizeof(beta_buf[0]) );
419 gamma_buf = (double*)malloc( ncols * sizeof(gamma_buf[0]) );
// Pure-scalar case: fold the abs() into gamma up front.
423 if( !a && !b && calc_abs )
425 for( k = 0; k < cn; k++ )
426 gamma.val[k] = fabs(gamma.val[k]);
// Replicate gamma/alpha/beta scalars across their row-wide buffers
// (i==0: gamma, i==1: alpha, i==2: beta; only the terms present).
429 for( i = 0; i < 1 + (a != 0) + (b != 0); i++ )
431 double* scalar_buf = i == 0 ? gamma_buf : i == 1 ? alpha_buf : beta_buf;
432 CvScalar scalar = i == 0 ? gamma : i == 1 ? alpha : beta;
433 for( j = 0; j < ncols; j += cn )
434 for( k = 0; k < cn; k++ )
435 scalar_buf[j + k] = scalar.val[k];
438 for( i = 0; i < c->rows; i++ )
440 uchar* c_data = c->data.ptr + i*c->step;
// Accumulate alpha*a[row] + gamma into buf at the source depth.
444 uchar* a_data = a->data.ptr + i*a->step;
446 switch( CV_MAT_DEPTH(a->type) )
449 for( j = 0; j < ncols; j++ )
450 buf[j] = ((uchar*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
453 for( j = 0; j < ncols; j++ )
454 buf[j] = ((char*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
457 for( j = 0; j < ncols; j++ )
458 buf[j] = ((ushort*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
461 for( j = 0; j < ncols; j++ )
462 buf[j] = ((short*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
465 for( j = 0; j < ncols; j++ )
466 buf[j] = ((int*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
469 for( j = 0; j < ncols; j++ )
470 buf[j] = ((float*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
473 for( j = 0; j < ncols; j++ )
474 buf[j] = ((double*)a_data)[j]*alpha_buf[j] + gamma_buf[j];
// Add beta*b[row] into the accumulator.
484 uchar* b_data = b->data.ptr + i*b->step;
486 switch( CV_MAT_DEPTH(b->type) )
489 for( j = 0; j < ncols; j++ )
490 buf[j] += ((uchar*)b_data)[j]*beta_buf[j];
493 for( j = 0; j < ncols; j++ )
494 buf[j] += ((char*)b_data)[j]*beta_buf[j];
497 for( j = 0; j < ncols; j++ )
498 buf[j] += ((ushort*)b_data)[j]*beta_buf[j];
501 for( j = 0; j < ncols; j++ )
502 buf[j] += ((short*)b_data)[j]*beta_buf[j];
505 for( j = 0; j < ncols; j++ )
506 buf[j] += ((int*)b_data)[j]*beta_buf[j];
509 for( j = 0; j < ncols; j++ )
510 buf[j] += ((float*)b_data)[j]*beta_buf[j];
513 for( j = 0; j < ncols; j++ )
514 buf[j] += ((double*)b_data)[j]*beta_buf[j];
// Optional |result| before the final store.
526 for( j = 0; j < ncols; j++ )
527 buf[j] = fabs(buf[j]);
// Scalar-only fast path: rows beyond the first replicate the previous row.
532 memcpy( c_data, c_data - c->step, c->cols*CV_ELEM_SIZE(c->type) );
// Store pass: round and saturate into c's depth.
536 switch( CV_MAT_DEPTH(c->type) )
539 for( j = 0; j < ncols; j++ )
541 int val = cvRound(buf[j]);
542 ((uchar*)c_data)[j] = CV_CAST_8U(val);
546 for( j = 0; j < ncols; j++ )
548 int val = cvRound(buf[j]);
549 ((char*)c_data)[j] = CV_CAST_8S(val);
553 for( j = 0; j < ncols; j++ )
555 int val = cvRound(buf[j]);
556 ((ushort*)c_data)[j] = CV_CAST_16U(val);
560 for( j = 0; j < ncols; j++ )
562 int val = cvRound(buf[j]);
563 ((short*)c_data)[j] = CV_CAST_16S(val);
567 for( j = 0; j < ncols; j++ )
569 int val = cvRound(buf[j]);
570 ((int*)c_data)[j] = CV_CAST_32S(val);
574 for( j = 0; j < ncols; j++ )
575 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
578 for( j = 0; j < ncols; j++ )
579 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
// Cleanup: buf may alias gamma_buf in the scalar-only case, so guard.
586 if( buf && buf != gamma_buf )
// c = alpha .* a .* b : per-channel scaled element-wise product.
// Computed in double precision via a row buffer, then saturated to c's depth.
598 void cvTsMul( const CvMat* a, const CvMat* b, CvScalar alpha, CvMat* c )
600 int i, j, k, cn, ncols;
602 double* alpha_buf = 0;
610 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_SIZES_EQ(b,c) &&
611 CV_ARE_TYPES_EQ(a,b) && CV_ARE_CNS_EQ(a,c) );
613 cn = CV_MAT_CN(c->type);
614 ncols = c->cols * cn;
615 alpha_buf = (double*)malloc( ncols * sizeof(alpha_buf[0]) );
616 buf = (double*)malloc( ncols * sizeof(buf[0]) );
// Replicate the per-channel alpha across the whole row buffer.
618 for( j = 0; j < ncols; j += cn )
619 for( k = 0; k < cn; k++ )
620 alpha_buf[j + k] = alpha.val[k];
622 for( i = 0; i < c->rows; i++ )
624 uchar* c_data = c->data.ptr + i*c->step;
625 uchar* a_data = a->data.ptr + i*a->step;
626 uchar* b_data = b->data.ptr + i*b->step;
// Compute pass at the (shared) source depth of a and b.
628 switch( CV_MAT_DEPTH(a->type) )
631 for( j = 0; j < ncols; j++ )
632 buf[j] = (alpha_buf[j]*((uchar*)a_data)[j])*((uchar*)b_data)[j];
635 for( j = 0; j < ncols; j++ )
636 buf[j] = (alpha_buf[j]*((char*)a_data)[j])*((char*)b_data)[j];
639 for( j = 0; j < ncols; j++ )
640 buf[j] = (alpha_buf[j]*((ushort*)a_data)[j])*((ushort*)b_data)[j];
643 for( j = 0; j < ncols; j++ )
644 buf[j] = (alpha_buf[j]*((short*)a_data)[j])*((short*)b_data)[j];
647 for( j = 0; j < ncols; j++ )
648 buf[j] = (alpha_buf[j]*((int*)a_data)[j])*((int*)b_data)[j];
651 for( j = 0; j < ncols; j++ )
652 buf[j] = (alpha_buf[j]*((float*)a_data)[j])*((float*)b_data)[j];
655 for( j = 0; j < ncols; j++ )
656 buf[j] = (alpha_buf[j]*((double*)a_data)[j])*((double*)b_data)[j];
// Store pass: round and saturate into c's depth.
663 switch( CV_MAT_DEPTH(c->type) )
666 for( j = 0; j < ncols; j++ )
668 int val = cvRound(buf[j]);
669 ((uchar*)c_data)[j] = CV_CAST_8U(val);
673 for( j = 0; j < ncols; j++ )
675 int val = cvRound(buf[j]);
676 ((char*)c_data)[j] = CV_CAST_8S(val);
680 for( j = 0; j < ncols; j++ )
682 int val = cvRound(buf[j]);
683 ((ushort*)c_data)[j] = CV_CAST_16U(val);
687 for( j = 0; j < ncols; j++ )
689 int val = cvRound(buf[j]);
690 ((short*)c_data)[j] = CV_CAST_16S(val);
694 for( j = 0; j < ncols; j++ )
696 int val = cvRound(buf[j]);
697 ((int*)c_data)[j] = CV_CAST_32S(val);
701 for( j = 0; j < ncols; j++ )
702 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
705 for( j = 0; j < ncols; j++ )
706 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
// c = alpha .* a ./ b (or alpha ./ b when a == NULL: per-channel
// scaled reciprocal). Division by zero yields 0 rather than trapping.
722 void cvTsDiv( const CvMat* a, const CvMat* b, CvScalar alpha, CvMat* c )
724 int i, j, k, cn, ncols;
726 double* alpha_buf = 0;
736 assert( CV_ARE_SIZES_EQ(a,c) &&
737 CV_ARE_TYPES_EQ(a,b) && CV_ARE_CNS_EQ(a,c) );
740 assert( CV_ARE_SIZES_EQ(b,c) && CV_ARE_CNS_EQ(b,c) );
742 cn = CV_MAT_CN(c->type);
743 ncols = c->cols * cn;
744 alpha_buf = (double*)malloc( ncols * sizeof(alpha_buf[0]) );
745 buf = (double*)malloc( ncols * sizeof(buf[0]) );
// Replicate the per-channel alpha across the whole row buffer.
747 for( j = 0; j < ncols; j += cn )
748 for( k = 0; k < cn; k++ )
749 alpha_buf[j + k] = alpha.val[k];
751 for( i = 0; i < c->rows; i++ )
753 uchar* c_data = c->data.ptr + i*c->step;
// a_data is NULL in the reciprocal case; numerator then defaults to 1.
754 uchar* a_data = a ? a->data.ptr + i*a->step : 0;
755 uchar* b_data = b->data.ptr + i*b->step;
// Compute pass keyed on the denominator's depth; zero denominators map to 0.
757 switch( CV_MAT_DEPTH(b->type) )
760 for( j = 0; j < ncols; j++ )
762 int denom = ((uchar*)b_data)[j];
763 int num = a_data ? ((uchar*)a_data)[j] : 1;
764 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
768 for( j = 0; j < ncols; j++ )
770 int denom = ((char*)b_data)[j];
771 int num = a_data ? ((char*)a_data)[j] : 1;
772 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
776 for( j = 0; j < ncols; j++ )
778 int denom = ((ushort*)b_data)[j];
779 int num = a_data ? ((ushort*)a_data)[j] : 1;
780 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
784 for( j = 0; j < ncols; j++ )
786 int denom = ((short*)b_data)[j];
787 int num = a_data ? ((short*)a_data)[j] : 1;
788 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
792 for( j = 0; j < ncols; j++ )
794 int denom = ((int*)b_data)[j];
795 int num = a_data ? ((int*)a_data)[j] : 1;
796 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
800 for( j = 0; j < ncols; j++ )
802 double denom = ((float*)b_data)[j];
803 double num = a_data ? ((float*)a_data)[j] : 1;
804 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
808 for( j = 0; j < ncols; j++ )
810 double denom = ((double*)b_data)[j];
811 double num = a_data ? ((double*)a_data)[j] : 1;
812 buf[j] = !denom ? 0 : (alpha_buf[j]*num/denom);
// Store pass: round and saturate into c's depth.
820 switch( CV_MAT_DEPTH(c->type) )
823 for( j = 0; j < ncols; j++ )
825 int val = cvRound(buf[j]);
826 ((uchar*)c_data)[j] = CV_CAST_8U(val);
830 for( j = 0; j < ncols; j++ )
832 int val = cvRound(buf[j]);
833 ((char*)c_data)[j] = CV_CAST_8S(val);
837 for( j = 0; j < ncols; j++ )
839 int val = cvRound(buf[j]);
840 ((ushort*)c_data)[j] = CV_CAST_16U(val);
844 for( j = 0; j < ncols; j++ )
846 int val = cvRound(buf[j]);
847 ((short*)c_data)[j] = CV_CAST_16S(val);
851 for( j = 0; j < ncols; j++ )
853 int val = cvRound(buf[j]);
854 ((int*)c_data)[j] = CV_CAST_32S(val);
858 for( j = 0; j < ncols; j++ )
859 ((float*)c_data)[j] = CV_CAST_32F(buf[j]);
862 for( j = 0; j < ncols; j++ )
863 ((double*)c_data)[j] = CV_CAST_64F(buf[j]);
878 // c = min(a,b) or c = max(a,b)
// Element-wise minimum/maximum of two single-channel arrays of equal
// type and size; op_type selects CV_TS_MAX vs. minimum.
879 void cvTsMinMax( const CvMat* a, const CvMat* b, CvMat* c, int op_type )
882 int calc_max = op_type == CV_TS_MAX;
890 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_TYPES_EQ(a,c) &&
891 CV_ARE_SIZES_EQ(b,c) && CV_ARE_TYPES_EQ(b,c) &&
892 CV_MAT_CN(a->type) == 1 );
895 for( i = 0; i < c->rows; i++ )
897 uchar* c_data = c->data.ptr + i*c->step;
898 uchar* a_data = a->data.ptr + i*a->step;
899 uchar* b_data = b->data.ptr + i*b->step;
// One branch per depth; integer depths widen to int for the comparison.
901 switch( CV_MAT_DEPTH(a->type) )
904 for( j = 0; j < ncols; j++ )
906 int aj = ((uchar*)a_data)[j];
907 int bj = ((uchar*)b_data)[j];
908 ((uchar*)c_data)[j] = (uchar)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
912 for( j = 0; j < ncols; j++ )
914 int aj = ((char*)a_data)[j];
915 int bj = ((char*)b_data)[j];
916 ((char*)c_data)[j] = (char)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
920 for( j = 0; j < ncols; j++ )
922 int aj = ((ushort*)a_data)[j];
923 int bj = ((ushort*)b_data)[j];
924 ((ushort*)c_data)[j] = (ushort)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
928 for( j = 0; j < ncols; j++ )
930 int aj = ((short*)a_data)[j];
931 int bj = ((short*)b_data)[j];
932 ((short*)c_data)[j] = (short)(calc_max ? MAX(aj, bj) : MIN(aj,bj));
936 for( j = 0; j < ncols; j++ )
938 int aj = ((int*)a_data)[j];
939 int bj = ((int*)b_data)[j];
940 ((int*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
944 for( j = 0; j < ncols; j++ )
946 float aj = ((float*)a_data)[j];
947 float bj = ((float*)b_data)[j];
948 ((float*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
952 for( j = 0; j < ncols; j++ )
954 double aj = ((double*)a_data)[j];
955 double bj = ((double*)b_data)[j];
956 ((double*)c_data)[j] = calc_max ? MAX(aj, bj) : MIN(aj,bj);
966 // c = min(a,b) or c = max(a,b)
// Scalar variant: c = min(a, s) or c = max(a, s) element-wise for a
// single-channel array. The scalar is pre-converted to each integer
// depth's range ('is') so comparisons use a consistent representation.
967 void cvTsMinMaxS( const CvMat* a, double s, CvMat* c, int op_type )
970 int calc_max = op_type == CV_TS_MAX;
980 assert( CV_ARE_SIZES_EQ(a,c) && CV_ARE_TYPES_EQ(a,c) &&
981 CV_MAT_CN(a->type) == 1 );
// Saturate the scalar once, per depth, before the main loop.
984 switch( CV_MAT_DEPTH(a->type) )
993 is = CV_CAST_16U(is);
996 is = CV_CAST_16S(is);
1002 for( i = 0; i < c->rows; i++ )
1004 uchar* c_data = c->data.ptr + i*c->step;
1005 uchar* a_data = a->data.ptr + i*a->step;
1007 switch( CV_MAT_DEPTH(a->type) )
1010 for( j = 0; j < ncols; j++ )
1012 int aj = ((uchar*)a_data)[j];
1013 ((uchar*)c_data)[j] = (uchar)(calc_max ? MAX(aj, is) : MIN(aj, is));
1017 for( j = 0; j < ncols; j++ )
1019 int aj = ((char*)a_data)[j];
1020 ((char*)c_data)[j] = (char)(calc_max ? MAX(aj, is) : MIN(aj, is));
1024 for( j = 0; j < ncols; j++ )
1026 int aj = ((ushort*)a_data)[j];
1027 ((ushort*)c_data)[j] = (ushort)(calc_max ? MAX(aj, is) : MIN(aj, is));
1031 for( j = 0; j < ncols; j++ )
1033 int aj = ((short*)a_data)[j];
1034 ((short*)c_data)[j] = (short)(calc_max ? MAX(aj, is) : MIN(aj, is));
1038 for( j = 0; j < ncols; j++ )
1040 int aj = ((int*)a_data)[j];
1041 ((int*)c_data)[j] = calc_max ? MAX(aj, is) : MIN(aj, is);
// Float branches compare against the float ('fs') / double ('s') scalar.
1045 for( j = 0; j < ncols; j++ )
1047 float aj = ((float*)a_data)[j];
1048 ((float*)c_data)[j] = calc_max ? MAX(aj, fs) : MIN(aj, fs);
1052 for( j = 0; j < ncols; j++ )
1054 double aj = ((double*)a_data)[j];
1055 ((double*)c_data)[j] = calc_max ? MAX(aj, s) : MIN(aj, s);
1065 // checks that the array does not have NaNs and/or Infs and all the elements are
1066 // within [min_val,max_val). idx is the index of the first "bad" element.
// For integer depths the bounds are pre-rounded inward (ceil/floor), so
// the check reduces to an integer range test; float depths additionally
// reject NaN/Inf. Returns a status code (the success/failure return
// statements are among the lines missing from this chunk).
1067 int cvTsCheck( const CvMat* a, double min_val, double max_val, CvPoint* idx )
1071 int imin = 0, imax = 0;
1072 cn = CV_MAT_CN(a->type);
1075 if( CV_MAT_DEPTH(a->type) <= CV_32S )
// Integer bounds: smallest/largest representable values inside the range.
1077 imin = cvCeil(min_val);
1078 imax = cvFloor(max_val);
1081 for( i = 0; i < a->rows; i++ )
1083 uchar* data = a->data.ptr + a->step*i;
1085 switch( CV_MAT_DEPTH(a->type) )
1088 for( j = 0; j < ncols; j++ )
1090 int val = ((uchar*)data)[j];
1091 if( val < imin || imax < val )
1096 for( j = 0; j < ncols; j++ )
1098 int val = ((char*)data)[j];
1099 if( val < imin || imax < val )
1104 for( j = 0; j < ncols; j++ )
1106 int val = ((ushort*)data)[j];
1107 if( val < imin || imax < val )
1112 for( j = 0; j < ncols; j++ )
1114 int val = ((short*)data)[j];
1115 if( val < imin || imax < val )
1120 for( j = 0; j < ncols; j++ )
1122 int val = ((int*)data)[j];
1123 if( val < imin || imax < val )
// Float branches also reject non-finite values.
1128 for( j = 0; j < ncols; j++ )
1130 double val = ((float*)data)[j];
1131 if( cvIsNaN(val) || cvIsInf(val) || val < min_val || max_val < val )
1136 for( j = 0; j < ncols; j++ )
1138 double val = ((double*)data)[j];
1139 if( cvIsNaN(val) || cvIsInf(val) || val < min_val || max_val < val )
1157 // compares two arrays. max_diff is the maximum actual difference,
1158 // success_err_level is maximum allowed difference, idx is the index of the first
1159 // element for which difference is >success_err_level
1160 // (or index of element with the maximum difference)
// Integer depths compare absolute differences; float depths compare
// either element-wise relative error (|b|-scaled) or error relative to
// the C-norm of the etalon, and also fail on NaN/Inf in either input.
// Several lines (early-exit gotos, return codes) are missing here.
1161 int cvTsCmpEps( const CvMat* check_arr, const CvMat* etalon, double* _max_diff,
1162 double success_err_level, CvPoint* idx, bool element_wise_relative_error )
1172 cn = CV_MAT_CN(check_arr->type);
1173 ncols = check_arr->cols*cn;
1175 *idx = cvPoint(0,0);
1177 assert( CV_ARE_TYPES_EQ(check_arr,etalon) && CV_ARE_SIZES_EQ(check_arr,etalon) );
// Integer tolerance: truncate the allowed error level to an int.
1179 if( CV_MAT_DEPTH(check_arr->type) < CV_32S )
1180 ilevel = cvFloor(success_err_level);
// Global-relative mode: normalize by max(||etalon||_C, 1).
1182 if( CV_MAT_DEPTH(check_arr->type) >= CV_32F && !element_wise_relative_error )
1184 double maxval0 = 1.;
1185 maxval = cvTsNorm( etalon, 0, CV_C, 0 );
1186 maxval = MAX(maxval, maxval0);
1189 for( i = 0; i < check_arr->rows; i++ )
1191 uchar* a_data = check_arr->data.ptr + check_arr->step*i;
1192 uchar* b_data = etalon->data.ptr + etalon->step*i;
1194 switch( CV_MAT_DEPTH(check_arr->type) )
1197 for( j = 0; j < ncols; j++ )
1199 int val = abs(((uchar*)a_data)[j] - ((uchar*)b_data)[j]);
1200 if( val > imaxdiff )
1203 *idx = cvPoint(j,i);
1210 for( j = 0; j < ncols; j++ )
1212 int val = abs(((char*)a_data)[j] - ((char*)b_data)[j]);
1213 if( val > imaxdiff )
1216 *idx = cvPoint(j,i);
1223 for( j = 0; j < ncols; j++ )
1225 int val = abs(((ushort*)a_data)[j] - ((ushort*)b_data)[j]);
1226 if( val > imaxdiff )
1229 *idx = cvPoint(j,i);
1236 for( j = 0; j < ncols; j++ )
1238 int val = abs(((short*)a_data)[j] - ((short*)b_data)[j]);
1239 if( val > imaxdiff )
1242 *idx = cvPoint(j,i);
// 32s: differences may overflow int, so use doubles.
1249 for( j = 0; j < ncols; j++ )
1251 double val = fabs((double)((int*)a_data)[j] - (double)((int*)b_data)[j]);
1255 *idx = cvPoint(j,i);
1256 if( val > success_err_level )
1262 for( j = 0; j < ncols; j++ )
1264 double a_val = ((float*)a_data)[j];
1265 double b_val = ((float*)b_data)[j];
// Non-finite values in either array are an immediate failure.
1267 if( cvIsNaN(a_val) || cvIsInf(a_val) )
1270 *idx = cvPoint(j,i);
1273 if( cvIsNaN(b_val) || cvIsInf(b_val) )
1276 *idx = cvPoint(j,i);
1279 a_val = fabs(a_val - b_val);
// Element-wise mode scales by |b|+eps; global mode by the etalon norm.
1280 threshold = element_wise_relative_error ? fabs(b_val) + 1e-5 : maxval;
1281 if( a_val > threshold*success_err_level )
1283 maxdiff = a_val/threshold;
1284 *idx = cvPoint(j,i);
1290 for( j = 0; j < ncols; j++ )
1292 double a_val = ((double*)a_data)[j];
1293 double b_val = ((double*)b_data)[j];
1295 if( cvIsNaN(a_val) || cvIsInf(a_val) )
1298 *idx = cvPoint(j,i);
1301 if( cvIsNaN(b_val) || cvIsInf(b_val) )
1304 *idx = cvPoint(j,i);
1307 a_val = fabs(a_val - b_val);
1308 threshold = element_wise_relative_error ? fabs(b_val)+FLT_EPSILON : maxval;
1309 if( a_val > threshold*success_err_level )
1311 maxdiff = a_val/threshold;
1312 *idx = cvPoint(j,i);
1326 if( CV_MAT_DEPTH(check_arr->type) < CV_32S )
// Sentinel "infinite" difference used on NaN/Inf failure paths.
1330 maxdiff = exp(1000.);
1331 *_max_diff = maxdiff;
// Wrapper around cvTsCmpEps for arbitrary CvArr inputs: compares the two
// arrays, maps the result to a CvTS failure code, and logs a message
// (with the offending element's position) on failure.
1336 int cvTsCmpEps2( CvTS* ts, const CvArr* _a, const CvArr* _b, double success_err_level,
1337 bool element_wise_relative_error, const char* desc )
1341 CvMat astub, bstub, *a, *b;
1342 CvPoint idx = {0,0};
// Normalize both inputs to CvMat headers.
1345 a = cvGetMat( _a, &astub );
1346 b = cvGetMat( _b, &bstub );
1347 code = cvTsCmpEps( a, b, &diff, success_err_level, &idx,
1348 element_wise_relative_error );
// Translate the comparison result into a test-system failure code.
1353 sprintf( msg, "%s: Too big difference (=%g)", desc, diff );
1354 code = CvTS::FAIL_BAD_ACCURACY;
1357 sprintf( msg, "%s: Invalid output", desc );
1358 code = CvTS::FAIL_INVALID_OUTPUT;
1361 sprintf( msg, "%s: Invalid reference output", desc );
1362 code = CvTS::FAIL_INVALID_OUTPUT;
// Log formatting depends on array shape: scalar, vector, or 2D position.
1370 if( a->rows == 1 && a->cols == 1 )
1372 assert( idx.x == 0 && idx.y == 0 );
1373 ts->printf( CvTS::LOG, "%s\n", msg );
1375 else if( a->rows == 1 || a->cols == 1 )
1377 assert( idx.x == 0 || idx.y == 0 );
1378 ts->printf( CvTS::LOG, "%s at element %d\n", msg, idx.x + idx.y );
1381 ts->printf( CvTS::LOG, "%s at (%d,%d)\n", msg, idx.x, idx.y );
1388 int cvTsCmpEps2_64f( CvTS* ts, const double* val, const double* ref_val, int len,
1389 double eps, const char* param_name )
1391 CvMat _val = cvMat( 1, len, CV_64F, (void*)val );
1392 CvMat _ref_val = cvMat( 1, len, CV_64F, (void*)ref_val );
1394 return cvTsCmpEps2( ts, &_val, &_ref_val, eps, true, param_name );
1397 // compares two arrays. the result is 8s image that takes values -1, 0, 1
// First pass fills 'result' with the three-way comparison sign of a vs b
// (via CV_CMP); the second pass remaps that sign to -1 ("true") / 0
// ("false") according to cmp_op (==, !=, <, <=, >=, >).
1398 void cvTsCmp( const CvMat* a, const CvMat* b, CvMat* result, int cmp_op )
1400 int i = 0, j = 0, ncols;
1403 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) && CV_MAT_CN(a->type) == 1 );
1404 assert( CV_ARE_SIZES_EQ(a,result) &&
1405 (CV_MAT_TYPE(result->type) == CV_8UC1 ||
1406 CV_MAT_TYPE(result->type) == CV_8SC1 ));
1408 for( i = 0; i < a->rows; i++ )
1410 uchar* a_data = a->data.ptr + a->step*i;
1411 uchar* b_data = b->data.ptr + b->step*i;
1412 char* r_data = (char*)(result->data.ptr + result->step*i);
// Pass 1: store the comparison sign at the operands' native depth.
1414 switch( CV_MAT_DEPTH(a->type) )
1417 for( j = 0; j < ncols; j++ )
1419 int a_val = ((uchar*)a_data)[j];
1420 int b_val = ((uchar*)b_data)[j];
1421 r_data[j] = (char)CV_CMP(a_val,b_val);
1425 for( j = 0; j < ncols; j++ )
1427 int a_val = ((char*)a_data)[j];
1428 int b_val = ((char*)b_data)[j];
1429 r_data[j] = (char)CV_CMP(a_val,b_val);
1433 for( j = 0; j < ncols; j++ )
1435 int a_val = ((ushort*)a_data)[j];
1436 int b_val = ((ushort*)b_data)[j];
1437 r_data[j] = (char)CV_CMP(a_val,b_val);
1441 for( j = 0; j < ncols; j++ )
1443 int a_val = ((short*)a_data)[j];
1444 int b_val = ((short*)b_data)[j];
1445 r_data[j] = (char)CV_CMP(a_val,b_val);
1449 for( j = 0; j < ncols; j++ )
1451 int a_val = ((int*)a_data)[j];
1452 int b_val = ((int*)b_data)[j];
1453 r_data[j] = (char)CV_CMP(a_val,b_val);
1457 for( j = 0; j < ncols; j++ )
1459 float a_val = ((float*)a_data)[j];
1460 float b_val = ((float*)b_data)[j];
1461 r_data[j] = (char)CV_CMP(a_val,b_val);
1465 for( j = 0; j < ncols; j++ )
1467 double a_val = ((double*)a_data)[j];
1468 double b_val = ((double*)b_data)[j];
1469 r_data[j] = (char)CV_CMP(a_val,b_val);
// Pass 2: remap the sign to the mask convention -1 (match) / 0 (no match).
1479 for( j = 0; j < ncols; j++ )
1480 r_data[j] = (char)(r_data[j] == 0 ? -1 : 0);
1483 for( j = 0; j < ncols; j++ )
1484 r_data[j] = (char)(r_data[j] != 0 ? -1 : 0);
1487 for( j = 0; j < ncols; j++ )
1488 r_data[j] = (char)(r_data[j] < 0 ? -1 : 0);
1491 for( j = 0; j < ncols; j++ )
1492 r_data[j] = (char)(r_data[j] <= 0 ? -1 : 0);
1495 for( j = 0; j < ncols; j++ )
1496 r_data[j] = (char)(r_data[j] >= 0 ? -1 : 0);
1499 for( j = 0; j < ncols; j++ )
1500 r_data[j] = (char)(r_data[j] > 0 ? -1 : 0);
1508 // compares two arrays. the result is 8s image that takes values -1, 0, 1
// Scalar variant of cvTsCmp: compares every element of 'a' against the
// scalar fval (pre-rounded to 'ival' for integer depths), writing the
// comparison sign first and then remapping to -1/0 per cmp_op.
1509 void cvTsCmpS( const CvMat* a, double fval, CvMat* result, int cmp_op )
1512 int ncols, ival = 0;
1515 if( CV_MAT_DEPTH(a->type) <= CV_32S )
1516 ival = cvRound(fval);
1518 assert( CV_MAT_CN(a->type) == 1 );
1519 assert( CV_ARE_SIZES_EQ(a,result) &&
1520 (CV_MAT_TYPE(result->type) == CV_8UC1 ||
1521 CV_MAT_TYPE(result->type) == CV_8SC1 ));
1523 for( i = 0; i < a->rows; i++ )
1525 uchar* a_data = a->data.ptr + a->step*i;
1526 char* r_data = (char*)(result->data.ptr + result->step*i);
// Pass 1: comparison sign at the array's native depth.
1528 switch( CV_MAT_DEPTH(a->type) )
1531 for( j = 0; j < ncols; j++ )
1533 int a_val = ((uchar*)a_data)[j];
1534 r_data[j] = (char)CV_CMP(a_val,ival);
1538 for( j = 0; j < ncols; j++ )
1540 int a_val = ((char*)a_data)[j];
1541 r_data[j] = (char)CV_CMP(a_val,ival);
1545 for( j = 0; j < ncols; j++ )
1547 int a_val = ((ushort*)a_data)[j];
1548 r_data[j] = (char)CV_CMP(a_val,ival);
1552 for( j = 0; j < ncols; j++ )
1554 int a_val = ((short*)a_data)[j];
1555 r_data[j] = (char)CV_CMP(a_val,ival);
1559 for( j = 0; j < ncols; j++ )
1561 int a_val = ((int*)a_data)[j];
1562 r_data[j] = (char)CV_CMP(a_val,ival);
1566 for( j = 0; j < ncols; j++ )
1568 float a_val = ((float*)a_data)[j];
1569 r_data[j] = (char)CV_CMP(a_val,fval);
1573 for( j = 0; j < ncols; j++ )
1575 double a_val = ((double*)a_data)[j];
1576 r_data[j] = (char)CV_CMP(a_val,fval);
// Pass 2: remap the sign to -1 (predicate holds) / 0 (does not).
1586 for( j = 0; j < ncols; j++ )
1587 r_data[j] = (char)(r_data[j] == 0 ? -1 : 0);
1590 for( j = 0; j < ncols; j++ )
1591 r_data[j] = (char)(r_data[j] != 0 ? -1 : 0);
1594 for( j = 0; j < ncols; j++ )
1595 r_data[j] = (char)(r_data[j] < 0 ? -1 : 0);
1598 for( j = 0; j < ncols; j++ )
1599 r_data[j] = (char)(r_data[j] <= 0 ? -1 : 0);
1602 for( j = 0; j < ncols; j++ )
1603 r_data[j] = (char)(r_data[j] >= 0 ? -1 : 0);
1606 for( j = 0; j < ncols; j++ )
1607 r_data[j] = (char)(r_data[j] > 0 ? -1 : 0);
1616 // calculates norm of a matrix
// Computes the C (max-abs), L1 or L2 norm of 'arr', optionally restricted
// by an 8-bit mask and/or to one channel of interest (coi; 0 = all
// channels). Masked-out elements are replaced by zeros via a per-row
// staging buffer before the accumulation pass. Integer depths accumulate
// in int ('inorm') where safe, otherwise in double ('fnorm').
1617 double cvTsNorm( const CvMat* arr, const CvMat* mask, int norm_type, int coi )
1619 int i = 0, j = 0, k;
1620 int depth, cn0, cn, ncols, el_size1;
1626 cn0 = cn = CV_MAT_CN(arr->type);
1627 ncols = arr->cols*cn;
1628 depth = CV_MAT_DEPTH(arr->type);
1629 el_size1 = CV_ELEM_SIZE(depth);
// Zero element used as the substitute for masked-out pixels.
1630 zerobuf = (uchar*)cvStackAlloc(el_size1*cn);
1631 memset( zerobuf, 0, el_size1*cn);
1635 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
1636 buffer = cvStackAlloc( el_size1*ncols );
1642 for( i = 0; i < arr->rows; i++ )
// When coi != 0, offset to the selected channel (1-based index).
1644 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
1648 const uchar* mdata = mask->data.ptr + mask->step*i;
// Masked path: copy the row into 'buffer', substituting zeros where the
// mask byte is clear; branch on element size (1/2/4/8 bytes).
1653 for( j = 0; j < ncols; j += cn0 )
1655 const uchar* src = *mdata++ ? (uchar*)data + j : zerobuf;
1656 for( k = 0; k < cn0; k++ )
1657 ((uchar*)buffer)[j+k] = src[k];
1662 for( j = 0; j < ncols; j += cn0 )
1664 const short* src = *mdata++ ? (short*)data + j : (short*)zerobuf;
1665 for( k = 0; k < cn0; k++ )
1666 ((short*)buffer)[j+k] = src[k];
1671 for( j = 0; j < ncols; j += cn0 )
1673 const int* src = *mdata++ ? (int*)data + j : (int*)zerobuf;
1674 for( k = 0; k < cn0; k++ )
1675 ((int*)buffer)[j+k] = src[k];
1679 for( j = 0; j < ncols; j += cn0 )
1681 const double* src = *mdata++ ? (double*)data + j : (double*)zerobuf;
1682 for( k = 0; k < cn0; k++ )
1683 ((double*)buffer)[j+k] = src[k];
// From here on, read from the masked staging buffer instead of the row.
1690 data = (const uchar*)buffer;
// Accumulation: one depth branch x one norm-type branch; the loops step
// by 'cn' so only the channel of interest is visited when coi != 0.
1696 if( norm_type == CV_C )
1698 for( j = 0; j < ncols; j += cn )
1700 int val = ((const uchar*)data)[j];
1701 inorm = MAX( inorm, val );
1704 else if( norm_type == CV_L1 )
1707 for( j = 0; j < ncols; j += cn )
1709 int val = ((const uchar*)data)[j];
1717 for( j = 0; j < ncols; j += cn )
1719 int val = ((const uchar*)data)[j];
1726 if( norm_type == CV_C )
1728 for( j = 0; j < ncols; j += cn )
1730 int val = abs(((const char*)data)[j]);
1731 inorm = MAX( inorm, val );
1734 else if( norm_type == CV_L1 )
1737 for( j = 0; j < ncols; j += cn )
1739 int val = abs(((const char*)data)[j]);
1747 for( j = 0; j < ncols; j += cn )
1749 int val = ((const char*)data)[j];
1756 if( norm_type == CV_C )
1758 for( j = 0; j < ncols; j += cn )
1760 int val = ((const ushort*)data)[j];
1761 inorm = MAX( inorm, val );
1764 else if( norm_type == CV_L1 )
1767 for( j = 0; j < ncols; j += cn )
1769 int val = ((const ushort*)data)[j];
1776 for( j = 0; j < ncols; j += cn )
1778 double val = ((const ushort*)data)[j];
1784 if( norm_type == CV_C )
1786 for( j = 0; j < ncols; j += cn )
1788 int val = abs(((const short*)data)[j]);
1789 inorm = MAX( inorm, val );
1792 else if( norm_type == CV_L1 )
1795 for( j = 0; j < ncols; j += cn )
1797 int val = abs(((const short*)data)[j]);
1804 for( j = 0; j < ncols; j += cn )
1806 double val = ((const short*)data)[j];
1812 if( norm_type == CV_C )
1814 for( j = 0; j < ncols; j += cn )
1816 int val = abs(((const int*)data)[j]);
1817 inorm = MAX( inorm, val );
1820 else if( norm_type == CV_L1 )
// 32s L1/L2 accumulate in double to avoid integer overflow.
1822 for( j = 0; j < ncols; j += cn )
1824 double val = fabs((double)((const int*)data)[j]);
1830 for( j = 0; j < ncols; j += cn )
1832 double val = ((const int*)data)[j];
1838 if( norm_type == CV_C )
1840 for( j = 0; j < ncols; j += cn )
1842 double val = fabs((double)((const float*)data)[j]);
1843 fnorm = MAX( fnorm, val );
1846 else if( norm_type == CV_L1 )
1848 for( j = 0; j < ncols; j += cn )
1850 double val = fabs((double)((const float*)data)[j]);
1856 for( j = 0; j < ncols; j += cn )
1858 double val = ((const float*)data)[j];
1864 if( norm_type == CV_C )
1866 for( j = 0; j < ncols; j += cn )
1868 double val = fabs(((const double*)data)[j]);
1869 fnorm = MAX( fnorm, val );
1872 else if( norm_type == CV_L1 )
1874 for( j = 0; j < ncols; j += cn )
1876 double val = fabs(((const double*)data)[j]);
1882 for( j = 0; j < ncols; j += cn )
1884 double val = ((const double*)data)[j];
// Finalize: L2 takes the square root; integer C-norms presumably copy
// inorm into fnorm here — the assignment line is missing from this chunk.
1895 if( norm_type == CV_L2 )
1896 fnorm = sqrt( fnorm );
1897 else if( depth < CV_32F && norm_type == CV_C )
1904 // retrieves mean, standard deviation and the number of nonzero mask pixels
// NOTE(review): this excerpt interleaves original file line numbers; gaps in that
// numbering mean lines (braces, switch/case labels, breaks, the return) are elided.
// Reference (slow but simple) implementation used by the test suite to validate
// the optimized library routine (presumably cvAvgSdv) — TODO confirm against caller.
1905 int cvTsMeanStdDevNonZero( const CvMat* arr, const CvMat* mask,
1906 CvScalar* _mean, CvScalar* _stddev, int coi )
1908 int i = 0, j = 0, k;
1909 int depth, cn0, cn, cols, ncols, el_size1;
// Per-channel accumulators: sum of values and sum of squares.
1910 CvScalar sum = cvScalar(0), sqsum = cvScalar(0);
// Integer accumulators used for the small integer depths (8u/8s/16u/16s)
// to avoid per-pixel double arithmetic; flushed into sum/sqsum per row.
1912 int isum[4], isqsum[4];
1916 cn0 = cn = CV_MAT_CN(arr->type);
1918 ncols = arr->cols*cn;
1919 depth = CV_MAT_DEPTH(arr->type);
1920 el_size1 = CV_ELEM_SIZE(depth);
1923 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
// All-ones stack buffer; presumably substitutes for a missing mask — the branch
// selecting it is elided here, TODO confirm.
1927 maskbuf = (uchar*)cvStackAlloc( cols );
1928 memset( maskbuf, 1, cols );
1929 nonzero = cols*arr->rows;
1935 for( i = 0; i < arr->rows; i++ )
// (coi - (coi != 0)) maps COI 1..cn to channel offset 0..cn-1; COI 0 gives offset 0.
1937 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
// Count nonzero mask pixels for this row.
1942 mdata = mask->data.ptr + mask->step*i;
1943 for( j = 0; j < cols; j++ )
1944 nonzero += mdata[j] != 0;
1951 // if only a number of pixels in the mask is needed, skip the rest of the loop body
1952 if( !_mean && !_stddev )
// --- 8u accumulation (case label elided) ---
1958 for( k = 0; k < cn; k++ )
1959 isum[k] = isqsum[k] = 0;
1960 for( j = 0; j < ncols; j += cn0 )
1963 for( k = 0; k < cn; k++ )
1965 int val = ((const uchar*)data)[j+k];
1967 isqsum[k] += val*val;
1970 for( k = 0; k < cn; k++ )
1972 sum.val[k] += isum[k];
1973 sqsum.val[k] += isqsum[k];
// --- 8s accumulation ---
1977 for( k = 0; k < cn; k++ )
1978 isum[k] = isqsum[k] = 0;
1979 for( j = 0; j < ncols; j += cn0 )
1982 for( k = 0; k < cn; k++ )
1984 int val = ((const char*)data)[j+k];
1986 isqsum[k] += val*val;
1989 for( k = 0; k < cn; k++ )
1991 sum.val[k] += isum[k];
1992 sqsum.val[k] += isqsum[k];
// --- 16u accumulation: squares go straight to double (would overflow int) ---
1996 for( k = 0; k < cn; k++ )
1998 for( j = 0; j < ncols; j += cn0 )
2001 for( k = 0; k < cn; k++ )
2003 int val = ((const ushort*)data)[j+k];
2005 sqsum.val[k] += ((double)val)*val;
2008 for( k = 0; k < cn; k++ )
2009 sum.val[k] += isum[k];
// --- 16s accumulation ---
2012 for( k = 0; k < cn; k++ )
2014 for( j = 0; j < ncols; j += cn0 )
2017 for( k = 0; k < cn; k++ )
2019 int val = ((const short*)data)[j+k];
2021 sqsum.val[k] += ((double)val)*val;
2024 for( k = 0; k < cn; k++ )
2025 sum.val[k] += isum[k];
// --- 32s: accumulate directly in double ---
2028 for( j = 0; j < ncols; j += cn0 )
2031 for( k = 0; k < cn; k++ )
2033 double val = ((const int*)data)[j+k];
2035 sqsum.val[k] += val*val;
// --- 32f ---
2040 for( j = 0; j < ncols; j += cn0 )
2043 for( k = 0; k < cn; k++ )
2045 double val = ((const float*)data)[j+k];
2047 sqsum.val[k] += val*val;
// --- 64f ---
2052 for( j = 0; j < ncols; j += cn0 )
2055 for( k = 0; k < cn; k++ )
2057 double val = ((const double*)data)[j+k];
2059 sqsum.val[k] += val*val;
// Finalize: mean = sum/nonzero; stddev = sqrt(max(E[x^2] - mean^2, 0)).
// MAX(t,0) guards against tiny negative t from floating-point round-off.
2069 inv_area = nonzero ? 1./nonzero : 0.;
2070 for( k = 0; k < cn; k++ )
2072 sum.val[k] *= inv_area;
2073 double t = sqsum.val[k]*inv_area - sum.val[k]*sum.val[k];
2074 sqsum.val[k] = sqrt(MAX(t, 0));
2083 // retrieves global extremums and their positions
// NOTE(review): gaps in the embedded line numbering show that braces, case labels,
// value-update statements (e.g. iminval = val) and the final result stores are
// elided from this excerpt.
// Reference implementation of min/max search with optional mask and COI; integer
// depths track extrema in ints (iminval/imaxval), float depths in doubles.
2084 void cvTsMinMaxLoc( const CvMat* arr, const CvMat* mask,
2085 double* _minval, double* _maxval,
2086 CvPoint* _minidx, CvPoint* _maxidx, int coi )
2089 int depth, cn, cols, ncols, el_size1;
// (-1,-1) marks "not found" (e.g. all-zero mask).
2090 CvPoint minidx = {-1,-1}, maxidx = {-1,-1};
2092 int iminval = INT_MAX, imaxval = INT_MIN;
2093 double minval = DBL_MAX, maxval = -minval;
2095 cn = CV_MAT_CN(arr->type);
2097 ncols = arr->cols*cn;
2098 depth = CV_MAT_DEPTH(arr->type);
2099 el_size1 = CV_ELEM_SIZE(depth);
2103 assert( CV_ARE_SIZES_EQ( arr, mask ) && CV_IS_MASK_ARR(mask) );
// All-ones buffer used as the mask row when no mask is given (see mdata below).
2107 maskbuf = (uchar*)cvStackAlloc( cols );
2108 memset( maskbuf, 1, cols );
// Multi-channel arrays require an explicit COI; the handling here is elided.
2111 if( coi == 0 && cn > 1 )
2117 for( i = 0; i < arr->rows; i++ )
// Offset to the selected channel (COI 1..cn -> 0..cn-1; COI 0 -> 0).
2119 const uchar* data = arr->data.ptr + arr->step*i + (coi - (coi != 0))*el_size1;
2120 const uchar* mdata = mask ? mask->data.ptr + mask->step*i : maskbuf;
// --- 8u scan (per-depth case labels elided) ---
2125 for( j = 0; j < ncols; j += cn, mdata++ )
2127 int val = ((const uchar*)data)[j];
2128 if( val < iminval && *mdata )
2131 minidx = cvPoint(j,i);
2133 if( val > imaxval && *mdata )
2136 maxidx = cvPoint(j,i);
// --- 8s scan ---
2141 for( j = 0; j < ncols; j += cn, mdata++ )
2143 int val = ((const char*)data)[j];
2144 if( val < iminval && *mdata )
2147 minidx = cvPoint(j,i);
2149 if( val > imaxval && *mdata )
2152 maxidx = cvPoint(j,i);
// --- 16u scan ---
2157 for( j = 0; j < ncols; j += cn, mdata++ )
2159 int val = ((const ushort*)data)[j];
2160 if( val < iminval && *mdata )
2163 minidx = cvPoint(j,i);
2165 if( val > imaxval && *mdata )
2168 maxidx = cvPoint(j,i);
// --- 16s scan ---
2173 for( j = 0; j < ncols; j += cn, mdata++ )
2175 int val = ((const short*)data)[j];
2176 if( val < iminval && *mdata )
2179 minidx = cvPoint(j,i);
2181 if( val > imaxval && *mdata )
2184 maxidx = cvPoint(j,i);
// --- 32s scan ---
2189 for( j = 0; j < ncols; j += cn, mdata++ )
2191 int val = ((const int*)data)[j];
2192 if( val < iminval && *mdata )
2195 minidx = cvPoint(j,i);
2197 if( val > imaxval && *mdata )
2200 maxidx = cvPoint(j,i);
// --- 32f scan: compares against the double-valued minval/maxval ---
2205 for( j = 0; j < ncols; j += cn, mdata++ )
2207 float val = ((const float*)data)[j];
2208 if( val < minval && *mdata )
2211 minidx = cvPoint(j,i);
2213 if( val > maxval && *mdata )
2216 maxidx = cvPoint(j,i);
// --- 64f scan ---
2221 for( j = 0; j < ncols; j += cn, mdata++ )
2223 double val = ((const double*)data)[j];
2224 if( val < minval && *mdata )
2227 minidx = cvPoint(j,i);
2229 if( val > maxval && *mdata )
2232 maxidx = cvPoint(j,i);
// No pixel passed the mask: report zeros (condition elided).
2243 minval = maxval = 0;
// For integer depths, publish the integer extrema as doubles.
2246 if( depth < CV_32F )
2247 minval = iminval, maxval = imaxval;
// Elementwise bitwise logic c = a OP b, applied byte-by-byte over each row
// (ncols counts bytes, so this works for any depth/channel combination).
// NOTE(review): braces, 'break's and the switch header are elided in this excerpt
// (see the gaps in the embedded line numbers).
2266 void cvTsLogic( const CvMat* a, const CvMat* b, CvMat* c, int logic_op )
2268 int i = 0, j = 0, ncols;
// Row width in bytes, not elements.
2269 ncols = a->cols*CV_ELEM_SIZE(a->type);
2271 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_SIZES_EQ(a,b) );
2272 assert( CV_ARE_TYPES_EQ(a,c) && CV_ARE_SIZES_EQ(a,c) );
2274 for( i = 0; i < a->rows; i++ )
2276 uchar* a_data = a->data.ptr + a->step*i;
2277 uchar* b_data = b->data.ptr + b->step*i;
2278 uchar* c_data = c->data.ptr + c->step*i;
2282 case CV_TS_LOGIC_AND:
2283 for( j = 0; j < ncols; j++ )
2284 c_data[j] = (uchar)(a_data[j] & b_data[j]);
2286 case CV_TS_LOGIC_OR:
2287 for( j = 0; j < ncols; j++ )
2288 c_data[j] = (uchar)(a_data[j] | b_data[j]);
2290 case CV_TS_LOGIC_XOR:
2291 for( j = 0; j < ncols; j++ )
2292 c_data[j] = (uchar)(a_data[j] ^ b_data[j]);
// Elementwise bitwise logic with a scalar: c = a OP s. The scalar is converted
// to the matrix depth, replicated into one full row (b_data), and then the same
// byte-wise loops as cvTsLogic are applied against that row.
// NOTE(review): excerpt is gapped — the union declaration for 'buf', braces,
// 'break's, and the free(b_data) (presumably at the end) are elided; TODO confirm
// b_data is released.
2301 void cvTsLogicS( const CvMat* a, CvScalar s, CvMat* c, int logic_op )
2303 int i = 0, j = 0, k;
2304 int cn, ncols, elem_size;
2316 cn = CV_MAT_CN(a->type);
2317 elem_size = CV_ELEM_SIZE(a->type);
// Row width in bytes.
2318 ncols = a->cols * elem_size;
2319 b_data = (uchar*)malloc( ncols );
2321 assert( CV_ARE_TYPES_EQ(a,c) && CV_ARE_SIZES_EQ(a,c) );
// NOT is implemented as XOR with all-ones bytes (memset of -1 = 0xFF).
2323 if( logic_op == CV_TS_LOGIC_NOT )
2325 memset( b_data, -1, ncols );
2326 logic_op = CV_TS_LOGIC_XOR;
// Convert the scalar to the source depth, one channel at a time
// (case labels for each depth are elided in this excerpt).
2330 switch( CV_MAT_DEPTH(a->type) )
2333 for( k = 0; k < cn; k++ )
2335 int val = cvRound(s.val[k]);
2336 buf.ptr[k] = CV_CAST_8U(val);
2340 for( k = 0; k < cn; k++ )
2342 int val = cvRound(s.val[k]);
2343 buf.c[k] = CV_CAST_8S(val);
2347 for( k = 0; k < cn; k++ )
2349 int val = cvRound(s.val[k]);
2350 buf.w[k] = CV_CAST_16U(val);
2354 for( k = 0; k < cn; k++ )
2356 int val = cvRound(s.val[k]);
2357 buf.s[k] = CV_CAST_16S(val);
2361 for( k = 0; k < cn; k++ )
2363 int val = cvRound(s.val[k]);
2364 buf.i[k] = CV_CAST_32S(val);
2368 for( k = 0; k < cn; k++ )
2370 double val = s.val[k];
2371 buf.f[k] = CV_CAST_32F(val);
2375 for( k = 0; k < cn; k++ )
2377 double val = s.val[k];
2378 buf.d[k] = CV_CAST_64F(val);
// Tile the converted element across the whole row buffer.
2386 for( j = 0; j < ncols; j += elem_size )
2387 memcpy( b_data + j, buf.ptr, elem_size );
2390 for( i = 0; i < a->rows; i++ )
2392 uchar* a_data = a->data.ptr + a->step*i;
2393 uchar* c_data = c->data.ptr + c->step*i;
2397 case CV_TS_LOGIC_AND:
2398 for( j = 0; j < ncols; j++ )
2399 c_data[j] = (uchar)(a_data[j] & b_data[j]);
2401 case CV_TS_LOGIC_OR:
2402 for( j = 0; j < ncols; j++ )
2403 c_data[j] = (uchar)(a_data[j] | b_data[j]);
2405 case CV_TS_LOGIC_XOR:
2406 for( j = 0; j < ncols; j++ )
2407 c_data[j] = (uchar)(a_data[j] ^ b_data[j]);
// Reference GEMM: d = alpha*op(a)*op(b) + beta*op(c), where op() optionally
// transposes per the CV_TS_GEMM_*_T flags. Supports 1-channel (real) and
// 2-channel (complex) CV_32F/CV_64F matrices.
// Transposition is implemented without copying: swapping (rows,cols) and
// (step,delta) turns a row-major walk into a column-major one.
// NOTE(review): excerpt is gapped — braces, pointer increments inside the k-loops
// (a_data/b_data stepping by a_delta/b_step, presumably), and one store
// (d_data[j*2] for the complex 64f case) are elided.
2420 void cvTsGEMM( const CvMat* a, const CvMat* b, double alpha,
2421 const CvMat* c, double beta, CvMat* d, int flags )
2424 int a_rows, a_cols, b_rows, b_cols;
2425 int c_rows, c_cols, d_rows, d_cols;
// step = distance between rows, delta = distance between elements (both in
// units of one channel element, hence the division by el_size below).
2427 int a_step, a_delta, b_step, b_delta;
2428 int c_step, c_delta, d_step;
2430 a_rows = a->rows; a_cols = a->cols;
2431 cn = CV_MAT_CN(a->type);
2432 el_size = CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
2433 a_step = a->step / el_size; a_delta = cn;
2434 d_rows = d->rows; d_cols = d->cols;
2435 b_rows = b->rows; b_cols = b->cols;
2436 b_step = b->step / el_size; b_delta = cn;
2437 c_rows = c ? c->rows : 0; c_cols = c ? c->cols : 0;
2438 c_step = c ? c->step / el_size : 0; c_delta = c ? cn : 0;
2439 d_step = d->step / el_size;
2441 assert( CV_ARE_TYPES_EQ(a,b) && CV_ARE_TYPES_EQ(a,d) );
// cn == 1: real, cn == 2: complex. Nothing larger is supported.
2442 assert( CV_MAT_CN(a->type) <= 2 );
// Logical transposes: swap dimensions and strides instead of moving data.
2444 if( flags & CV_TS_GEMM_A_T )
2446 CV_SWAP( a_rows, a_cols, i );
2447 CV_SWAP( a_step, a_delta, i );
2450 if( flags & CV_TS_GEMM_B_T )
2452 CV_SWAP( b_rows, b_cols, i );
2453 CV_SWAP( b_step, b_delta, i );
2456 if( flags & CV_TS_GEMM_C_T )
2458 CV_SWAP( c_rows, c_cols, i );
2459 CV_SWAP( c_step, c_delta, i );
2462 assert( a_rows == d_rows && a_cols == b_rows && b_cols == d_cols );
// d must not alias a or b (in-place GEMM is not supported here).
2463 assert( a->data.ptr != d->data.ptr && b->data.ptr != d->data.ptr );
2467 assert( CV_ARE_TYPES_EQ(a,c) && c_rows == d_rows && c_cols == d_cols );
2468 assert( c->data.ptr != d->data.ptr || (flags & CV_TS_GEMM_C_T) == 0 );
// --- single-precision path ---
2471 if( CV_MAT_DEPTH(a->type) == CV_32F )
2473 float* a_data0 = a->data.fl;
2474 float* b_data0 = b->data.fl;
2475 float* c_data0 = c ? c->data.fl : 0;
2476 float* d_data = d->data.fl;
2478 for( i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2480 for( j = 0; j < d_cols; j++ )
2482 float* a_data = a_data0;
2483 float* b_data = b_data0 + j*b_delta;
2484 float* c_data = c_data0 + j*c_delta;
// Real dot product, accumulated in double for accuracy.
2489 for( k = 0; k < a_cols; k++ )
2491 s += ((double)a_data[0])*b_data[0];
2495 d_data[j] = (float)(s*alpha + (c_data ? c_data[0]*beta : 0));
// Complex (2-channel) dot product: (re,im) pairs.
2499 double s_re = 0, s_im = 0;
2501 for( k = 0; k < a_cols; k++ )
2503 s_re += ((double)a_data[0])*b_data[0] - ((double)a_data[1])*b_data[1];
2504 s_im += ((double)a_data[0])*b_data[1] + ((double)a_data[1])*b_data[0];
2514 s_re += c_data[0]*beta;
2515 s_im += c_data[1]*beta;
2518 d_data[j*2] = (float)s_re;
2519 d_data[j*2+1] = (float)s_im;
// --- double-precision path (mirrors the 32f path) ---
2524 else if( CV_MAT_DEPTH(a->type) == CV_64F )
2526 double* a_data0 = a->data.db;
2527 double* b_data0 = b->data.db;
2528 double* c_data0 = c ? c->data.db : 0;
2529 double* d_data = d->data.db;
2531 for( i = 0; i < d_rows; i++, d_data += d_step, c_data0 += c_step, a_data0 += a_step )
2533 for( j = 0; j < d_cols; j++ )
2535 double* a_data = a_data0;
2536 double* b_data = b_data0 + j*b_delta;
2537 double* c_data = c_data0 + j*c_delta;
2542 for( k = 0; k < a_cols; k++ )
2544 s += a_data[0]*b_data[0];
2548 d_data[j] = s*alpha + (c_data ? c_data[0]*beta : 0);
2552 double s_re = 0, s_im = 0;
2554 for( k = 0; k < a_cols; k++ )
2556 s_re += a_data[0]*b_data[0] - a_data[1]*b_data[1];
2557 s_im += a_data[0]*b_data[1] + a_data[1]*b_data[0];
2566 s_re += c_data[0]*beta;
2567 s_im += c_data[1]*beta;
2571 d_data[j*2+1] = s_im;
// Builds a CvMat header over a rectangular sub-region of 'a' (no data copied);
// 'header' receives the result. The return statement is elided in this excerpt
// but presumably returns 'header' — TODO confirm.
2583 CvMat* cvTsSelect( const CvMat* a, CvMat* header, CvRect rect )
2588 h = cvMat( rect.height, rect.width, a->type );
2589 el_size = CV_ELEM_SIZE(a->type);
// Point at the top-left element of the rectangle inside a's data.
2591 h.data.ptr = a->data.ptr + rect.y*a->step + rect.x*el_size;
// Single-row views get step 0 (the step is never used for one row).
2592 h.step = rect.height > 1 ? a->step : 0;
// Recompute the continuity flag: a sub-rect is continuous only if it is a
// single row or spans full rows of the parent.
2593 h.type &= ~CV_MAT_CONT_FLAG;
2594 if( rect.height == 1 || h.step == h.cols*el_size )
2595 h.type |= CV_MAT_CONT_FLAG;
// Returns the minimum representable value for the given matrix depth
// (the per-depth case labels/returns are entirely elided in this excerpt).
2601 double cvTsMinVal( int type )
2603 switch( CV_MAT_DEPTH(type) )
// Returns the maximum representable value for the given matrix depth
// (the per-depth case labels/returns are entirely elided in this excerpt).
2624 double cvTsMaxVal( int type )
2626 switch( CV_MAT_DEPTH(type) )
// Copies 'a' into the interior of the larger matrix 'b' at offset 'ofs', then
// fills the surrounding border according to border_mode: constant fill,
// replicate nearest edge row/column, or reflect. Used to prepare padded input
// for the reference filtering functions below.
// NOTE(review): excerpt is gapped — braces, the initialization of j/dir in the
// REFLECT branches, and the dir-flipping statements after the (unsigned) bound
// checks are elided.
2647 void cvTsPrepareToFilter( const CvMat* a, CvMat* b, CvPoint ofs,
2648 int border_mode, CvScalar fill_val )
// The source must fit inside b at the given offset.
2653 assert( 0 <= ofs.x && ofs.x <= b->cols - a->cols &&
2654 0 <= ofs.y && ofs.y <= b->rows - a->rows );
// Place the source into the interior of b.
2656 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, a->rows ));
2657 cvTsCopy( a, &temp, 0 );
2659 assert( border_mode == CV_TS_BORDER_FILL ||
2660 border_mode == CV_TS_BORDER_REPLICATE ||
2661 border_mode == CV_TS_BORDER_REFLECT );
// ---- top border ----
2665 if( border_mode == CV_TS_BORDER_FILL )
2667 cvTsSelect( b, &temp, cvRect( ofs.x, 0, a->cols, ofs.y ));
// cvTsAdd with two null sources acts as a constant fill with fill_val.
2668 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2670 else if( border_mode == CV_TS_BORDER_REPLICATE || a->rows == 1 )
// Replicate the first interior row upward.
2672 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, 1 ));
2673 for( i = ofs.y-1; i >= 0; i-- )
2675 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2676 cvTsCopy( &temp, &temp2, 0 );
2679 else if( border_mode == CV_TS_BORDER_REFLECT )
// Mirror rows; j walks back and forth inside the source, dir flips at the
// edges (the flip statement is elided here).
2682 for( i = ofs.y-1; i >= 0; i-- )
2684 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y+j, a->cols, 1 ));
2685 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2686 cvTsCopy( &temp, &temp2, 0 );
2687 if( (unsigned)(j + dir) >= (unsigned)a->rows )
// ---- bottom border ----
2695 if( ofs.y < b->rows )
2697 if( border_mode == CV_TS_BORDER_FILL )
2699 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y, a->cols, b->rows - ofs.y ));
2700 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2702 else if( border_mode == CV_TS_BORDER_REPLICATE || a->rows == 1 )
2704 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y - 1, a->cols, 1 ));
2705 for( i = ofs.y; i < b->rows; i++ )
2707 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2708 cvTsCopy( &temp, &temp2, 0 );
// REFLECT: start mirroring from the second-to-last source row, moving up.
2713 j = a->rows - 2; dir = -1;
2714 for( i = ofs.y; i < b->rows; i++ )
2716 cvTsSelect( b, &temp, cvRect( ofs.x, ofs.y-a->rows+j, a->cols, 1 ));
2717 cvTsSelect( b, &temp2, cvRect( ofs.x, i, a->cols, 1 ));
2718 cvTsCopy( &temp, &temp2, 0 );
2719 if( (unsigned)(j + dir) >= (unsigned)a->rows )
// ---- left border (whole columns of b, including corners) ----
2728 if( border_mode == CV_TS_BORDER_FILL )
2730 cvTsSelect( b, &temp, cvRect( 0, 0, ofs.x, b->rows ));
2731 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2733 else if( border_mode == CV_TS_BORDER_REPLICATE || a->cols == 1 )
2735 cvTsSelect( b, &temp, cvRect( ofs.x, 0, 1, b->rows ));
2736 for( i = ofs.x-1; i >= 0; i-- )
2738 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2739 cvTsCopy( &temp, &temp2, 0 );
2742 else if( border_mode == CV_TS_BORDER_REFLECT )
2745 for( i = ofs.x-1; i >= 0; i-- )
2747 cvTsSelect( b, &temp, cvRect( ofs.x+j, 0, 1, b->rows ));
2748 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2749 cvTsCopy( &temp, &temp2, 0 );
2750 if( (unsigned)(j + dir) >= (unsigned)a->cols )
// ---- right border ----
2758 if( ofs.x < b->cols )
2760 if( border_mode == CV_TS_BORDER_FILL )
2762 cvTsSelect( b, &temp, cvRect( ofs.x, 0, b->cols - ofs.x, b->rows ));
2763 cvTsAdd( 0, cvScalar(0), 0, cvScalar(0), fill_val, &temp, 0 );
2765 else if( border_mode == CV_TS_BORDER_REPLICATE || a->cols == 1 )
2767 cvTsSelect( b, &temp, cvRect( ofs.x-1, 0, 1, b->rows ));
2768 for( i = ofs.x; i < b->cols; i++ )
2770 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2771 cvTsCopy( &temp, &temp2, 0 );
2774 else if( border_mode == CV_TS_BORDER_REFLECT )
2776 j = a->cols - 2; dir = -1;
2777 for( i = ofs.x; i < b->cols; i++ )
2779 cvTsSelect( b, &temp, cvRect( ofs.x-a->cols+j, 0, 1, b->rows ));
2780 cvTsSelect( b, &temp2, cvRect( i, 0, 1, b->rows ));
2781 cvTsCopy( &temp, &temp2, 0 );
2782 if( (unsigned)(j + dir) >= (unsigned)a->cols )
// Reference 2D correlation/filtering: b = filter2D(a, kernel, anchor), where 'a'
// is assumed to be pre-padded by cvTsPrepareToFilter (a is kernel-1 larger than
// b in each dimension). Kernel tap positions are precomputed as flat element
// offsets into 'a' for speed.
// NOTE(review): excerpt is gapped — braces, case labels, cvRound lines producing
// 'val' for the integer depths, the per-element 's' initialization, the uniform-
// kernel detection that guards the 'first' fast path, and the free() calls are
// elided.
2791 void cvTsConvolve2D( const CvMat* a, CvMat* b, const CvMat* kernel, CvPoint anchor )
2794 int cn, ncols, a_step;
2795 int ker_size = kernel->rows*kernel->cols;
// Flat offsets (in elements of a) of each kernel tap relative to the anchor.
2796 int* offset = (int*)malloc( ker_size*sizeof(offset[0]));
2797 float* k_data = (float*)malloc( ker_size*sizeof(k_data[0]));
// Used by the uniform-kernel fast path for CV_32F below.
2799 float first = kernel->data.fl[0];
2800 uchar *a_data, *b_data;
2802 cn = CV_MAT_CN(a->type);
// Row step of 'a' in single-channel elements.
2804 a_step = a->step / CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
// 'a' must be the padded source: larger than b by kernel-1 in each dimension.
2806 assert( a->cols == b->cols + kernel->cols - 1 &&
2807 a->rows == b->rows + kernel->rows - 1 && CV_ARE_TYPES_EQ( a, b ) );
2808 assert( CV_MAT_TYPE(kernel->type) == CV_32FC1 );
2809 assert( 0 <= anchor.x && anchor.x < kernel->cols &&
2810 0 <= anchor.y && anchor.y < kernel->rows );
// Collect (presumably nonzero) kernel taps and their element offsets.
2812 for( i = 0, k = 0; i < kernel->rows; i++ )
2813 for( j = 0; j < kernel->cols; j++ )
2815 float f = ((float*)(kernel->data.ptr + kernel->step*i))[j];
2819 offset[k++] = (i - anchor.y)*a_step + (j - anchor.x)*cn;
// Start reading at the anchor position so offsets may be negative.
2826 a_data = a->data.ptr + a->step*anchor.y + CV_ELEM_SIZE(a->type)*anchor.x;
2827 b_data = b->data.ptr;
2829 for( i = 0; i < b->rows; i++, a_data += a->step, b_data += b->step )
2831 switch( CV_MAT_DEPTH(a->type) )
// --- 8u ---
2834 for( j = 0; j < ncols; j++ )
2838 for( k = 0; k < ker_size; k++ )
2839 s += ((uchar*)a_data)[j+offset[k]]*k_data[k];
2841 ((uchar*)b_data)[j] = CV_CAST_8U(val);
// --- 8s ---
2845 for( j = 0; j < ncols; j++ )
2849 for( k = 0; k < ker_size; k++ )
2850 s += ((char*)a_data)[j+offset[k]]*k_data[k];
2852 ((char*)b_data)[j] = CV_CAST_8S(val);
// --- 16u ---
2856 for( j = 0; j < ncols; j++ )
2860 for( k = 0; k < ker_size; k++ )
2861 s += ((ushort*)a_data)[j+offset[k]]*k_data[k];
2863 ((ushort*)b_data)[j] = CV_CAST_16U(val);
// --- 16s ---
2867 for( j = 0; j < ncols; j++ )
2871 for( k = 0; k < ker_size; k++ )
2872 s += ((short*)a_data)[j+offset[k]]*k_data[k];
2874 ((short*)b_data)[j] = CV_CAST_16S(val);
// --- 32s ---
2878 for( j = 0; j < ncols; j++ )
2881 for( k = 0; k < ker_size; k++ )
2882 s += ((int*)a_data)[j+offset[k]]*k_data[k];
2883 ((int*)b_data)[j] = cvRound(s);
// --- 32f, general kernel ---
2889 for( j = 0; j < ncols; j++ )
2892 for( k = 0; k < ker_size; k++ )
2893 s += (double)((float*)a_data)[j+offset[k]]*k_data[k];
2894 ((float*)b_data)[j] = (float)s;
2899 // special branch to speedup feature selection and blur tests
// Uniform kernel: sum taps once, multiply by the shared coefficient 'first'.
2900 for( j = 0; j < ncols; j++ )
2903 for( k = 0; k < ker_size; k++ )
2904 s += (double)((float*)a_data)[j+offset[k]];
2905 ((float*)b_data)[j] = (float)(s*first);
// --- 64f ---
2910 for( j = 0; j < ncols; j++ )
2913 for( k = 0; k < ker_size; k++ )
2914 s += ((double*)a_data)[j+offset[k]]*k_data[k];
2915 ((double*)b_data)[j] = (double)s;
// Reference morphological filter: b = erode(a) or dilate(a) over the structuring
// element 'kernel' (op_type CV_TS_MAX selects dilation/maximum). Like
// cvTsConvolve2D, 'a' must be the pre-padded source and kernel taps are
// precomputed as flat element offsets.
// NOTE(review): excerpt is gapped — braces, case labels, the calc_max-dependent
// min/max update statements inside the k-loops, ker_size adjustment for sparse
// kernels, and free(offset) are elided.
2928 void cvTsMinMaxFilter( const CvMat* a, CvMat* b, const IplConvKernel* kernel, int op_type )
2931 int cn, ncols, a_step;
2932 int ker_size = kernel->nRows*kernel->nCols;
2933 int* offset = (int*)malloc( ker_size*sizeof(offset[0]));
2934 int calc_max = op_type == CV_TS_MAX;
2935 uchar *a_data, *b_data;
2937 cn = CV_MAT_CN(a->type);
// Row step of 'a' in single-channel elements.
2939 a_step = a->step / CV_ELEM_SIZE(a->type & ~CV_MAT_CN_MASK);
2941 assert( a->cols == b->cols + kernel->nCols - 1 &&
2942 a->rows == b->rows + kernel->nRows - 1 && CV_ARE_TYPES_EQ( a, b ) );
2943 assert( 0 <= kernel->anchorX && kernel->anchorX < kernel->nCols &&
2944 0 <= kernel->anchorY && kernel->anchorY < kernel->nRows );
// Collect offsets only where the structuring element is set
// (a null values pointer means a full rectangular element).
2946 for( i = 0, k = 0; i < kernel->nRows; i++ )
2947 for( j = 0; j < kernel->nCols; j++ )
2949 if( !kernel->values || kernel->values[i*kernel->nCols + j] )
2950 offset[k++] = (i - kernel->anchorY)*a_step + (j - kernel->anchorX)*cn;
2958 a_data = a->data.ptr + kernel->anchorY*a->step + kernel->anchorX*CV_ELEM_SIZE(a->type);
2959 b_data = b->data.ptr;
2961 for( i = 0; i < b->rows; i++, a_data += a->step, b_data += b->step )
2963 switch( CV_MAT_DEPTH(a->type) )
// --- 8u: seed with the first tap, then fold min or max over the rest ---
2966 for( j = 0; j < ncols; j++ )
2968 int m = ((uchar*)a_data)[j+offset[0]];
2969 for( k = 1; k < ker_size; k++ )
2971 int v = ((uchar*)a_data)[j+offset[k]];
2980 ((uchar*)b_data)[j] = (uchar)m;
// --- 16u ---
2984 for( j = 0; j < ncols; j++ )
2986 int m = ((ushort*)a_data)[j+offset[0]];
2987 for( k = 1; k < ker_size; k++ )
2989 int v = ((ushort*)a_data)[j+offset[k]];
2998 ((ushort*)b_data)[j] = (ushort)m;
// --- 16s ---
3002 for( j = 0; j < ncols; j++ )
3004 int m = ((short*)a_data)[j+offset[0]];
3005 for( k = 1; k < ker_size; k++ )
3007 int v = ((short*)a_data)[j+offset[k]];
3016 ((short*)b_data)[j] = (short)m;
// --- 32s ---
3020 for( j = 0; j < ncols; j++ )
3022 int m = ((int*)a_data)[j+offset[0]];
3023 for( k = 1; k < ker_size; k++ )
3025 int v = ((int*)a_data)[j+offset[k]];
3034 ((int*)b_data)[j] = m;
// --- 32f ---
3038 for( j = 0; j < ncols; j++ )
3040 float m = ((float*)a_data)[j+offset[0]];
3041 for( k = 1; k < ker_size; k++ )
3043 float v = ((float*)a_data)[j+offset[k]];
3052 ((float*)b_data)[j] = (float)m;
// --- 64f ---
3056 for( j = 0; j < ncols; j++ )
3058 double m = ((double*)a_data)[j+offset[0]];
3059 for( k = 1; k < ker_size; k++ )
3061 double v = ((double*)a_data)[j+offset[k]];
3070 ((double*)b_data)[j] = (double)m;
// Cross-correlation (dot product) of two equal-size, equal-type matrices:
// returns sum over all elements of a[i]*b[i], accumulated in a double.
// NOTE(review): the declarations of s/ncols, case labels, 'break's and the
// return statement are elided in this excerpt (gaps in embedded numbering).
3082 double cvTsCrossCorr( const CvMat* a, const CvMat* b )
3088 cn = CV_MAT_CN(a->type);
3091 assert( CV_ARE_SIZES_EQ( a, b ) && CV_ARE_TYPES_EQ( a, b ) );
3092 for( i = 0; i < a->rows; i++ )
3094 uchar* a_data = a->data.ptr + a->step*i;
3095 uchar* b_data = b->data.ptr + b->step*i;
// Per-depth accumulation; wide types promote to double before multiplying
// to avoid int overflow.
3097 switch( CV_MAT_DEPTH(a->type) )
3100 for( j = 0; j < ncols; j++ )
3101 s += ((uchar*)a_data)[j]*((uchar*)b_data)[j];
3104 for( j = 0; j < ncols; j++ )
3105 s += ((char*)a_data)[j]*((char*)b_data)[j];
3108 for( j = 0; j < ncols; j++ )
3109 s += (double)((ushort*)a_data)[j]*((ushort*)b_data)[j];
3112 for( j = 0; j < ncols; j++ )
3113 s += ((short*)a_data)[j]*((short*)b_data)[j];
3116 for( j = 0; j < ncols; j++ )
3117 s += ((double)((int*)a_data)[j])*((int*)b_data)[j];
3120 for( j = 0; j < ncols; j++ )
3121 s += ((double)((float*)a_data)[j])*((float*)b_data)[j];
3124 for( j = 0; j < ncols; j++ )
3125 s += ((double*)a_data)[j]*((double*)b_data)[j];
// Reference implementation of a per-pixel affine channel transform
// (cf. cvTransform): each cn-channel pixel of 'a' is multiplied by a
// dst_cn x cn matrix and shifted, producing a dst_cn-channel pixel in 'b'.
// The transform is stored row-major in mat[] as dst_cn rows of (cn+1) doubles,
// the last entry of each row being the shift term.
// NOTE(review): excerpt is gapped — braces, case labels, 'break's, the
// cn-switch header around the cn==1..4 multiply loops, and the shift-presence
// check guarding the mat[i*(cn+1)+cn] assignment are elided. mat[20] limits
// cn/dst_cn to at most 4 (4 rows x 5 columns).
3137 void cvTsTransform( const CvMat* a, CvMat* b, const CvMat* transmat, const CvMat* shift )
3139 int i, j, k, cols, dst_cols;
3140 int cn, dst_cn, depth, mat_depth, shiftstep;
3141 double mat[20], *buf, *dst_buf;
3143 cn = CV_MAT_CN(a->type);
3144 dst_cn = CV_MAT_CN(b->type);
3145 depth = CV_MAT_DEPTH(a->type);
3146 mat_depth = CV_MAT_DEPTH(transmat->type);
3147 cols = transmat->cols;
3149 // prepare cn x (cn + 1) transform matrix
3150 if( mat_depth == CV_32F )
// A zero step means the shift vector is a single continuous column/row.
3152 shiftstep = shift && shift->step ? shift->step/sizeof(float) : 1;
3153 for( i = 0; i < transmat->rows; i++ )
3155 mat[i*(cn+1) + cn] = 0.;
3156 for( j = 0; j < cols; j++ )
3157 mat[i*(cn+1) + j] = ((float*)(transmat->data.ptr + transmat->step*i))[j];
3159 mat[i*(cn+1) + cn] = shift->data.fl[i*shiftstep];
3164 assert( mat_depth == CV_64F );
3166 shiftstep = shift && shift->step ? shift->step/sizeof(double) : 1;
3167 for( i = 0; i < transmat->rows; i++ )
3169 mat[i*(cn+1) + cn] = 0.;
3170 for( j = 0; j < cols; j++ )
3171 mat[i*(cn+1) + j] = ((double*)(transmat->data.ptr + transmat->step*i))[j];
3173 mat[i*(cn+1) + cn] = shift->data.db[i*shiftstep];
// Scratch rows: source row widened to double, result row before narrowing.
3178 cols = a->cols * cn;
3179 dst_cols = a->cols * dst_cn;
3180 buf = (double*)cvStackAlloc( cols * sizeof(double) );
3181 dst_buf = (double*)cvStackAlloc( dst_cols * sizeof(double) );
3183 for( i = 0; i < a->rows; i++ )
3185 uchar* src = a->data.ptr + i*a->step;
3186 uchar* dst = b->data.ptr + i*b->step;
3187 double* _dst = dst_buf;
// Load the source row into doubles (per-depth case labels elided).
3192 for( j = 0; j < cols; j++ )
3193 buf[j] = ((uchar*)src)[j];
3196 for( j = 0; j < cols; j++ )
3197 buf[j] = ((ushort*)src)[j];
3200 for( j = 0; j < cols; j++ )
3201 buf[j] = ((short*)src)[j];
3204 for( j = 0; j < cols; j++ )
3205 buf[j] = ((int*)src)[j];
3208 for( j = 0; j < cols; j++ )
3209 buf[j] = ((float*)src)[j];
3212 for( j = 0; j < cols; j++ )
3213 buf[j] = ((double*)src)[j];
// Apply the transform, unrolled by source channel count (cn == 1..4).
3222 for( j = 0; j < cols; j++, _dst += dst_cn )
3223 for( k = 0; k < dst_cn; k++ )
3224 _dst[k] = buf[j]*mat[2*k] + mat[2*k+1];
3227 for( j = 0; j < cols; j += 2, _dst += dst_cn )
3228 for( k = 0; k < dst_cn; k++ )
3229 _dst[k] = buf[j]*mat[3*k] + buf[j+1]*mat[3*k+1] + mat[3*k+2];
3232 for( j = 0; j < cols; j += 3, _dst += dst_cn )
3233 for( k = 0; k < dst_cn; k++ )
3234 _dst[k] = buf[j]*mat[4*k] + buf[j+1]*mat[4*k+1] +
3235 buf[j+2]*mat[4*k+2] + mat[4*k+3];
3238 for( j = 0; j < cols; j += 4, _dst += dst_cn )
3239 for( k = 0; k < dst_cn; k++ )
3240 _dst[k] = buf[j]*mat[5*k] + buf[j+1]*mat[5*k+1] +
3241 buf[j+2]*mat[5*k+2] + buf[j+3]*mat[5*k+3] + mat[5*k+4];
// Store the result row, saturating to the destination depth.
3250 for( j = 0; j < dst_cols; j++ )
3252 int val = cvRound(dst_buf[j]);
3253 ((uchar*)dst)[j] = CV_CAST_8U(val);
3257 for( j = 0; j < dst_cols; j++ )
3259 int val = cvRound(dst_buf[j]);
3260 ((ushort*)dst)[j] = CV_CAST_16U(val);
3264 for( j = 0; j < dst_cols; j++ )
3266 int val = cvRound(dst_buf[j]);
3267 ((short*)dst)[j] = CV_CAST_16S(val);
3271 for( j = 0; j < dst_cols; j++ )
3272 ((int*)dst)[j] = cvRound(dst_buf[j]);
3275 for( j = 0; j < dst_cols; j++ )
3276 ((float*)dst)[j] = (float)dst_buf[j];
3279 for( j = 0; j < dst_cols; j++ )
3280 ((double*)dst)[j] = dst_buf[j];
// Matrix transpose b = a^T, copied element-wise as raw bytes so it works for
// any element size. Square in-place-capable path swaps symmetric pairs; the
// general path copies a(j,i) into b(i,j).
// NOTE(review): excerpt is gapped — rows/cols/a_step/b_step assignments, the
// square-vs-general branch, the innermost byte-swap/copy statements and the
// return are elided (presumably returns 'b' — TODO confirm).
3289 CvMat* cvTsTranspose( const CvMat* a, CvMat* b )
3291 int i, j, k, rows, cols, elem_size;
3292 uchar *a_data, *b_data;
3295 elem_size = CV_ELEM_SIZE(a->type);
3299 assert( a->rows == b->cols && a->cols == b->rows && CV_ARE_TYPES_EQ(a,b) );
3300 a_data = a->data.ptr;
3302 b_data = b->data.ptr;
// Square case: visit only the lower triangle (j <= i) and write both
// mirrored positions, so it is safe when a and b alias.
3307 for( i = 0; i < rows; i++ )
3309 for( j = 0; j <= i; j++ )
3311 uchar* a_ij = a_data + a_step*i + elem_size*j;
3312 uchar* a_ji = a_data + a_step*j + elem_size*i;
3313 uchar* b_ij = b_data + b_step*i + elem_size*j;
3314 uchar* b_ji = b_data + b_step*j + elem_size*i;
3315 for( k = 0; k < elem_size; k++ )
// General (non-square) case: plain element-by-element copy.
3327 for( i = 0; i < cols; i++ )
3329 for( j = 0; j < rows; j++ )
3331 uchar* a_ji = a_data + a_step*j + elem_size*i;
3332 uchar* b_ij = b_data + b_step*i + elem_size*j;
3333 for( k = 0; k < elem_size; k++ )
// Flips 'a' into 'b': flip_type == 0 flips around the horizontal axis (rows
// reversed), > 0 around the vertical axis (elements reversed within each row);
// <= 0 presumably also covers flipping both axes — TODO confirm against the
// elided row-advance logic.
// NOTE(review): excerpt is gapped — rows/cols/a_step/b_step assignments, the
// per-iteration a_data/b_data stepping, and closing braces are elided; a_step
// is presumably negated for the bottom-up walk after line 3361.
3343 void cvTsFlip( const CvMat* a, CvMat* b, int flip_type )
3345 int i, j, k, rows, cols, elem_size;
3346 uchar *a_data, *b_data;
3349 elem_size = CV_ELEM_SIZE(a->type);
// Row width in bytes.
3351 cols = a->cols*elem_size;
// In-place flip is not supported: source and destination must not alias.
3353 assert( CV_ARE_SIZES_EQ(a,b) && CV_ARE_TYPES_EQ(a,b) && a->data.ptr != b->data.ptr );
3354 a_data = a->data.ptr;
3356 b_data = b->data.ptr;
// Vertical flip (and flip-both): start reading from the last source row.
3359 if( flip_type <= 0 )
3361 a_data += a_step*(rows-1);
3365 for( i = 0; i < rows; i++ )
// Horizontal-axis flip: whole rows are copied unchanged.
3367 if( flip_type == 0 )
3368 memcpy( b_data, a_data, cols );
// Otherwise reverse element order within the row, byte-by-byte per element.
3371 for( j = 0; j < cols; j += elem_size )
3372 for( k = 0; k < elem_size; k++ )
3373 b_data[j+k] = a_data[cols - elem_size - j + k];
3381 void cvTsPatchZeros( CvMat* mat, double level )
3383 int i, j, ncols = mat->cols * CV_MAT_CN(mat->type);
3385 for( i = 0; i < mat->rows; i++ )
3387 switch( CV_MAT_DEPTH(mat->type) )
3391 float* data = (float*)(mat->data.ptr + i*mat->step);
3392 for( j = 0; j < ncols; j++ )
3393 if( fabs(data[j]) < level )
3399 double* data = (double*)(mat->data.ptr + i*mat->step);
3400 for( j = 0; j < ncols; j++ )
3401 if( fabs(data[j]) < level )