1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
48 /****************************************************************************************\
50 \****************************************************************************************/
// Copies a rectangular block of pixels from src to dst, treating the data as
// rows of ints (pix_size is assumed to be a multiple of sizeof(int)).
// The inner loop is manually unrolled 4x; a tail loop handles the remainder.
// NOTE(review): this chunk has elided lines (function prologue, braces, loop
// variable declaration) — the visible code is a fragment of the original.
53 GEMM_CopyBlock( const uchar* src, size_t src_step,
54 uchar* dst, size_t dst_step,
55 Size size, size_t pix_size )
// Re-express the row width in int-sized units so the copy runs on ints.
58 size.width *= (int)(pix_size / sizeof(int));
60 for( ; size.height--; src += src_step, dst += dst_step )
// 4x-unrolled copy of one row.
62 for( j = 0; j <= size.width - 4; j += 4 )
64 int t0 = ((const int*)src)[j];
65 int t1 = ((const int*)src)[j+1];
67 ((int*)dst)[j+1] = t1;
68 t0 = ((const int*)src)[j+2];
69 t1 = ((const int*)src)[j+3];
70 ((int*)dst)[j+2] = t0;
71 ((int*)dst)[j+3] = t1;
// Tail: copy the remaining 0-3 ints one at a time.
74 for( ; j < size.width; j++ )
75 ((int*)dst)[j] = ((const int*)src)[j];
// Transposes a rectangular block: each source column (stride src_step)
// becomes one contiguous destination row. Specialized inner loops copy
// 1, 2 or 4 ints per element depending on pix_size (presumably selected
// by a switch on pix_size that is elided from this view — TODO confirm).
// NOTE(review): prologue/braces are elided in this chunk.
81 GEMM_TransposeBlock( const uchar* src, size_t src_step,
82 uchar* dst, size_t dst_step,
83 Size size, size_t pix_size )
86 for( i = 0; i < size.width; i++, dst += dst_step, src += pix_size )
88 const uchar* _src = src;
// Element size == sizeof(int): single-int copy per element.
92 for( j = 0; j < size.height; j++, _src += src_step )
93 ((int*)dst)[j] = ((int*)_src)[0];
// Element size == 2*sizeof(int): copy a pair of ints per element.
96 for( j = 0; j < size.height*2; j += 2, _src += src_step )
98 int t0 = ((int*)_src)[0];
99 int t1 = ((int*)_src)[1];
101 ((int*)dst)[j+1] = t1;
// Element size == 4*sizeof(int): copy four ints per element.
105 for( j = 0; j < size.height*4; j += 4, _src += src_step )
107 int t0 = ((int*)_src)[0];
108 int t1 = ((int*)_src)[1];
110 ((int*)dst)[j+1] = t1;
111 t0 = ((int*)_src)[2];
112 t1 = ((int*)_src)[3];
113 ((int*)dst)[j+2] = t0;
114 ((int*)dst)[j+3] = t1;
// Single-pass generalized matrix multiply: D = alpha*op(A)*op(B) + beta*op(C),
// where op() optionally transposes per the GEMM_1_T/GEMM_2_T/GEMM_3_T bits in
// `flags`. T is the element type, WT a wider accumulator type. Steps arrive in
// bytes and are converted to element units up front. Several code paths are
// chosen by shape: outer product (n==1), A*Bt, a small-row cached path, and a
// general path accumulating into a stack buffer.
// NOTE(review): many interior lines (braces, declarations, some stores and
// conditionals) are elided from this chunk.
125 template<typename T, typename WT> static void
126 GEMMSingleMul( const T* a_data, size_t a_step,
127 const T* b_data, size_t b_step,
128 const T* c_data, size_t c_step,
129 T* d_data, size_t d_step,
130 Size a_size, Size d_size,
131 double alpha, double beta, int flags )
133 int i, j, k, n = a_size.width, m = d_size.width, drows = d_size.height;
134 const T *_a_data = a_data, *_b_data = b_data, *_c_data = c_data;
136 size_t a_step0, a_step1, c_step0, c_step1, t_step;
// Convert byte steps to element steps.
138 a_step /= sizeof(a_data[0]);
139 b_step /= sizeof(b_data[0]);
140 c_step /= sizeof(c_data[0]);
141 d_step /= sizeof(d_data[0]);
// C strides: zero when C is absent, swapped when GEMM_3_T requests Ct.
146 c_step0 = c_step1 = 0;
147 else if( !(flags & GEMM_3_T) )
148 c_step0 = c_step, c_step1 = 1;
150 c_step0 = 1, c_step1 = c_step;
// GEMM_1_T: swap A's row/column strides instead of physically transposing.
152 if( flags & GEMM_1_T )
154 CV_SWAP( a_step0, a_step1, t_step );
// Gather a strided A-row into a contiguous stack buffer for cache locality.
156 if( a_step > 1 && n > 1 )
157 a_buf = (T*)cvStackAlloc(n*sizeof(a_data[0]));
160 if( n == 1 ) /* external product */
// Outer product path: compact strided A column / B row into stack buffers.
164 if( a_step > 1 && a_size.height > 1 )
166 a_buf = (T*)cvStackAlloc(drows*sizeof(a_data[0]));
167 for( k = 0; k < drows; k++ )
168 a_buf[k] = a_data[a_step*k];
174 b_buf = (T*)cvStackAlloc(d_size.width*sizeof(b_buf[0]) );
175 for( j = 0; j < d_size.width; j++ )
176 b_buf[j] = b_data[j*b_step];
180 for( i = 0; i < drows; i++, _c_data += c_step0, d_data += d_step )
182 WT al = WT(a_data[i])*alpha;
// 2x-unrolled column loop over the output row.
184 for( j = 0; j <= d_size.width - 2; j += 2, c_data += 2*c_step1 )
186 WT s0 = al*WT(b_data[j]);
187 WT s1 = al*WT(b_data[j+1]);
195 d_data[j] = T(s0 + WT(c_data[0])*beta);
196 d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
200 for( ; j < d_size.width; j++, c_data += c_step1 )
202 WT s0 = al*WT(b_data[j]);
206 d_data[j] = T(s0 + WT(c_data[0])*beta);
210 else if( flags & GEMM_2_T ) /* A * Bt */
// Dot-product path: each D element is a row-of-A dot row-of-B.
212 for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
220 for( k = 0; k < n; k++ )
221 a_buf[k] = a_data[a_step1*k];
225 for( j = 0; j < d_size.width; j++, b_data += b_step,
// Four partial sums enable 4x unrolling of the dot product.
228 WT s0(0), s1(0), s2(0), s3(0);
230 for( k = 0; k <= n - 4; k += 4 )
232 s0 += WT(a_data[k])*WT(b_data[k]);
233 s1 += WT(a_data[k+1])*WT(b_data[k+1]);
234 s2 += WT(a_data[k+2])*WT(b_data[k+2]);
235 s3 += WT(a_data[k+3])*WT(b_data[k+3]);
239 s0 += WT(a_data[k])*WT(b_data[k]);
240 s0 = (s0+s1+s2+s3)*alpha;
245 d_data[j] = T(s0 + WT(c_data[0])*beta);
// Narrow-output path: whole D row (<= 1600 bytes) fits in cache.
249 else if( d_size.width*sizeof(d_data[0]) <= 1600 )
251 for( i = 0; i < drows; i++, _a_data += a_step0,
255 a_data = _a_data, c_data = _c_data;
259 for( k = 0; k < n; k++ )
260 a_buf[k] = a_data[a_step1*k];
// Produce 4 output columns at a time, streaming down B's rows.
264 for( j = 0; j <= m - 4; j += 4, c_data += 4*c_step1 )
266 const T* b = _b_data + j;
267 WT s0(0), s1(0), s2(0), s3(0);
269 for( k = 0; k < n; k++, b += b_step )
272 s0 += a * WT(b[0]); s1 += a * WT(b[1]);
273 s2 += a * WT(b[2]); s3 += a * WT(b[3]);
// beta == 0 branch: plain scale-and-store.
278 d_data[j] = T(s0*alpha);
279 d_data[j+1] = T(s1*alpha);
280 d_data[j+2] = T(s2*alpha);
281 d_data[j+3] = T(s3*alpha);
// beta != 0 branch: blend with C.
285 s0 = s0*alpha; s1 = s1*alpha;
286 s2 = s2*alpha; s3 = s3*alpha;
287 d_data[j] = T(s0 + WT(c_data[0])*beta);
288 d_data[j+1] = T(s1 + WT(c_data[c_step1])*beta);
289 d_data[j+2] = T(s2 + WT(c_data[c_step1*2])*beta);
290 d_data[j+3] = T(s3 + WT(c_data[c_step1*3])*beta);
// Tail columns.
294 for( ; j < m; j++, c_data += c_step1 )
296 const T* b = _b_data + j;
299 for( k = 0; k < n; k++, b += b_step )
300 s0 += WT(a_data[k]) * WT(b[0]);
306 d_data[j] = T(s0 + WT(c_data[0])*beta);
// General path: accumulate a full output row in a wide (WT) stack buffer,
// then scale by alpha and blend with C at the end.
312 WT* d_buf = (WT*)cvStackAlloc(m*sizeof(d_buf[0]));
314 for( i = 0; i < drows; i++, _a_data += a_step0, _c_data += c_step0, d_data += d_step )
322 for( k = 0; k < n; k++ )
323 a_buf[k] = _a_data[a_step1*k];
327 for( j = 0; j < m; j++ )
330 for( k = 0; k < n; k++, b_data += b_step )
// 4x-unrolled accumulation of one B row scaled by a[k].
334 for( j = 0; j <= m - 4; j += 4 )
336 WT t0 = d_buf[j] + WT(b_data[j])*al;
337 WT t1 = d_buf[j+1] + WT(b_data[j+1])*al;
340 t0 = d_buf[j+2] + WT(b_data[j+2])*al;
341 t1 = d_buf[j+3] + WT(b_data[j+3])*al;
347 d_buf[j] += WT(b_data[j])*al;
// Final store: with or without the beta*C term.
351 for( j = 0; j < m; j++ )
352 d_data[j] = T(d_buf[j]*alpha);
354 for( j = 0; j < m; j++, c_data += c_step1 )
356 WT t = d_buf[j]*alpha;
357 d_data[j] = T(t + WT(c_data[0])*beta);
// Block multiply used by the tiled gemm path: computes (or accumulates into)
// a WT-typed partial product D(block) = op(A_block) * op(B_block).
// Bit 16 of `flags` ("do_acc") selects accumulate-into-D vs. overwrite;
// alpha/beta/C are applied later by GEMMStore.
// NOTE(review): interior lines (braces, declarations, some stores) are elided.
364 template<typename T, typename WT> static void
365 GEMMBlockMul( const T* a_data, size_t a_step,
366 const T* b_data, size_t b_step,
367 WT* d_data, size_t d_step,
368 Size a_size, Size d_size, int flags )
370 int i, j, k, n = a_size.width, m = d_size.width;
371 const T *_a_data = a_data, *_b_data = b_data;
373 size_t a_step0, a_step1, t_step;
// Accumulation flag: when set, partial sums start from the existing d_data.
374 int do_acc = flags & 16;
// Byte steps -> element steps.
376 a_step /= sizeof(a_data[0]);
377 b_step /= sizeof(b_data[0]);
378 d_step /= sizeof(d_data[0]);
// GEMM_1_T handled by swapping A strides; gather rows into a stack buffer.
383 if( flags & GEMM_1_T )
385 CV_SWAP( a_step0, a_step1, t_step );
387 a_buf = (T*)cvStackAlloc(n*sizeof(a_data[0]));
390 if( flags & GEMM_2_T )
392 /* second operand is transposed */
393 for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
395 a_data = _a_data; b_data = _b_data;
399 for( k = 0; k < n; k++ )
400 a_buf[k] = a_data[a_step1*k];
// Each output element is a dot product of an A row with a B row.
404 for( j = 0; j < d_size.width; j++, b_data += b_step )
406 WT s0 = do_acc ? d_data[j]:WT(0), s1(0);
407 for( k = 0; k <= n - 2; k += 2 )
409 s0 += WT(a_data[k])*WT(b_data[k]);
410 s1 += WT(a_data[k+1])*WT(b_data[k+1]);
414 s0 += WT(a_data[k])*WT(b_data[k]);
// Non-transposed B: stream down B's rows, producing 4 columns at a time.
422 for( i = 0; i < d_size.height; i++, _a_data += a_step0, d_data += d_step )
424 a_data = _a_data, b_data = _b_data;
428 for( k = 0; k < n; k++ )
429 a_buf[k] = a_data[a_step1*k];
433 for( j = 0; j <= m - 4; j += 4 )
436 const T* b = b_data + j;
// Seed the 4 partial sums from D when accumulating, else from zero.
440 s0 = d_data[j]; s1 = d_data[j+1];
441 s2 = d_data[j+2]; s3 = d_data[j+3];
444 s0 = s1 = s2 = s3 = WT(0);
446 for( k = 0; k < n; k++, b += b_step )
449 s0 += a * WT(b[0]); s1 += a * WT(b[1]);
450 s2 += a * WT(b[2]); s3 += a * WT(b[3]);
453 d_data[j] = s0; d_data[j+1] = s1;
454 d_data[j+2] = s2; d_data[j+3] = s3;
// Tail columns.
459 const T* b = b_data + j;
460 WT s0 = do_acc ? d_data[j] : WT(0);
462 for( k = 0; k < n; k++, b += b_step )
463 s0 += WT(a_data[k]) * WT(b[0]);
// Final store step of the tiled gemm: converts the wide (WT) accumulator
// buffer d_buf back to T, applying D = alpha*d_buf + beta*op(C).
// GEMM_3_T transposes C access via swapped strides; absent C -> zero strides.
// NOTE(review): interior lines (braces, the final d_data stores in the
// unrolled loops) are elided from this chunk.
472 template<typename T, typename WT> static void
473 GEMMStore( const T* c_data, size_t c_step,
474 const WT* d_buf, size_t d_buf_step,
475 T* d_data, size_t d_step, Size d_size,
476 double alpha, double beta, int flags )
478 const T* _c_data = c_data;
480 size_t c_step0, c_step1;
// Byte steps -> element steps.
482 c_step /= sizeof(c_data[0]);
483 d_buf_step /= sizeof(d_buf[0]);
484 d_step /= sizeof(d_data[0]);
487 c_step0 = c_step1 = 0;
488 else if( !(flags & GEMM_3_T) )
489 c_step0 = c_step, c_step1 = 1;
491 c_step0 = 1, c_step1 = c_step;
493 for( ; d_size.height--; _c_data += c_step0, d_buf += d_buf_step, d_data += d_step )
// With-C branch: 4x-unrolled alpha*d + beta*c blend.
498 for( j = 0; j <= d_size.width - 4; j += 4, c_data += 4*c_step1 )
500 WT t0 = alpha*d_buf[j];
501 WT t1 = alpha*d_buf[j+1];
502 t0 += beta*WT(c_data[0]);
503 t1 += beta*WT(c_data[c_step1]);
506 t0 = alpha*d_buf[j+2];
507 t1 = alpha*d_buf[j+3];
508 t0 += beta*WT(c_data[c_step1*2]);
509 t1 += beta*WT(c_data[c_step1*3]);
513 for( ; j < d_size.width; j++, c_data += c_step1 )
515 WT t0 = alpha*d_buf[j];
516 d_data[j] = T(t0 + WT(c_data[0])*beta);
// No-C branch: plain alpha scaling.
521 for( j = 0; j <= d_size.width - 4; j += 4 )
523 WT t0 = alpha*d_buf[j];
524 WT t1 = alpha*d_buf[j+1];
527 t0 = alpha*d_buf[j+2];
528 t1 = alpha*d_buf[j+3];
532 for( ; j < d_size.width; j++ )
533 d_data[j] = T(alpha*d_buf[j]);
// Type-erased function-pointer signatures used by gemm() to dispatch on the
// matrix element type (32f/64f/32fc/64fc). The concrete typed functions below
// are cast to these generic signatures.
// Single-pass multiply: dst = alpha*src1*src2 + beta*src3.
539 typedef void (*GEMMSingleMulFunc)( const void* src1, size_t step1,
540 const void* src2, size_t step2, const void* src3, size_t step3,
541 void* dst, size_t dststep, Size srcsize, Size dstsize,
542 double alpha, double beta, int flags );
// Tiled partial product into a wide accumulator buffer.
544 typedef void (*GEMMBlockMulFunc)( const void* src1, size_t step1,
545 const void* src2, size_t step2, void* dst, size_t dststep,
546 Size srcsize, Size dstsize, int flags );
// Final alpha/beta blend from the wide buffer into the output matrix.
548 typedef void (*GEMMStoreFunc)( const void* src1, size_t step1,
549 const void* src2, size_t step2, void* dst, size_t dststep,
550 Size dstsize, double alpha, double beta, int flags );
// Wrapper instantiating GEMMSingleMul for single-precision real data
// (float elements, double accumulator).
552 static void GEMMSingleMul_32f( const float* a_data, size_t a_step,
553 const float* b_data, size_t b_step,
554 const float* c_data, size_t c_step,
555 float* d_data, size_t d_step,
556 Size a_size, Size d_size,
557 double alpha, double beta, int flags )
559 GEMMSingleMul<float,double>(a_data, a_step, b_data, b_step, c_data,
560 c_step, d_data, d_step, a_size, d_size,
// Wrapper instantiating GEMMSingleMul for double-precision real data
// (double elements and accumulator).
564 static void GEMMSingleMul_64f( const double* a_data, size_t a_step,
565 const double* b_data, size_t b_step,
566 const double* c_data, size_t c_step,
567 double* d_data, size_t d_step,
568 Size a_size, Size d_size,
569 double alpha, double beta, int flags )
571 GEMMSingleMul<double,double>(a_data, a_step, b_data, b_step, c_data,
572 c_step, d_data, d_step, a_size, d_size,
// Wrapper instantiating GEMMSingleMul for single-precision complex data
// (Complexf elements, Complexd accumulator).
577 static void GEMMSingleMul_32fc( const Complexf* a_data, size_t a_step,
578 const Complexf* b_data, size_t b_step,
579 const Complexf* c_data, size_t c_step,
580 Complexf* d_data, size_t d_step,
581 Size a_size, Size d_size,
582 double alpha, double beta, int flags )
584 GEMMSingleMul<Complexf,Complexd>(a_data, a_step, b_data, b_step, c_data,
585 c_step, d_data, d_step, a_size, d_size,
// Wrapper instantiating GEMMSingleMul for double-precision complex data
// (Complexd elements and accumulator).
589 static void GEMMSingleMul_64fc( const Complexd* a_data, size_t a_step,
590 const Complexd* b_data, size_t b_step,
591 const Complexd* c_data, size_t c_step,
592 Complexd* d_data, size_t d_step,
593 Size a_size, Size d_size,
594 double alpha, double beta, int flags )
596 GEMMSingleMul<Complexd,Complexd>(a_data, a_step, b_data, b_step, c_data,
597 c_step, d_data, d_step, a_size, d_size,
// Wrapper: GEMMBlockMul for float input accumulating into double.
601 static void GEMMBlockMul_32f( const float* a_data, size_t a_step,
602 const float* b_data, size_t b_step,
603 double* d_data, size_t d_step,
604 Size a_size, Size d_size, int flags )
606 GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
// Wrapper: GEMMBlockMul for double input accumulating into double.
610 static void GEMMBlockMul_64f( const double* a_data, size_t a_step,
611 const double* b_data, size_t b_step,
612 double* d_data, size_t d_step,
613 Size a_size, Size d_size, int flags )
615 GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
// Wrapper: GEMMBlockMul for Complexf input accumulating into Complexd.
619 static void GEMMBlockMul_32fc( const Complexf* a_data, size_t a_step,
620 const Complexf* b_data, size_t b_step,
621 Complexd* d_data, size_t d_step,
622 Size a_size, Size d_size, int flags )
624 GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
// Wrapper: GEMMBlockMul for Complexd input accumulating into Complexd.
628 static void GEMMBlockMul_64fc( const Complexd* a_data, size_t a_step,
629 const Complexd* b_data, size_t b_step,
630 Complexd* d_data, size_t d_step,
631 Size a_size, Size d_size, int flags )
633 GEMMBlockMul(a_data, a_step, b_data, b_step, d_data, d_step, a_size, d_size, flags);
// Wrapper: GEMMStore blending a double accumulator buffer into a float output.
637 static void GEMMStore_32f( const float* c_data, size_t c_step,
638 const double* d_buf, size_t d_buf_step,
639 float* d_data, size_t d_step, Size d_size,
640 double alpha, double beta, int flags )
642 GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
// Wrapper: GEMMStore for double accumulator into a double output.
646 static void GEMMStore_64f( const double* c_data, size_t c_step,
647 const double* d_buf, size_t d_buf_step,
648 double* d_data, size_t d_step, Size d_size,
649 double alpha, double beta, int flags )
651 GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
// Wrapper: GEMMStore blending a Complexd buffer into a Complexf output.
655 static void GEMMStore_32fc( const Complexf* c_data, size_t c_step,
656 const Complexd* d_buf, size_t d_buf_step,
657 Complexf* d_data, size_t d_step, Size d_size,
658 double alpha, double beta, int flags )
660 GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
// Wrapper: GEMMStore for Complexd accumulator into a Complexd output.
664 static void GEMMStore_64fc( const Complexd* c_data, size_t c_step,
665 const Complexd* d_buf, size_t d_buf_step,
666 Complexd* d_data, size_t d_step, Size d_size,
667 double alpha, double beta, int flags )
669 GEMMStore(c_data, c_step, d_buf, d_buf_step, d_data, d_step, d_size, alpha, beta, flags);
// Generalized matrix multiplication: D = alpha*op(A)*op(B) + beta*op(C),
// with optional transposition of each operand via GEMM_1_T/2_T/3_T in flags.
// Supported types: CV_32FC1, CV_64FC1, CV_32FC2, CV_64FC2 (A, B, C must match).
// Strategy: hard-coded fast paths for tiny 2x2/3x3/4x4-ish products, then a
// single-pass multiply for small/medium sizes, then a cache-blocked (tiled)
// multiply with per-type dispatch through the function-pointer tables above.
// NOTE(review): a large number of interior lines (braces, case labels,
// declarations, else-branches) are elided from this chunk; the visible code
// is a fragment of the full function.
673 void gemm( const Mat& _A, const Mat& _B, double alpha,
674 const Mat& _C, double beta, Mat& D, int flags )
// Tiling parameters for the blocked path: 128x128-element tiles.
676 const int block_lin_size = 128;
677 const int block_size = block_lin_size * block_lin_size;
679 static double zero[] = {0,0,0,0};
680 static float zerof[] = {0,0,0,0};
// C participates only when present and beta is non-zero.
683 const Mat* C = _C.data && beta != 0 ? &_C : 0;
684 Size a_size = A.size(), d_size;
685 int i, len = 0, type = A.type();
687 CV_Assert( type == B.type() && (type == CV_32FC1 || type == CV_64FC1 || type == CV_32FC2 || type == CV_64FC2) );
// Derive output size and the shared dimension check per transpose flags.
689 switch( flags & (GEMM_1_T|GEMM_2_T) )
692 d_size = Size( B.cols, a_size.height );
694 CV_Assert( a_size.width == len );
697 d_size = Size( B.cols, a_size.width );
699 CV_Assert( a_size.height == len );
702 d_size = Size( B.rows, a_size.height );
704 CV_Assert( a_size.width == len );
707 d_size = Size( B.rows, a_size.width );
709 CV_Assert( a_size.height == len );
// C must match the output shape (transposed if GEMM_3_T).
715 CV_Assert( C->type() == type &&
716 (((flags&GEMM_3_T) == 0 && C->rows == d_size.height && C->cols == d_size.width) ||
717 ((flags&GEMM_3_T) != 0 && C->rows == d_size.width && C->cols == d_size.height)));
718 if( (flags & GEMM_3_T) != 0 && C->data == D.data )
725 D.create( d_size.height, d_size.width, type );
// Fast path for tiny products (shared dimension 2..4, no transposes).
727 if( flags == 0 && 2 <= len && len <= 4 && (len == d_size.width || len == d_size.height) )
// --- float (CV_32FC1) tiny-size kernels ---
731 float* d = (float*)D.data;
732 const float *a = (const float*)A.data,
733 *b = (const float*)B.data,
734 *c = (const float*)(C ? C->data : 0);
735 size_t d_step = D.step/sizeof(d[0]),
736 a_step = A.step/sizeof(a[0]),
737 b_step = B.step/sizeof(b[0]),
738 c_step = C ? C->step/sizeof(c[0]) : 0;
// len == 2, row-vector times 2x2 matrix per output row.
746 if( len == d_size.width && b != d )
748 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
750 float t0 = a[0]*b[0] + a[1]*b[b_step];
751 float t1 = a[0]*b[1] + a[1]*b[b_step+1];
752 d[0] = (float)(t0*alpha + c[0]*beta);
753 d[1] = (float)(t1*alpha + c[1]*beta);
// Column-wise variant (2-row output).
765 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
767 float t0 = a[0]*b[0] + a[1]*b[b_step];
768 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
769 d[0] = (float)(t0*alpha + c[0]*beta);
770 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
// len == 3 variants.
777 if( len == d_size.width && b != d )
779 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
781 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
782 float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
783 float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
784 d[0] = (float)(t0*alpha + c[0]*beta);
785 d[1] = (float)(t1*alpha + c[1]*beta);
786 d[2] = (float)(t2*alpha + c[2]*beta);
798 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
800 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
801 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
802 float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
804 d[0] = (float)(t0*alpha + c[0]*beta);
805 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
806 d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
// len == 4 variants.
813 if( len == d_size.width && b != d )
815 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
817 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
818 float t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
819 float t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
820 float t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
821 d[0] = (float)(t0*alpha + c[0]*beta);
822 d[1] = (float)(t1*alpha + c[1]*beta);
823 d[2] = (float)(t2*alpha + c[2]*beta);
824 d[3] = (float)(t3*alpha + c[3]*beta);
827 else if( len <= 16 && a != d )
836 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
838 float t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
839 float t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
840 a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
841 float t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
842 a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
843 float t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
844 a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
845 d[0] = (float)(t0*alpha + c[0]*beta);
846 d[d_step] = (float)(t1*alpha + c[c_step]*beta);
847 d[d_step*2] = (float)(t2*alpha + c[c_step*2]*beta);
848 d[d_step*3] = (float)(t3*alpha + c[c_step*3]*beta);
// --- double (CV_64FC1) tiny-size kernels, mirroring the float ones ---
859 double* d = (double*)D.data;
860 const double *a = (const double*)A.data,
861 *b = (const double*)B.data,
862 *c = (const double*)(C ? C->data : 0);
863 size_t d_step = D.step/sizeof(d[0]),
864 a_step = A.step/sizeof(a[0]),
865 b_step = B.step/sizeof(b[0]),
866 c_step = C ? C->step/sizeof(c[0]) : 0;
873 if( len == d_size.width && b != d )
875 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
877 double t0 = a[0]*b[0] + a[1]*b[b_step];
878 double t1 = a[0]*b[1] + a[1]*b[b_step+1];
879 d[0] = t0*alpha + c[0]*beta;
880 d[1] = t1*alpha + c[1]*beta;
892 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
894 double t0 = a[0]*b[0] + a[1]*b[b_step];
895 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step];
896 d[0] = t0*alpha + c[0]*beta;
897 d[d_step] = t1*alpha + c[c_step]*beta;
904 if( len == d_size.width && b != d )
906 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
908 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
909 double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1];
910 double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2];
911 d[0] = t0*alpha + c[0]*beta;
912 d[1] = t1*alpha + c[1]*beta;
913 d[2] = t2*alpha + c[2]*beta;
925 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
927 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2];
928 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] + a[a_step+2]*b[b_step*2];
929 double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] + a[a_step*2+2]*b[b_step*2];
931 d[0] = t0*alpha + c[0]*beta;
932 d[d_step] = t1*alpha + c[c_step]*beta;
933 d[d_step*2] = t2*alpha + c[c_step*2]*beta;
940 if( len == d_size.width && b != d )
942 for( i = 0; i < d_size.height; i++, d += d_step, a += a_step, c += c_step )
944 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
945 double t1 = a[0]*b[1] + a[1]*b[b_step+1] + a[2]*b[b_step*2+1] + a[3]*b[b_step*3+1];
946 double t2 = a[0]*b[2] + a[1]*b[b_step+2] + a[2]*b[b_step*2+2] + a[3]*b[b_step*3+2];
947 double t3 = a[0]*b[3] + a[1]*b[b_step+3] + a[2]*b[b_step*2+3] + a[3]*b[b_step*3+3];
948 d[0] = t0*alpha + c[0]*beta;
949 d[1] = t1*alpha + c[1]*beta;
950 d[2] = t2*alpha + c[2]*beta;
951 d[3] = t3*alpha + c[3]*beta;
954 else if( d_size.width <= 16 && a != d )
963 for( i = 0; i < d_size.width; i++, d++, b++, c += c_step0 )
965 double t0 = a[0]*b[0] + a[1]*b[b_step] + a[2]*b[b_step*2] + a[3]*b[b_step*3];
966 double t1 = a[a_step]*b[0] + a[a_step+1]*b[b_step] +
967 a[a_step+2]*b[b_step*2] + a[a_step+3]*b[b_step*3];
968 double t2 = a[a_step*2]*b[0] + a[a_step*2+1]*b[b_step] +
969 a[a_step*2+2]*b[b_step*2] + a[a_step*2+3]*b[b_step*3];
970 double t3 = a[a_step*3]*b[0] + a[a_step*3+1]*b[b_step] +
971 a[a_step*3+2]*b[b_step*2] + a[a_step*3+3]*b[b_step*3];
972 d[0] = t0*alpha + c[0]*beta;
973 d[d_step] = t1*alpha + c[c_step]*beta;
974 d[d_step*2] = t2*alpha + c[c_step*2]*beta;
975 d[d_step*3] = t3*alpha + c[c_step*3]*beta;
// --- General path: dispatch per type through function-pointer tables ---
986 size_t b_step = B.step;
987 GEMMSingleMulFunc singleMulFunc;
988 GEMMBlockMulFunc blockMulFunc;
989 GEMMStoreFunc storeFunc;
991 const uchar* Cdata = C ? C->data : 0;
992 size_t Cstep = C ? C->step : 0;
993 AutoBuffer<uchar> buf;
995 if( type == CV_32FC1 )
997 singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32f;
998 blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32f;
999 storeFunc = (GEMMStoreFunc)GEMMStore_32f;
1001 else if( type == CV_64FC1 )
1003 singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64f;
1004 blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64f;
1005 storeFunc = (GEMMStoreFunc)GEMMStore_64f;
1007 else if( type == CV_32FC2 )
1009 singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_32fc;
1010 blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_32fc;
1011 storeFunc = (GEMMStoreFunc)GEMMStore_32fc;
1015 CV_Assert( type == CV_64FC2 );
1016 singleMulFunc = (GEMMSingleMulFunc)GEMMSingleMul_64fc;
1017 blockMulFunc = (GEMMBlockMulFunc)GEMMBlockMul_64fc;
1018 storeFunc = (GEMMStoreFunc)GEMMStore_64fc;
// In-place call: compute into a temporary, presumably copied to D afterwards
// (the copy-back is elided from this view — TODO confirm).
1021 if( D.data == A.data || D.data == B.data )
1023 buf.allocate(d_size.width*d_size.height*CV_ELEM_SIZE(type));
1024 tmat = Mat(d_size.height, d_size.width, type, (uchar*)buf );
// Degenerate vector case: a continuous B can be walked with a trivial step.
1028 if( (d_size.width == 1 || len == 1) && !(flags & GEMM_2_T) && B.isContinuous() )
1030 b_step = d_size.width == 1 ? 0 : CV_ELEM_SIZE(type);
// Disabled legacy hook for an external BLAS GEMM implementation.
1034 /*if( (d_size.width | d_size.height | len) >= 16 && icvBLAS_GEMM_32f_p != 0 )
1036 blas_func = type == CV_32FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32f_p :
1037 type == CV_64FC1 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64f_p :
1038 type == CV_32FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_32fc_p :
1039 type == CV_64FC2 ? (icvBLAS_GEMM_32f_t)icvBLAS_GEMM_64fc_p : 0;
1044 const char* transa = flags & GEMM_1_T ? "t" : "n";
1045 const char* transb = flags & GEMM_2_T ? "t" : "n";
1050 if( C->data.ptr != D->data.ptr )
1052 if( !(flags & GEMM_3_T) )
1055 cvTranspose( C, D );
1059 if( CV_MAT_DEPTH(type) == CV_32F )
1061 Complex32f _alpha, _beta;
1063 lda = A->step/sizeof(float);
1064 ldb = b_step/sizeof(float);
1065 ldd = D->step/sizeof(float);
1066 _alpha.re = (float)alpha;
1068 _beta.re = C->data.ptr ? (float)beta : 0;
1070 if( CV_MAT_CN(type) == 2 )
1071 lda /= 2, ldb /= 2, ldd /= 2;
1073 blas_func( transb, transa, &d_size.width, &d_size.height, &len,
1074 &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
1075 &_beta, D->data.ptr, &ldd );
1079 CvComplex64f _alpha, _beta;
1081 lda = A->step/sizeof(double);
1082 ldb = b_step/sizeof(double);
1083 ldd = D->step/sizeof(double);
1086 _beta.re = C->data.ptr ? beta : 0;
1088 if( CV_MAT_CN(type) == 2 )
1089 lda /= 2, ldb /= 2, ldd /= 2;
1091 blas_func( transb, transa, &d_size.width, &d_size.height, &len,
1092 &_alpha, B->data.ptr, &ldb, A->data.ptr, &lda,
1093 &_beta, D->data.ptr, &ldd );
// Small/medium problems: single-pass multiply, no tiling needed.
1096 else*/ if( ((d_size.height <= block_lin_size/2 || d_size.width <= block_lin_size/2) &&
1097 len <= 10000) || len <= 10 ||
1098 (d_size.width <= block_lin_size &&
1099 d_size.height <= block_lin_size && len <= block_lin_size) )
1101 singleMulFunc( A.data, A.step, B.data, b_step, Cdata, Cstep,
1102 _D->data, _D->step, a_size, d_size, alpha, beta, flags );
// Large problems: cache-blocked multiply.
1106 int is_a_t = flags & GEMM_1_T;
1107 int is_b_t = flags & GEMM_2_T;
1108 int elem_size = CV_ELEM_SIZE(type);
1110 int a_buf_size = 0, b_buf_size, d_buf_size;
1114 int j, k, di = 0, dj = 0, dk = 0;
1116 size_t a_step0, a_step1, b_step0, b_step1, c_step0, c_step1;
// Accumulator element is twice as wide for 32F (double accumulation).
1117 int work_elem_size = elem_size << (CV_MAT_DEPTH(type) == CV_32F ? 1 : 0);
1120 a_step0 = A.step, a_step1 = elem_size;
1122 a_step0 = elem_size, a_step1 = A.step;
1125 b_step0 = b_step, b_step1 = elem_size;
1127 b_step0 = elem_size, b_step1 = b_step;
1131 c_step0 = c_step1 = 0;
1134 else if( !(flags & GEMM_3_T) )
1135 c_step0 = C->step, c_step1 = elem_size;
1137 c_step0 = elem_size, c_step1 = C->step;
// Choose tile dimensions dm0 x dn0 x dk0 bounded by block_size elements.
1139 dm0 = std::min( block_lin_size, d_size.height );
1140 dn0 = std::min( block_lin_size, d_size.width );
1141 dk0_1 = block_size / dm0;
1142 dk0_2 = block_size / dn0;
1143 dk0 = std::min( dk0_1, dk0_2 );
1144 dk0 = std::min( dk0, len );
1145 if( dk0*dm0 > block_size )
1146 dm0 = block_size / dk0;
1147 if( dk0*dn0 > block_size )
1148 dn0 = block_size / dk0;
// Over-allocate slightly (the +/8 slack) and keep widths even.
1150 dk0_1 = (dn0+dn0/8+2) & -2;
1151 b_buf_size = (dk0+dk0/8+1)*dk0_1*elem_size;
1152 d_buf_size = (dk0+dk0/8+1)*dk0_1*work_elem_size;
1156 a_buf_size = (dm0+dm0/8+1)*((dk0+dk0/8+2)&-2)*elem_size;
// One allocation carved into the three scratch areas.
1160 buf.allocate(a_buf_size + b_buf_size + d_buf_size);
1161 d_buf = (uchar*)buf;
1162 b_buf = d_buf + d_buf_size;
1165 a_buf = b_buf + b_buf_size;
// Tile loops over output rows (i), output cols (j), shared dim (k).
1167 for( i = 0; i < d_size.height; i += di )
// Merge a tiny final tile into the previous one (avoid <1/8-size remnants).
1170 if( i + di >= d_size.height || 8*(i + di) + di > 8*d_size.height )
1171 di = d_size.height - i;
1173 for( j = 0; j < d_size.width; j += dj )
1175 uchar* _d = _D->data + i*_D->step + j*elem_size;
1176 const uchar* _c = Cdata + i*c_step0 + j*c_step1;
1177 size_t _d_step = _D->step;
1180 if( j + dj >= d_size.width || 8*(j + dj) + dj > 8*d_size.width )
1181 dj = d_size.width - j;
1187 _d_step = dj*work_elem_size;
1190 for( k = 0; k < len; k += dk )
1192 const uchar* _a = A.data + i*a_step0 + k*a_step1;
1193 size_t _a_step = A.step;
1194 const uchar* _b = B.data + k*b_step0 + j*b_step1;
1195 size_t _b_step = b_step;
1199 if( k + dk >= len || 8*(k + dk) + dk > 8*len )
1203 a_bl_size.width = dk, a_bl_size.height = di;
1205 a_bl_size.width = di, a_bl_size.height = dk;
// Pre-transpose the A tile into a_buf so the kernel reads rows.
1207 if( a_buf && is_a_t )
1209 _a_step = dk*elem_size;
1210 GEMM_TransposeBlock( _a, A.step, a_buf, _a_step, a_bl_size, elem_size );
1211 std::swap( a_bl_size.width, a_bl_size.height );
// Compact a partial-width B tile into contiguous b_buf.
1215 if( dj < d_size.width )
1219 b_size.width = dj, b_size.height = dk;
1221 b_size.width = dk, b_size.height = dj;
1223 _b_step = b_size.width*elem_size;
1224 GEMM_CopyBlock( _b, b_step, b_buf, _b_step, b_size, elem_size );
// Multiply the tile: accumulate into d_buf, or single-pass when possible.
1229 blockMulFunc( _a, _a_step, _b, _b_step, _d, _d_step,
1230 a_bl_size, Size(dj,di), flags );
1232 singleMulFunc( _a, _a_step, _b, _b_step, _c, Cstep,
1233 _d, _d_step, a_bl_size, Size(dj,di), alpha, beta, flags );
// Blend the accumulated tile into the output with alpha/beta and C.
1238 storeFunc( _c, Cstep, _d, _d_step,
1239 _D->data + i*_D->step + j*elem_size,
1240 _D->step, Size(dj,di), alpha, beta, flags );
1250 /****************************************************************************************\
1252 \****************************************************************************************/
// Per-pixel affine transform of a single-channel image: each output channel k
// is _m[0]*src + _m[1] using a 2-entry row of the transform matrix tmat.
// T is the pixel type, WT the matrix/working type; results are saturated to T.
// NOTE(review): some interior lines (braces, loop-variable declarations,
// the dst_cn special cases) are elided from this chunk.
1254 template<typename T, typename WT> static void
1255 transformC1_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
1257 Size size = getContinuousSize( srcmat, dstmat );
1258 const WT* m = (const WT*)tmat.data;
1259 int dst_cn = dstmat.channels();
1262 for( y = 0; y < size.height; y++ )
1264 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1265 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Generic case: one matrix row (scale, offset) per output channel.
1268 for( k = 0; k < dst_cn; k++, dst++, _m += 2 )
1269 for( x = 0; x < size.width; x++ )
1270 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x] + _m[1]);
// Per-pixel affine transform of a 2-channel image using a 2x3 (or dst_cn x 3)
// matrix: dst = M * [v0, v1, 1]^T, saturated to T.
// NOTE(review): interior lines (braces, declarations, the dst_cn dispatch)
// are elided from this chunk.
1274 template<typename T, typename WT> static void
1275 transformC2_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
1277 Size size = getContinuousSize( srcmat, dstmat );
1278 const WT* m = (const WT*)tmat.data;
1279 int dst_cn = dstmat.channels();
1282 for( y = 0; y < size.height; y++ )
1284 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1285 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Fast 2->2 channel case with the full 2x3 matrix applied inline.
1288 for( x = 0; x < size.width*2; x += 2 )
1290 WT v0 = src[x], v1 = src[x+1];
1291 T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]);
1292 T t1 = saturate_cast<T>(m[3]*v0 + m[4]*v1 + m[5]);
1293 dst[x] = t0; dst[x+1] = t1;
// Generic case: one 3-entry matrix row per output channel.
1298 for( k = 0; k < dst_cn; k++, dst++, _m += 3 )
1299 for( x = 0; x < size.width; x++ )
1300 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*2] +
1301 _m[1]*src[x*2+1] + _m[2]);
// Per-pixel affine transform of a 3-channel image using a dst_cn x 4 matrix:
// dst = M * [v0, v1, v2, 1]^T, saturated to T. Fast paths for 3->3 and 3->1.
// NOTE(review): interior lines (braces, declarations, dispatch conditions)
// are elided from this chunk.
1306 template<typename T, typename WT> static void
1307 transformC3_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
1309 Size size = getContinuousSize( srcmat, dstmat );
1310 const WT* m = (const WT*)tmat.data;
1311 int dst_cn = dstmat.channels();
1314 for( y = 0; y < size.height; y++ )
1316 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1317 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Fast 3->3 case: full 3x4 matrix applied inline.
1320 for( x = 0; x < size.width*3; x += 3 )
1322 WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
1323 T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
1324 T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
1325 T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
1326 dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
// Fast 3->1 case (e.g. color-to-gray style projection).
1328 else if( dst_cn == 1 )
1329 for( x = 0; x < size.width; x++, src += 3 )
1330 dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
// Generic case: one 4-entry matrix row per output channel.
1334 for( k = 0; k < dst_cn; k++, dst++, _m += 4 )
1335 for( x = 0; x < size.width; x++ )
1336 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*3] +
1337 _m[1]*src[x*3+1] + _m[2]*src[x*3+2] + _m[3]);
// Per-pixel affine transform of a 4-channel image using a dst_cn x 5 matrix:
// dst = M * [v0, v1, v2, v3, 1]^T, saturated to T. Fast path for 4->4.
// NOTE(review): interior lines (braces, declarations, dispatch conditions)
// are elided from this chunk.
1343 template<typename T, typename WT> static void
1344 transformC4_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
1346 Size size = getContinuousSize( srcmat, dstmat );
1347 const WT* m = (const WT*)tmat.data;
1348 int dst_cn = dstmat.channels();
1351 for( y = 0; y < size.height; y++ )
1353 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1354 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Fast 4->4 case: full 4x5 matrix applied inline, two outputs at a time.
1357 for( x = 0; x < size.width*4; x += 4 )
1359 WT v0 = src[x], v1 = src[x+1], v2 = src[x+2], v3 = src[x+3];
1360 T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]*v3 + m[4]);
1361 T t1 = saturate_cast<T>(m[5]*v0 + m[6]*v1 + m[7]*v2 + m[8]*v3 + m[9]);
1362 dst[x] = t0; dst[x+1] = t1;
1363 t0 = saturate_cast<T>(m[10]*v0 + m[11]*v1 + m[12]*v2 + m[13]*v3 + m[14]);
1364 t1 = saturate_cast<T>(m[15]*v0 + m[16]*v1 + m[17]*v2 + m[18]*v3 + m[19]);
1365 dst[x+2] = t0; dst[x+3] = t1;
// Generic case: one 5-entry matrix row per output channel.
1370 for( k = 0; k < dst_cn; k++, dst++, _m += 5 )
1371 for( x = 0; x < size.width; x++ )
1372 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*4] + _m[1]*src[x*4+1] +
1373 _m[2]*src[x*4+2] + _m[3]*src[x*4+3] + _m[4]);
1382 load3x3Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3 )
// Transpose a row-major 3x4 float matrix into SSE column vectors:
// m0..m2 hold the columns of the 3x3 part (4th lane zero), m3 holds the
// offset column. This lets a pixel transform be computed as
// m0*x + m1*y + m2*z + m3 with _mm_shuffle_ps broadcasts.
1384 m0 = _mm_setr_ps(m[0], m[4], m[8], 0);
1385 m1 = _mm_setr_ps(m[1], m[5], m[9], 0);
1386 m2 = _mm_setr_ps(m[2], m[6], m[10], 0);
1387 m3 = _mm_setr_ps(m[3], m[7], m[11], 0);
1391 load4x4Matrix( const float* m, __m128& m0, __m128& m1, __m128& m2, __m128& m3, __m128& m4 )
// Transpose a row-major 4x5 float matrix into SSE column vectors:
// m0..m3 are the columns of the 4x4 part, m4 is the offset column, so a
// 4-channel pixel transform is m0*x + m1*y + m2*z + m3*w + m4.
1393 m0 = _mm_setr_ps(m[0], m[5], m[10], m[15]);
1394 m1 = _mm_setr_ps(m[1], m[6], m[11], m[16]);
1395 m2 = _mm_setr_ps(m[2], m[7], m[12], m[17]);
1396 m3 = _mm_setr_ps(m[3], m[8], m[13], m[18]);
1397 m4 = _mm_setr_ps(m[4], m[9], m[14], m[19]);
1401 transformC3_<uchar, float>( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// 8-bit 3-channel specialization. When every matrix coefficient is small
// enough (|m| < 2^(15-BITS), |offset| < 2^(15-BITS)*256) the transform is
// done in 16-bit fixed point with _mm_madd_epi16, 8 pixels per iteration;
// +0.5f on the offsets implements rounding before the >> BITS shift.
// Otherwise it falls back to per-channel float arithmetic.
1405 const int BITS = 10, SCALE = 1 << BITS;
1406 const float MAX_M = (float)(1 << (15 - BITS));
1407 Size size = getContinuousSize( srcmat, dstmat );
1408 const float* m = (const float*)tmat.data;
1409 int dst_cn = dstmat.channels();
// Fixed-point path is only valid if no coefficient overflows int16 after
// scaling by SCALE (offsets get an extra factor 256 of headroom).
1413 std::abs(m[0]) < MAX_M && std::abs(m[1]) < MAX_M && std::abs(m[2]) < MAX_M && std::abs(m[3]) < MAX_M*256 &&
1414 std::abs(m[4]) < MAX_M && std::abs(m[5]) < MAX_M && std::abs(m[6]) < MAX_M && std::abs(m[7]) < MAX_M*256 &&
1415 std::abs(m[8]) < MAX_M && std::abs(m[9]) < MAX_M && std::abs(m[10]) < MAX_M && std::abs(m[11]) < MAX_M*256 )
1417 // faster fixed-point transformation
1418 short m00 = saturate_cast<short>(m[0]*SCALE), m01 = saturate_cast<short>(m[1]*SCALE),
1419 m02 = saturate_cast<short>(m[2]*SCALE), m10 = saturate_cast<short>(m[4]*SCALE),
1420 m11 = saturate_cast<short>(m[5]*SCALE), m12 = saturate_cast<short>(m[6]*SCALE),
1421 m20 = saturate_cast<short>(m[8]*SCALE), m21 = saturate_cast<short>(m[9]*SCALE),
1422 m22 = saturate_cast<short>(m[10]*SCALE);
1423 int m03 = saturate_cast<int>((m[3]+0.5f)*SCALE), m13 = saturate_cast<int>((m[7]+0.5f)*SCALE ),
1424 m23 = saturate_cast<int>((m[11]+0.5f)*SCALE);
// Each row vector is laid out twice so one madd covers two pixels.
1426 __m128i m0 = _mm_setr_epi16(0, m00, m01, m02, m00, m01, m02, 0);
1427 __m128i m1 = _mm_setr_epi16(0, m10, m11, m12, m10, m11, m12, 0);
1428 __m128i m2 = _mm_setr_epi16(0, m20, m21, m22, m20, m21, m22, 0);
1429 __m128i m3 = _mm_setr_epi32(m03, m13, m23, 0);
1431 for( y = 0; y < size.height; y++ )
1433 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1434 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Vector loop: 8 interleaved BGR pixels (24 bytes) per iteration.
1437 for( ; x <= (size.width - 8)*3; x += 8*3 )
1439 __m128i z = _mm_setzero_si128(), t0, t1, t2, r0, r1;
1440 __m128i v0 = _mm_loadl_epi64((const __m128i*)(src + x));
1441 __m128i v1 = _mm_loadl_epi64((const __m128i*)(src + x + 8));
1442 __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 16)), v3;
1443 v0 = _mm_unpacklo_epi8(v0, z); // b0 g0 r0 b1 g1 r1 b2 g2
1444 v1 = _mm_unpacklo_epi8(v1, z); // r2 b3 g3 r3 b4 g4 r4 b5
1445 v2 = _mm_unpacklo_epi8(v2, z); // g5 r5 b6 g6 r6 b7 g7 r7
// Realign the 24 widened samples into four registers of two pixels each.
1447 v3 = _mm_srli_si128(v2, 2); // ? b6 g6 r6 b7 g7 r7 0
1448 v2 = _mm_or_si128(_mm_slli_si128(v2, 10), _mm_srli_si128(v1, 6)); // ? b4 g4 r4 b5 g5 r5 ?
1449 v1 = _mm_or_si128(_mm_slli_si128(v1, 6), _mm_srli_si128(v0, 10)); // ? b2 g2 r2 b3 g3 r3 ?
1450 v0 = _mm_slli_si128(v0, 2); // 0 b0 g0 r0 b1 g1 r1 ?
1452 // process pixels 0 & 1
1453 t0 = _mm_madd_epi16(v0, m0); // a0 b0 a1 b1
1454 t1 = _mm_madd_epi16(v0, m1); // c0 d0 c1 d1
1455 t2 = _mm_madd_epi16(v0, m2); // e0 f0 e1 f1
1456 v0 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
1457 t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
1458 t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
1459 t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
1460 r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v0, t1), _mm_unpackhi_epi64(v0,t1)), m3); // B0 G0 R0 0
1461 r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B1 G1 R1 0
1462 r0 = _mm_srai_epi32(r0, BITS);
1463 r1 = _mm_srai_epi32(r1, BITS);
1464 v0 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B0 G0 R0 B1 G1 R1 0
1466 // process pixels 2 & 3
1467 t0 = _mm_madd_epi16(v1, m0); // a0 b0 a1 b1
1468 t1 = _mm_madd_epi16(v1, m1); // c0 d0 c1 d1
1469 t2 = _mm_madd_epi16(v1, m2); // e0 f0 e1 f1
1470 v1 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
1471 t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
1472 t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
1473 t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
1474 r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v1, t1), _mm_unpackhi_epi64(v1,t1)), m3); // B2 G2 R2 0
1475 r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B3 G3 R3 0
1476 r0 = _mm_srai_epi32(r0, BITS);
1477 r1 = _mm_srai_epi32(r1, BITS);
1478 v1 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B2 G2 R2 B3 G3 R3 0
1480 // process pixels 4 & 5
1481 t0 = _mm_madd_epi16(v2, m0); // a0 b0 a1 b1
1482 t1 = _mm_madd_epi16(v2, m1); // c0 d0 c1 d1
1483 t2 = _mm_madd_epi16(v2, m2); // e0 f0 e1 f1
1484 v2 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
1485 t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
1486 t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
1487 t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
1488 r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v2, t1), _mm_unpackhi_epi64(v2,t1)), m3); // B4 G4 R4 0
1489 r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B5 G5 R5 0
1490 r0 = _mm_srai_epi32(r0, BITS);
1491 r1 = _mm_srai_epi32(r1, BITS);
1492 v2 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B4 G4 R4 B5 G5 R5 0
1494 // process pixels 6 & 7
1495 t0 = _mm_madd_epi16(v3, m0); // a0 b0 a1 b1
1496 t1 = _mm_madd_epi16(v3, m1); // c0 d0 c1 d1
1497 t2 = _mm_madd_epi16(v3, m2); // e0 f0 e1 f1
1498 v3 = _mm_unpacklo_epi32(t0, t1); // a0 c0 b0 d0
1499 t0 = _mm_unpackhi_epi32(t0, t1); // a1 b1 c1 d1
1500 t1 = _mm_unpacklo_epi32(t2, z); // e0 0 f0 0
1501 t2 = _mm_unpackhi_epi32(t2, z); // e1 0 f1 0
1502 r0 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(v3, t1), _mm_unpackhi_epi64(v3,t1)), m3); // B6 G6 R6 0
1503 r1 = _mm_add_epi32(_mm_add_epi32(_mm_unpacklo_epi64(t0, t2), _mm_unpackhi_epi64(t0,t2)), m3); // B7 G7 R7 0
1504 r0 = _mm_srai_epi32(r0, BITS);
1505 r1 = _mm_srai_epi32(r1, BITS);
1506 v3 = _mm_packus_epi16(_mm_packs_epi32(_mm_slli_si128(r0, 4), r1), z); // 0 B6 G6 R6 B7 G7 R7 0
// Re-pack the four 2-pixel results into three contiguous 8-byte stores.
1508 v0 = _mm_or_si128(_mm_srli_si128(v0, 1), _mm_slli_si128(v1, 5));
1509 v1 = _mm_or_si128(_mm_srli_si128(v1, 3), _mm_slli_si128(v2, 3));
1510 v2 = _mm_or_si128(_mm_srli_si128(v2, 5), _mm_slli_si128(v3, 1));
1511 _mm_storel_epi64((__m128i*)(dst + x), v0);
1512 _mm_storel_epi64((__m128i*)(dst + x + 8), v1);
1513 _mm_storel_epi64((__m128i*)(dst + x + 16), v2);
// Scalar tail: remaining (< 8) pixels in the same fixed-point arithmetic.
1516 for( ; x < size.width*3; x += 3 )
1518 int v0 = src[x], v1 = src[x+1], v2 = src[x+2];
1519 uchar t0 = saturate_cast<uchar>((m00*v0 + m01*v1 + m02*v2 + m03)>>BITS);
1520 uchar t1 = saturate_cast<uchar>((m10*v0 + m11*v1 + m12*v2 + m13)>>BITS);
1521 uchar t2 = saturate_cast<uchar>((m20*v0 + m21*v1 + m22*v2 + m23)>>BITS);
1522 dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
// Float fallback paths (coefficients too large for fixed point).
1528 for( y = 0; y < size.height; y++ )
1530 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1531 T* dst = (T*)(dstmat.data + dstmat.step*y);
1534 for( x = 0; x < size.width; x++, src += 3 )
1535 dst[x] = saturate_cast<T>(m[0]*CV_8TO32F(src[0]) +
1536 m[1]*CV_8TO32F(src[1]) + m[2]*CV_8TO32F(src[2]) + m[3]);
1540 for( k = 0; k < dst_cn; k++, dst++, _m += 4 )
1541 for( x = 0; x < size.width; x++ )
1542 dst[x*dst_cn] = saturate_cast<T>(_m[0]*CV_8TO32F(src[x*3]) +
1543 _m[1]*CV_8TO32F(src[x*3+1]) + _m[2]*CV_8TO32F(src[x*3+2]) + _m[3]);
1549 transformC3_<ushort, float>( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// 16-bit unsigned 3-channel specialization. The SSE path converts samples
// to float, applies the matrix via broadcast-multiply-add, then converts
// back. Because _mm_packs_epi32 saturates to *signed* 16-bit, the offset
// is pre-biased by -32768 and the packed result re-biased with 'delta',
// which yields correct unsigned saturation.
1553 Size size = getContinuousSize( srcmat, dstmat );
1554 const float* m = (const float*)tmat.data;
1555 int dst_cn = dstmat.channels();
1560 __m128 m0, m1, m2, m3;
1561 __m128i delta = _mm_setr_epi16(0,-32768,-32768,-32768,-32768,-32768,-32768,0);
1562 load3x3Matrix(m, m0, m1, m2, m3);
1563 m3 = _mm_sub_ps(m3, _mm_setr_ps(32768.f, 32768.f, 32768.f, 0.f));
1565 for( y = 0; y < size.height; y++ )
1567 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1568 T* dst = (T*)(dstmat.data + dstmat.step*y);
// Vector loop: 4 interleaved pixels (12 ushorts) per iteration.
1571 for( ; x <= (size.width - 4)*3; x += 4*3 )
1573 __m128i z = _mm_setzero_si128();
1574 __m128i v0 = _mm_loadu_si128((const __m128i*)(src + x)), v1;
1575 __m128i v2 = _mm_loadl_epi64((const __m128i*)(src + x + 8)), v3;
1576 v1 = _mm_unpacklo_epi16(_mm_srli_si128(v0, 6), z); // b1 g1 r1
1577 v3 = _mm_unpacklo_epi16(_mm_srli_si128(v2, 2), z); // b3 g3 r3
1578 v2 = _mm_or_si128(_mm_srli_si128(v0, 12), _mm_slli_si128(v2, 4));
1579 v0 = _mm_unpacklo_epi16(v0, z); // b0 g0 r0
1580 v2 = _mm_unpacklo_epi16(v2, z); // b2 g2 r2
1581 __m128 x0 = _mm_cvtepi32_ps(v0), x1 = _mm_cvtepi32_ps(v1);
1582 __m128 x2 = _mm_cvtepi32_ps(v2), x3 = _mm_cvtepi32_ps(v3);
// y_i = m0*b + m1*g + m2*r + m3 (column-vector form from load3x3Matrix).
1583 __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
1584 _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
1585 _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
1586 _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
1587 __m128 y1 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
1588 _mm_mul_ps(m0, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(0,0,0,0))),
1589 _mm_mul_ps(m1, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(1,1,1,1)))),
1590 _mm_mul_ps(m2, _mm_shuffle_ps(x1,x1,_MM_SHUFFLE(2,2,2,2)))), m3);
1591 __m128 y2 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
1592 _mm_mul_ps(m0, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(0,0,0,0))),
1593 _mm_mul_ps(m1, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(1,1,1,1)))),
1594 _mm_mul_ps(m2, _mm_shuffle_ps(x2,x2,_MM_SHUFFLE(2,2,2,2)))), m3);
1595 __m128 y3 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
1596 _mm_mul_ps(m0, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(0,0,0,0))),
1597 _mm_mul_ps(m1, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(1,1,1,1)))),
1598 _mm_mul_ps(m2, _mm_shuffle_ps(x3,x3,_MM_SHUFFLE(2,2,2,2)))), m3);
1599 v0 = _mm_cvtps_epi32(y0); v1 = _mm_cvtps_epi32(y1);
1600 v2 = _mm_cvtps_epi32(y2); v3 = _mm_cvtps_epi32(y3);
// Pack with signed saturation, undo the -32768 bias, re-interleave, store.
1602 v0 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v0,4), v1), delta); // 0 b0 g0 r0 b1 g1 r1 0
1603 v2 = _mm_add_epi16(_mm_packs_epi32(_mm_slli_si128(v2,4), v3), delta); // 0 b2 g2 r2 b3 g3 r3 0
1604 v1 = _mm_or_si128(_mm_srli_si128(v0,2), _mm_slli_si128(v2,10)); // b0 g0 r0 b1 g1 r1 b2 g2
1605 v2 = _mm_srli_si128(v2, 6); // r2 b3 g3 r3 0 0 0 0
1606 _mm_storeu_si128((__m128i*)(dst + x), v1);
1607 _mm_storel_epi64((__m128i*)(dst + x + 8), v2);
// Scalar tail for the remaining (< 4) pixels.
1610 for( ; x < size.width*3; x += 3 )
1612 WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
1613 T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
1614 T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
1615 T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
1616 dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
// Non-SSE / non-3-channel fallback paths.
1622 for( y = 0; y < size.height; y++ )
1624 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1625 T* dst = (T*)(dstmat.data + dstmat.step*y);
1628 for( x = 0; x < size.width; x++, src += 3 )
1629 dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
1633 for( k = 0; k < dst_cn; k++, dst++, _m += 4 )
1634 for( x = 0; x < size.width; x++ )
1635 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*3] + _m[1]*src[x*3+1] + _m[2]*src[x*3+2] + _m[3]);
1641 transformC3_<float, float>( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// 32-bit float 3-channel specialization. One pixel per SSE iteration;
// _mm_loadu_ps reads 4 floats (the pixel plus one extra element), so the
// vector loop deliberately stops one pixel early and the last pixel is
// handled by the scalar tail to avoid reading past the row.
1645 Size size = getContinuousSize( srcmat, dstmat );
1646 const float* m = (const float*)tmat.data;
1647 int dst_cn = dstmat.channels();
1652 __m128 m0, m1, m2, m3;
1653 load3x3Matrix(m, m0, m1, m2, m3);
1655 for( y = 0; y < size.height; y++ )
1657 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1658 T* dst = (T*)(dstmat.data + dstmat.step*y);
1661 for( ; x < (size.width - 1)*3; x += 3 )
1663 __m128 x0 = _mm_loadu_ps(src + x);
1664 __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(
1665 _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
1666 _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
1667 _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))), m3);
// Store only 3 lanes: low 2 with storel_pi, lane 2 with store_ss.
1668 _mm_storel_pi((__m64*)(dst + x), y0);
1669 _mm_store_ss(dst + x + 2, _mm_movehl_ps(y0,y0));
// Scalar tail (at least the final pixel of each row).
1672 for( ; x < size.width*3; x += 3 )
1674 WT v0 = src[x], v1 = src[x+1], v2 = src[x+2];
1675 T t0 = saturate_cast<T>(m[0]*v0 + m[1]*v1 + m[2]*v2 + m[3]);
1676 T t1 = saturate_cast<T>(m[4]*v0 + m[5]*v1 + m[6]*v2 + m[7]);
1677 T t2 = saturate_cast<T>(m[8]*v0 + m[9]*v1 + m[10]*v2 + m[11]);
1678 dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
// Non-SSE / non-3-channel fallback paths.
1684 for( y = 0; y < size.height; y++ )
1686 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1687 T* dst = (T*)(dstmat.data + dstmat.step*y);
1690 for( x = 0; x < size.width; x++, src += 3 )
1691 dst[x] = saturate_cast<T>(m[0]*src[0] + m[1]*src[1] + m[2]*src[2] + m[3]);
1695 for( k = 0; k < dst_cn; k++, dst++, _m += 4 )
1696 for( x = 0; x < size.width; x++ )
1697 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*3] + _m[1]*src[x*3+1] + _m[2]*src[x*3+2] + _m[3]);
1704 transformC4_<float, float>( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// 32-bit float 4-channel specialization: each pixel is exactly one SSE
// register, so the whole 4x5 transform is four broadcast multiply-adds
// plus the offset column — no tail loop needed in the SSE path.
1708 Size size = getContinuousSize( srcmat, dstmat );
1709 const WT* m = (const WT*)tmat.data;
1710 int dst_cn = dstmat.channels();
1715 __m128 m0, m1, m2, m3, m4;
1716 load4x4Matrix(m, m0, m1, m2, m3, m4);
1718 for( y = 0; y < size.height; y++ )
1720 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1721 T* dst = (T*)(dstmat.data + dstmat.step*y);
1722 for( x = 0; x < size.width*4; x += 4 )
1724 __m128 x0 = _mm_loadu_ps(src + x);
1725 __m128 y0 = _mm_add_ps(_mm_add_ps(_mm_add_ps(_mm_add_ps(
1726 _mm_mul_ps(m0, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(0,0,0,0))),
1727 _mm_mul_ps(m1, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(1,1,1,1)))),
1728 _mm_mul_ps(m2, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(2,2,2,2)))),
1729 _mm_mul_ps(m3, _mm_shuffle_ps(x0,x0,_MM_SHUFFLE(3,3,3,3)))), m4);
1730 _mm_storeu_ps(dst + x, y0);
// Fallback: per-output-channel scalar path for other dst_cn / no SSE.
1736 for( y = 0; y < size.height; y++ )
1738 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1739 T* dst = (T*)(dstmat.data + dstmat.step*y);
1741 for( k = 0; k < dst_cn; k++, dst++, _m += 5 )
1742 for( x = 0; x < size.width; x++ )
1743 dst[x*dst_cn] = saturate_cast<T>(_m[0]*src[x*4] + _m[1]*src[x*4+1] +
1744 _m[2]*src[x*4+2] + _m[3]*src[x*4+3] + _m[4]);
1752 template<typename T, typename WT> static void
1753 diagtransC2_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// Fast path for a diagonal 2x3 matrix: each channel is just scaled and
// shifted independently. Row-major indices: channel 0 uses m[0] (scale)
// and m[2] (offset); channel 1 uses m[4] and m[5].
1755 Size size = getContinuousSize( srcmat, dstmat );
1756 const WT* m = (const WT*)tmat.data;
1759 for( y = 0; y < size.height; y++ )
1761 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1762 T* dst = (T*)(dstmat.data + dstmat.step*y);
1764 for( x = 0; x < size.width*2; x += 2 )
1766 T t0 = saturate_cast<T>(m[0]*src[x] + m[2]);
1767 T t1 = saturate_cast<T>(m[4]*src[x+1] + m[5]);
1768 dst[x] = t0; dst[x+1] = t1;
1773 template<typename T, typename WT> static void
1774 diagtransC3_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// Fast path for a diagonal 3x4 matrix: per-channel scale + offset.
// Row-major indices: (m[0],m[3]), (m[5],m[7]), (m[10],m[11]) are the
// (scale, offset) pairs for channels 0..2.
1776 Size size = getContinuousSize( srcmat, dstmat );
1777 const WT* m = (const WT*)tmat.data;
1780 for( y = 0; y < size.height; y++ )
1782 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1783 T* dst = (T*)(dstmat.data + dstmat.step*y);
1785 for( x = 0; x < size.width*3; x += 3 )
1787 T t0 = saturate_cast<T>(m[0]*src[x] + m[3]);
1788 T t1 = saturate_cast<T>(m[5]*src[x+1] + m[7]);
1789 T t2 = saturate_cast<T>(m[10]*src[x+2] + m[11]);
1790 dst[x] = t0; dst[x+1] = t1; dst[x+2] = t2;
1795 template<typename T, typename WT> static void
1796 diagtransC4_( const Mat& srcmat, Mat& dstmat, Mat& tmat )
// Fast path for a diagonal 4x5 matrix: per-channel scale + offset.
// Row-major indices: (m[0],m[4]), (m[6],m[9]), (m[12],m[14]), (m[18],m[19])
// are the (scale, offset) pairs for channels 0..3.
1798 Size size = getContinuousSize( srcmat, dstmat );
1799 const WT* m = (const WT*)tmat.data;
1802 for( y = 0; y < size.height; y++ )
1804 const T* src = (const T*)(srcmat.data + srcmat.step*y);
1805 T* dst = (T*)(dstmat.data + dstmat.step*y);
1807 for( x = 0; x < size.width*4; x += 4 )
1809 T t0 = saturate_cast<T>(m[0]*src[x] + m[4]);
1810 T t1 = saturate_cast<T>(m[6]*src[x+1] + m[9]);
1811 dst[x] = t0; dst[x+1] = t1;
1812 t0 = saturate_cast<T>(m[12]*src[x+2] + m[14]);
1813 t1 = saturate_cast<T>(m[18]*src[x+3] + m[19]);
1814 dst[x+2] = t0; dst[x+3] = t1;
1819 typedef void (*TransformFunc)( const Mat &src, Mat& dst, Mat& M );
// Public entry point: dst(i) = M * src(i) (+ offset column, if M has
// scn+1 columns). Dispatches on depth/channels via the function table;
// special-cases: 1-channel -> convertTo (pure scale+shift), diagonal M on
// 8-bit data -> 256-entry LUT, otherwise the generic per-type transform.
1821 void transform( const Mat& src, Mat& dst, const Mat& _m )
// tab[0]: full-matrix transforms, tab[1]: diagonal-matrix fast paths;
// entries are indexed by CV_MAKETYPE(depth, cn) (0 = unsupported combo).
1823 static TransformFunc tab[2][32] =
1825 {transformC1_<uchar, float>, 0, transformC1_<ushort, float>, transformC1_<short,float>,
1826 transformC1_<int, double>, transformC1_<float, float>, transformC1_<double, double>, 0,
1827 transformC2_<uchar, float>, 0, transformC2_<ushort, float>, transformC2_<short,float>,
1828 transformC2_<int, double>, transformC2_<float, float>, transformC2_<double, double>, 0,
1829 transformC3_<uchar, float>, 0, transformC3_<ushort, float>, transformC3_<short,float>,
1830 transformC3_<int, double>, transformC3_<float, float>, transformC3_<double, double>, 0,
1831 transformC4_<uchar, float>, 0, transformC4_<ushort, float>, transformC4_<short,float>,
1832 transformC4_<int, double>, transformC4_<float, float>, transformC4_<double, double>, 0},
1834 {0, 0, 0, 0, 0, 0, 0, 0,
1835 diagtransC2_<uchar, float>, 0, diagtransC2_<ushort, float>, diagtransC2_<short,float>,
1836 diagtransC2_<int, double>, diagtransC2_<float, float>, diagtransC2_<double, double>, 0,
1837 diagtransC3_<uchar, float>, 0, diagtransC3_<ushort, float>, diagtransC3_<short,float>,
1838 diagtransC3_<int, double>, diagtransC3_<float, float>, diagtransC3_<double, double>, 0,
1839 diagtransC4_<uchar, float>, 0, diagtransC4_<ushort, float>, diagtransC4_<short,float>,
1840 diagtransC4_<int, double>, diagtransC4_<float, float>, diagtransC4_<double, double>, 0}
1843 int type = src.type(), depth = src.depth(), scn = src.channels(), dcn = _m.rows;
1844 bool isDiag = false;
1845 CV_Assert( (scn == _m.cols || scn + 1 == _m.cols) && scn <= 4 && dcn <= 4 );
// mbuf backs the widest possible normalized matrix (4 rows x 5 cols).
1847 double mbuf[20] = {0};
1850 dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
1851 Size size = getContinuousSize( src, dst );
// The matrix is normalized to a continuous dcn x (scn+1) Mat of mtype
// (double for CV_32S/CV_64F inputs, float otherwise).
1853 int mtype = depth == CV_32S || depth == CV_64F ? CV_64F : CV_32F;
1854 if( !_m.isContinuous() || _m.type() != mtype || _m.cols != scn + 1 )
1856 m = Mat(dcn, scn + 1, mtype, mbuf);
1857 Mat tmat_part = m.colRange(0, _m.cols);
1858 _m.convertTo(tmat_part, mtype);
1864 double eps = mtype == CV_32F ? FLT_EPSILON : DBL_EPSILON;
// 1-channel case: the transform is just alpha*src + beta -> convertTo.
1869 if( mtype == CV_32F )
1870 alpha = ((float*)m.data)[0], beta = ((float*)m.data)[1];
1872 alpha = ((double*)m.data)[0], beta = ((double*)m.data)[1];
1873 src.convertTo( dst, dst.type(), alpha, beta );
// Detect a diagonal matrix (all off-diagonal elements ~ 0).
1877 for( i = 0, isDiag = true; isDiag && i < scn; i++ )
1879 for( j = 0; isDiag && j < scn; j++ )
1881 double v = mtype == CV_32F ? ((float*)m.data)[i*(scn+1)+j] :
1882 ((double*)m.data)[i*(scn+1)+j];
1883 if( i != j && fabs(v) > eps )
// Diagonal matrix on 8-bit data: precompute a per-channel 256-entry LUT
// (val starts at the channel offset and advances by the diagonal scale).
1888 if( isDiag && depth == CV_8U )
1890 Mat lut(1, 256, CV_8UC(scn));
1891 for( i = 0; i < scn; i++ )
1893 uchar* data = lut.data + i;
1895 if( mtype == CV_32F )
1897 val = ((float*)m.data)[i*(scn+1) + scn];
1898 delta = ((float*)m.data)[i*(scn+1) + i];
1902 val = ((double*)m.data)[i*(scn+1) + scn];
1903 delta = ((double*)m.data)[i*(scn+1) + i];
1905 for( j = 0; j < 256; j++, val += delta )
1907 int ival = cvRound(val);
1908 data[j] = CV_CAST_8U(ival);
1911 LUT( src, lut, dst );
// General case: dispatch to the per-type transform function.
1916 TransformFunc func = tab[0][type];
1917 CV_Assert( func != 0 );
1918 func( src, dst, m );
1922 /****************************************************************************************\
1923 * Perspective Transform *
1924 \****************************************************************************************/
1926 template<typename T> static void
1927 perspectiveTransform2_( const Mat& srcmat, Mat& dstmat, const double* mat )
1929 Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
1931 for( int i = 0; i < size.height; i++ )
1933 const T* src = (const T*)(srcmat.data + srcmat.step*i);
1934 T* dst = (T*)(dstmat.data + dstmat.step*i);
1936 for( int j = 0; j < size.width; j += 2 )
1938 T x = src[j], y = src[j + 1];
1939 double w = x*mat[6] + y*mat[7] + mat[8];
1941 if( fabs(w) > FLT_EPSILON )
1944 dst[j] = (T)((x*mat[0] + y*mat[1] + mat[2])*w);
1945 dst[j+1] = (T)((x*mat[3] + y*mat[4] + mat[5])*w);
1948 dst[j] = dst[j+1] = (T)0;
1953 template<typename T> static void
1954 perspectiveTransform3_( const Mat& srcmat, Mat& dstmat, const double* mat )
1956 Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
1958 for( int i = 0; i < size.height; i++ )
1960 const T* src = (const T*)(srcmat.data + srcmat.step*i);
1961 T* dst = (T*)(dstmat.data + dstmat.step*i);
1963 for( int j = 0; j < size.width; j += 3 )
1965 T x = src[j], y = src[j + 1], z = src[j + 2];
1966 double w = x*mat[12] + y*mat[13] + z*mat[14] + mat[15];
1968 if( fabs(w) > FLT_EPSILON )
1971 dst[j] = (T)((x*mat[0] + y*mat[1] + z*mat[2] + mat[3]) * w);
1972 dst[j+1] = (T)((x*mat[4] + y*mat[5] + z*mat[6] + mat[7]) * w);
1973 dst[j+2] = (T)((x*mat[8] + y*mat[9] + z*mat[10] + mat[11]) * w);
1976 dst[j] = dst[j+1] = dst[j+2] = (T)0;
1981 template<typename T> static void
1982 perspectiveTransform23_( const Mat& srcmat, Mat& dstmat, const double* mat )
1984 Size size = getContinuousSize( srcmat, dstmat, srcmat.channels() );
1986 for( int i = 0; i < size.height; i++ )
1988 const T* src = (const T*)(srcmat.data + srcmat.step*i);
1989 T* dst = (T*)(dstmat.data + dstmat.step*i);
1991 for( int j = 0; j < size.width; j++, src += 3, dst += 2 )
1993 T x = src[0], y = src[1], z = src[2];
1994 double w = x*mat[8] + y*mat[9] + z*mat[10] + mat[11];
1996 if( fabs(w) > FLT_EPSILON )
1999 dst[0] = (T)((x*mat[0] + y*mat[1] + z*mat[2] + mat[3])*w);
2000 dst[1] = (T)((x*mat[4] + y*mat[5] + z*mat[6] + mat[7])*w);
2003 dst[0] = dst[1] = (T)0;
2008 typedef void (*PerspectiveTransformFunc)(const Mat& src, Mat& dst, const Mat& mat );
// Public entry point: applies the (scn+1)x(scn+1)-row projective matrix _m
// to every scn-channel point of src. Supported mappings: 2->2, 2->3, 3->3
// on CV_32F or CV_64F data; dcn is derived from the matrix row count.
2010 void perspectiveTransform( const Mat& src, Mat& dst, const Mat& _m )
2012 int depth = src.depth(), scn = src.channels(), dcn = _m.rows-1;
2013 CV_Assert( (depth == CV_32F || depth == CV_64F) && scn+1 == _m.cols && scn <= 4 &&
2014 ((scn == 2 && dcn == 2) || (scn == 3 && dcn == 3) || (scn == 2 && dcn == 3)));
2016 double mbuf[16] = {0};
2018 const double* m = (const double*)_m.data;
2020 dst.create( src.size(), CV_MAKETYPE(depth, dcn) );
// Normalize the matrix to a continuous CV_64F copy in mbuf if needed.
2022 if( !_m.isContinuous() || _m.type() != CV_64F )
2024 tmat = Mat(dcn + 1, scn + 1, CV_64F, mbuf);
2025 _m.convertTo(tmat, CV_64F);
2026 m = (const double*)tmat.data;
// Select the implementation by (scn, dcn, depth).
2029 PerspectiveTransformFunc func = 0;
2030 if( scn == 2 && dcn == 2 )
2033 func = perspectiveTransform2_<float>;
2035 func = perspectiveTransform2_<double>;
2037 else if( scn == 2 && dcn == 3 )
2040 func = perspectiveTransform23_<float>;
2042 func = perspectiveTransform23_<double>;
2044 else if( scn == 3 && dcn == 3 )
2047 func = perspectiveTransform3_<float>;
2049 func = perspectiveTransform3_<double>;
2052 CV_Error( CV_StsNotImplemented, "Only 2->2, 2->3 and 3->3 perspective transformation is implemented" );
2053 func( src, dst, m );
2056 /****************************************************************************************\
2058 \****************************************************************************************/
// Computes dst = src1*alpha + src2 element-wise (a.k.a. AXPY).
// Only CV_32F and CV_64F inputs are supported; src1 and src2 must have
// the same size and type. The inner loops are unrolled by 4.
2060 void scaleAdd( const Mat& src1, double alpha, const Mat& src2, Mat& dst )
2062 int type = src1.type(), depth = CV_MAT_DEPTH(type);
2063 CV_Assert( src1.size() == src2.size() && type == src2.type() );
2064 dst.create( src1.size(), type );
2065 Size size = getContinuousSize( src1, src2, dst, src1.channels() );
2067 if( depth == CV_32F )
2069 const float *s1 = (const float*)src1.data, *s2 = (const float*)src2.data;
2070 float* d = (float*)dst.data;
2071 size_t step1 = src1.step/sizeof(s1[0]), step2 = src2.step/sizeof(s2[0]);
2072 size_t step = dst.step/sizeof(d[0]);
2073 float a = (float)alpha;
// Degenerate single-column case: advance row by row.
2075 if( size.width == 1 )
2077 for( ; size.height--; s1 += step1, s2 += step2, d += step )
2078 d[0] = s1[0]*a + s2[0];
2082 for( ; size.height--; s1 += step1, s2 += step2, d += step )
// Main loop unrolled by 4; scalar tail below handles the remainder.
2085 for( i = 0; i <= size.width - 4; i += 4 )
2087 float t0 = s1[i]*a + s2[i];
2088 float t1 = s1[i+1]*a + s2[i+1];
2091 t0 = s1[i+2]*a + s2[i+2];
2092 t1 = s1[i+3]*a + s2[i+3];
2097 for( ; i < size.width; i++ )
2098 d[i] = s1[i]*a + s2[i];
2101 else if( depth == CV_64F )
2103 const double *s1 = (const double*)src1.data, *s2 = (const double*)src2.data;
2104 double* d = (double*)dst.data;
2105 size_t step1 = src1.step/sizeof(s1[0]), step2 = src2.step/sizeof(s2[0]);
2106 size_t step = dst.step/sizeof(d[0]);
2108 if( size.width == 1 )
2110 for( ; size.height--; s1 += step1, s2 += step2, d += step )
2111 d[0] = s1[0]*alpha + s2[0];
2115 for( ; size.height--; s1 += step1, s2 += step2, d += step )
2118 for( i = 0; i <= size.width - 4; i += 4 )
2120 double t0 = s1[i]*alpha + s2[i];
2121 double t1 = s1[i+1]*alpha + s2[i+1];
2124 t0 = s1[i+2]*alpha + s2[i+2];
2125 t1 = s1[i+3]*alpha + s2[i+3];
2130 for( ; i < size.width; i++ )
2131 d[i] = s1[i]*alpha + s2[i];
// Integer depths are not supported by this function.
2135 CV_Error( CV_StsUnsupportedFormat, "" );
2138 /****************************************************************************************\
2139 * Covariation Matrix *
2140 \****************************************************************************************/
// Covariance from an array of equally-sized sample matrices: each sample
// is flattened into one row of a temporary nsamples x (w*h) matrix, then
// the single-Mat overload is invoked with CV_COVAR_ROWS.
2142 void calcCovarMatrix( const Mat* data, int nsamples, Mat& covar, Mat& _mean, int flags, int ctype )
2144 CV_Assert( data && nsamples > 0 );
2145 Size size = data[0].size();
2146 int sz = size.width*size.height, esz = (int)data[0].elemSize();
2147 int type = data[0].type();
// Output depth: at least CV_32F, widened to the requested/mean depth.
2149 ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);
2151 if( (flags & CV_COVAR_USE_AVG) != 0 )
2153 CV_Assert( _mean.size() == size );
2154 if( _mean.isContinuous() && _mean.type() == ctype )
2155 mean = _mean.reshape(1, 1);
2158 _mean.convertTo(mean, ctype);
2159 mean = mean.reshape(1, 1);
// Flatten every sample into a row (memcpy when its data is continuous).
2163 Mat _data(nsamples, sz, type);
2164 for( int i = 0; i < nsamples; i++ )
2166 CV_Assert( data[i].size() == size && data[i].type() == type );
2167 if( data[i].isContinuous() )
2168 memcpy( _data.ptr(i), data[i].data, sz*esz );
2171 Mat dataRow(size.height, size.width, type, _data.ptr(i));
2172 data[i].copyTo(dataRow);
2176 calcCovarMatrix( _data, covar, mean, (flags & ~(CV_COVAR_ROWS|CV_COVAR_COLS)) | CV_COVAR_ROWS, ctype );
// If the mean was computed here, hand it back in the samples' 2D shape.
2177 if( (flags & CV_COVAR_USE_AVG) == 0 )
2178 _mean = mean.reshape(1, size.height);
// Covariance of samples stored as the rows (CV_COVAR_ROWS) or columns
// (CV_COVAR_COLS) of a single matrix; delegates the actual computation to
// mulTransposed on the mean-subtracted data.
2181 void calcCovarMatrix( const Mat& data, Mat& covar, Mat& _mean, int flags, int ctype )
2183 CV_Assert( ((flags & CV_COVAR_ROWS) != 0) ^ ((flags & CV_COVAR_COLS) != 0) );
2184 bool takeRows = (flags & CV_COVAR_ROWS) != 0;
2185 int type = data.type();
2186 int nsamples = takeRows ? data.rows : data.cols;
2187 CV_Assert( nsamples > 0 );
2188 Size size = takeRows ? Size(data.cols, 1) : Size(1, data.rows);
2190 ctype = std::max(std::max(CV_MAT_DEPTH(ctype >= 0 ? ctype : type), _mean.depth()), CV_32F);
// Either take the caller-supplied mean (converted to ctype if needed)...
2192 if( (flags & CV_COVAR_USE_AVG) != 0 )
2194 CV_Assert( mean.size() == size );
2195 if( mean.type() != ctype )
2196 _mean.convertTo(mean, ctype);
// ...or compute it by averaging along the sample axis.
2200 reduce( data, _mean, takeRows ? 0 : 1, CV_REDUCE_AVG, ctype );
// CV_COVAR_NORMAL selects A^T*A vs A*A^T; CV_COVAR_SCALE divides by N.
2204 mulTransposed( data, covar, ((flags & CV_COVAR_NORMAL) == 0) ^ takeRows,
2205 mean, (flags & CV_COVAR_SCALE) != 0 ? 1./nsamples : 1, ctype );
2208 /****************************************************************************************\
2210 \****************************************************************************************/
// Mahalanobis distance: sqrt( (v1-v2)^T * icovar * (v1-v2) ), where icovar
// is the inverse covariance matrix. v1 and v2 must have the same type and
// size; their total element count must match icovar's dimensions.
// Only CV_32F and CV_64F are supported. 'buf' holds the difference vector.
2212 double Mahalanobis( const Mat& v1, const Mat& v2, const Mat& icovar )
2214 int type = v1.type(), depth = v1.depth();
2215 Size sz = v1.size();
2216 int i, j, len = sz.width*sz.height*v1.channels();
2217 AutoBuffer<uchar> buf(len*v1.elemSize());
2220 CV_Assert( type == v2.type() && type == icovar.type() &&
2221 sz == v2.size() && len == icovar.rows && len == icovar.cols );
// Treat continuous inputs as a single row to minimize loop overhead.
2223 if( v1.isContinuous() && v2.isContinuous() )
2225 sz.width *= sz.height;
2229 if( depth == CV_32F )
2231 const float* src1 = (const float*)v1.data;
2232 const float* src2 = (const float*)v2.data;
2233 size_t step1 = v1.step/sizeof(src1[0]);
2234 size_t step2 = v2.step/sizeof(src2[0]);
2235 float* diff = (float*)(uchar*)buf;
2236 const float* mat = (const float*)icovar.data;
2237 size_t matstep = icovar.step/sizeof(mat[0]);
// Pass 1: diff = v1 - v2.
2239 for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
2241 for( i = 0; i < sz.width; i++ )
2242 diff[i] = src1[i] - src2[i];
// Pass 2: result = sum_i diff[i] * (icovar.row(i) . diff),
// with the inner dot product unrolled by 4.
2245 diff = (float*)(uchar*)buf;
2246 for( i = 0; i < len; i++, mat += matstep )
2249 for( j = 0; j <= len - 4; j += 4 )
2250 row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
2251 diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
2252 for( ; j < len; j++ )
2253 row_sum += diff[j]*mat[j];
2254 result += row_sum * diff[i];
2257 else if( depth == CV_64F )
2259 const double* src1 = (const double*)v1.data;
2260 const double* src2 = (const double*)v2.data;
2261 size_t step1 = v1.step/sizeof(src1[0]);
2262 size_t step2 = v2.step/sizeof(src2[0]);
2263 double* diff = (double*)(uchar*)buf;
2264 const double* mat = (const double*)icovar.data;
2265 size_t matstep = icovar.step/sizeof(mat[0]);
2267 for( ; sz.height--; src1 += step1, src2 += step2, diff += sz.width )
2269 for( i = 0; i < sz.width; i++ )
2270 diff[i] = src1[i] - src2[i];
2273 diff = (double*)(uchar*)buf;
2274 for( i = 0; i < len; i++, mat += matstep )
2277 for( j = 0; j <= len - 4; j += 4 )
2278 row_sum += diff[j]*mat[j] + diff[j+1]*mat[j+1] +
2279 diff[j+2]*mat[j+2] + diff[j+3]*mat[j+3];
2280 for( ; j < len; j++ )
2281 row_sum += diff[j]*mat[j];
2282 result += row_sum * diff[i];
2286 CV_Error( CV_StsUnsupportedFormat, "" );
2288 return std::sqrt(result);
2292 /****************************************************************************************\
2294 \****************************************************************************************/
2296 template<typename sT, typename dT> static void
2297 MulTransposedR( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
// Computes dst = scale * (src - delta)^T * (src - delta), i.e. the
// "normal" (width x width) product. Only the upper triangle (j >= i) is
// filled; the caller mirrors it. col_buf caches column i of (src-delta)
// so the inner loops stream rows of src; the j-loop is unrolled by 4.
// When delta is a single column it is replicated 4x into delta_buf so the
// unrolled loop can index it with the same stride as a full delta.
2300 const sT* src = (const sT*)srcmat.data;
2301 dT* dst = (dT*)dstmat.data;
2302 const dT* delta = (const dT*)deltamat.data;
2303 size_t srcstep = srcmat.step/sizeof(src[0]);
2304 size_t dststep = dstmat.step/sizeof(dst[0]);
2305 size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
2306 int delta_cols = deltamat.cols;
2307 Size size = srcmat.size();
2311 int buf_size = size.height*sizeof(dT);
2312 AutoBuffer<uchar> buf;
// A per-row (broadcast) delta must be a single column.
2314 if( delta && delta_cols < size.width )
2316 assert( delta_cols == 1 );
2319 buf.allocate(buf_size);
2320 col_buf = (dT*)(uchar*)buf;
// Replicate the single-column delta 4x for the unrolled inner loop.
2322 if( delta && delta_cols < size.width )
2324 delta_buf = col_buf + size.height;
2325 for( i = 0; i < size.height; i++ )
2326 delta_buf[i*4] = delta_buf[i*4+1] =
2327 delta_buf[i*4+2] = delta_buf[i*4+3] = delta[i*deltastep];
2329 deltastep = deltastep ? 4 : 0;
// Branch 1: no delta — plain src^T * src.
2333 for( i = 0; i < size.width; i++, tdst += dststep )
2335 for( k = 0; k < size.height; k++ )
2336 col_buf[k] = src[k*srcstep+i];
2338 for( j = i; j <= size.width - 4; j += 4 )
2340 double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
2341 const sT *tsrc = src + j;
2343 for( k = 0; k < size.height; k++, tsrc += srcstep )
2345 double a = col_buf[k];
2352 tdst[j] = (dT)(s0*scale);
2353 tdst[j+1] = (dT)(s1*scale);
2354 tdst[j+2] = (dT)(s2*scale);
2355 tdst[j+3] = (dT)(s3*scale);
// Scalar tail of the j-loop.
2358 for( ; j < size.width; j++ )
2361 const sT *tsrc = src + j;
2363 for( k = 0; k < size.height; k++, tsrc += srcstep )
2364 s0 += col_buf[k] * tsrc[0];
2366 tdst[j] = (dT)(s0*scale);
// Branch 2: with delta — subtract it when caching column i...
2370 for( i = 0; i < size.width; i++, tdst += dststep )
2373 for( k = 0; k < size.height; k++ )
2374 col_buf[k] = src[k*srcstep+i] - delta[k*deltastep+i];
2376 for( k = 0; k < size.height; k++ )
2377 col_buf[k] = src[k*srcstep+i] - delta_buf[k*deltastep];
// ...and when streaming columns j..j+3.
2379 for( j = i; j <= size.width - 4; j += 4 )
2381 double s0 = 0, s1 = 0, s2 = 0, s3 = 0;
2382 const sT *tsrc = src + j;
2383 const dT *d = delta_buf ? delta_buf : delta + j;
2385 for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
2387 double a = col_buf[k];
2388 s0 += a * (tsrc[0] - d[0]);
2389 s1 += a * (tsrc[1] - d[1]);
2390 s2 += a * (tsrc[2] - d[2]);
2391 s3 += a * (tsrc[3] - d[3]);
2394 tdst[j] = (dT)(s0*scale);
2395 tdst[j+1] = (dT)(s1*scale);
2396 tdst[j+2] = (dT)(s2*scale);
2397 tdst[j+3] = (dT)(s3*scale);
2400 for( ; j < size.width; j++ )
2403 const sT *tsrc = src + j;
2404 const dT *d = delta_buf ? delta_buf : delta + j;
2406 for( k = 0; k < size.height; k++, tsrc+=srcstep, d+=deltastep )
2407 s0 += col_buf[k] * (tsrc[0] - d[0]);
2409 tdst[j] = (dT)(s0*scale);
2415 template<typename sT, typename dT> static void
2416 MulTransposedL( const Mat& srcmat, Mat& dstmat, const Mat& deltamat, double scale )
2419 const sT* src = (const sT*)srcmat.data;
2420 dT* dst = (dT*)dstmat.data;
2421 const dT* delta = (const dT*)deltamat.data;
2422 size_t srcstep = srcmat.step/sizeof(src[0]);
2423 size_t dststep = dstmat.step/sizeof(dst[0]);
2424 size_t deltastep = deltamat.rows > 1 ? deltamat.step/sizeof(delta[0]) : 0;
2425 int delta_cols = deltamat.cols;
2426 Size size = srcmat.size();
2430 for( i = 0; i < size.height; i++, tdst += dststep )
2431 for( j = i; j < size.height; j++ )
2434 const sT *tsrc1 = src + i*srcstep;
2435 const sT *tsrc2 = src + j*srcstep;
2437 for( k = 0; k <= size.width - 4; k += 4 )
2438 s += tsrc1[k]*tsrc2[k] + tsrc1[k+1]*tsrc2[k+1] +
2439 tsrc1[k+2]*tsrc2[k+2] + tsrc1[k+3]*tsrc2[k+3];
2440 for( ; k < size.width; k++ )
2441 s += tsrc1[k] * tsrc2[k];
2442 tdst[j] = (dT)(s*scale);
2447 int delta_shift = delta_cols == size.width ? 4 : 0;
2448 AutoBuffer<uchar> buf(size.width*sizeof(dT));
2449 dT* row_buf = (dT*)(uchar*)buf;
2451 for( i = 0; i < size.height; i++, tdst += dststep )
2453 const sT *tsrc1 = src + i*srcstep;
2454 const dT *tdelta1 = delta + i*deltastep;
2456 if( delta_cols < size.width )
2457 for( k = 0; k < size.width; k++ )
2458 row_buf[k] = tsrc1[k] - tdelta1[0];
2460 for( k = 0; k < size.width; k++ )
2461 row_buf[k] = tsrc1[k] - tdelta1[k];
2463 for( j = i; j < size.height; j++ )
2466 const sT *tsrc2 = src + j*srcstep;
2467 const dT *tdelta2 = delta + j*deltastep;
2468 if( delta_cols < size.width )
2470 delta_buf[0] = delta_buf[1] =
2471 delta_buf[2] = delta_buf[3] = tdelta2[0];
2472 tdelta2 = delta_buf;
2474 for( k = 0; k <= size.width-4; k += 4, tdelta2 += delta_shift )
2475 s += row_buf[k]*(tsrc2[k] - tdelta2[0]) +
2476 row_buf[k+1]*(tsrc2[k+1] - tdelta2[1]) +
2477 row_buf[k+2]*(tsrc2[k+2] - tdelta2[2]) +
2478 row_buf[k+3]*(tsrc2[k+3] - tdelta2[3]);
2479 for( ; k < size.width; k++, tdelta2++ )
2480 s += row_buf[k]*(tsrc2[k] - tdelta2[0]);
2481 tdst[j] = (dT)(s*scale);
2487 typedef void (*MulTransposedFunc)(const Mat& src, Mat& dst, const Mat& delta, double scale);
2489 void mulTransposed( const Mat& src, Mat& dst, bool ata,
2490 const Mat& _delta, double scale, int dtype )
2492 const int gemm_level = 100; // boundary above which GEMM is faster.
2493 int stype = src.type();
2495 dtype = std::max(std::max(CV_MAT_DEPTH(dtype >= 0 ? dtype : stype), delta.depth()), CV_32F);
2496 CV_Assert( src.channels() == 1 );
2500 CV_Assert( delta.channels() == 1 &&
2501 (delta.rows == src.rows || delta.rows == 1) &&
2502 (delta.cols == src.cols || delta.cols == 1));
2503 if( delta.type() != dtype )
2504 _delta.convertTo(delta, dtype);
2507 int dsize = ata ? src.cols : src.rows;
2508 dst.create( dsize, dsize, dtype );
2510 if( src.data == dst.data || (stype == dtype &&
2511 (dst.cols >= gemm_level && dst.rows >= gemm_level &&
2512 src.cols >= gemm_level && src.rows >= gemm_level)))
2515 const Mat* tsrc = &src;
2518 if( delta.size() == src.size() )
2519 subtract( src, delta, src2 );
2522 repeat(delta, src.rows/delta.rows, src.cols/delta.cols, src2);
2523 subtract( src, src2, src2 );
2527 gemm( *tsrc, *tsrc, scale, Mat(), 0, dst, ata ? GEMM_1_T : GEMM_2_T );
2531 MulTransposedFunc func = 0;
2532 if(stype == CV_8U && dtype == CV_32F)
2535 func = MulTransposedR<uchar,float>;
2537 func = MulTransposedL<uchar,float>;
2539 else if(stype == CV_8U && dtype == CV_64F)
2542 func = MulTransposedR<uchar,double>;
2544 func = MulTransposedL<uchar,double>;
2546 else if(stype == CV_16U && dtype == CV_32F)
2549 func = MulTransposedR<ushort,float>;
2551 func = MulTransposedL<ushort,float>;
2553 else if(stype == CV_16U && dtype == CV_64F)
2556 func = MulTransposedR<ushort,double>;
2558 func = MulTransposedL<ushort,double>;
2560 else if(stype == CV_16S && dtype == CV_32F)
2563 func = MulTransposedR<short,float>;
2565 func = MulTransposedL<short,float>;
2567 else if(stype == CV_16S && dtype == CV_64F)
2570 func = MulTransposedR<short,double>;
2572 func = MulTransposedL<short,double>;
2574 else if(stype == CV_32F && dtype == CV_32F)
2577 func = MulTransposedR<float,float>;
2579 func = MulTransposedL<float,float>;
2581 else if(stype == CV_32F && dtype == CV_64F)
2584 func = MulTransposedR<float,double>;
2586 func = MulTransposedL<float,double>;
2588 else if(stype == CV_64F && dtype == CV_64F)
2591 func = MulTransposedR<double,double>;
2593 func = MulTransposedL<double,double>;
2596 CV_Error( CV_StsUnsupportedFormat, "" );
2598 func( src, dst, delta, scale );
2599 completeSymm( dst, false );
/****************************************************************************************\
*                                        Dot Product                                     *
\****************************************************************************************/
2607 template<typename T, typename WT, typename ST> static double
2608 dotprod_( const Mat& srcmat1, const Mat& srcmat2 )
2610 const T *src1 = (const T*)srcmat1.data, *src2 = (const T*)srcmat2.data;
2611 size_t step1 = srcmat1.step/sizeof(src1[0]), step2 = srcmat2.step/sizeof(src2[0]);
2613 Size size = getContinuousSize( srcmat1, srcmat2, srcmat1.channels() );
2615 if( size.width == 1 )
2618 for( ; size.height--; src1 += step1, src2 += step2 )
2619 t += (WT)src1[0]*src2[0];
2624 for( ; size.height--; src1 += step1, src2 += step2 )
2628 for( i = 0; i <= size.width - 4; i += 4 )
2630 sum += (WT)src1[i]*src2[i] +
2631 (WT)src1[i+1]*src2[i+1] +
2632 (WT)src1[i+2]*src2[i+2] +
2633 (WT)src1[i+3]*src2[i+3];
2636 for( ; i < size.width; i++ )
2637 t += (WT)src1[i]*src2[i];
2644 typedef double (*DotProductFunc)(const Mat& src1, const Mat& src2);
2646 double Mat::dot(const Mat& mat) const
2648 static DotProductFunc tab[] = {
2649 dotprod_<uchar, int, int64>, 0,
2650 dotprod_<ushort, double, double>,
2651 dotprod_<short, double, double>,
2652 dotprod_<int, double, double>,
2653 dotprod_<float, double, double>,
2654 dotprod_<double, double, double>, 0 };
2656 DotProductFunc func = tab[depth()];
2657 CV_Assert( mat.type() == type() && mat.size() == size() && func != 0 );
2658 return func( *this, mat );
/****************************************************************************************\
*                                            PCA                                         *
\****************************************************************************************/
2667 PCA::PCA(const Mat& data, const Mat& mean, int flags, int maxComponents)
2669 operator()(data, mean, flags, maxComponents);
2672 PCA& PCA::operator()(const Mat& data, const Mat& _mean, int flags, int maxComponents)
2674 int covar_flags = CV_COVAR_SCALE;
2675 int i, len, in_count;
2678 CV_Assert( data.channels() == 1 );
2679 if( flags & CV_PCA_DATA_AS_COL )
2682 in_count = data.cols;
2683 covar_flags |= CV_COVAR_COLS;
2684 mean_sz = Size(1, len);
2689 in_count = data.rows;
2690 covar_flags |= CV_COVAR_ROWS;
2691 mean_sz = Size(len, 1);
2694 int count = std::min(len, in_count), out_count = count;
2695 if( maxComponents > 0 )
2696 out_count = std::min(count, maxComponents);
2698 // "scrambled" way to compute PCA (when cols(A)>rows(A)):
2699 // B = A'A; B*x=b*x; C = AA'; C*y=c*y -> AA'*y=c*y -> A'A*(A'*y)=c*(A'*y) -> c = b, x=A'*y
2700 if( len <= in_count )
2701 covar_flags |= CV_COVAR_NORMAL;
2703 int ctype = std::max(CV_32F, data.depth());
2704 mean.create( mean_sz, ctype );
2706 Mat covar( count, count, ctype );
2710 CV_Assert( _mean.size() == mean_sz );
2711 _mean.convertTo(mean, ctype);
2714 calcCovarMatrix( data, covar, mean, covar_flags, ctype );
2715 eigen( covar, eigenvalues, eigenvectors );
2717 if( !(covar_flags & CV_COVAR_NORMAL) )
2719 // CV_PCA_DATA_AS_ROW: cols(A)>rows(A). x=A'*y -> x'=y'*A
2720 // CV_PCA_DATA_AS_COL: rows(A)>cols(A). x=A''*y -> x'=y'*A'
2721 Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
2722 if( data.type() != ctype || tmp_mean.data == mean.data )
2724 data.convertTo( tmp_data, ctype );
2725 subtract( tmp_data, tmp_mean, tmp_data );
2729 subtract( data, tmp_mean, tmp_mean );
2730 tmp_data = tmp_mean;
2733 Mat evects1(count, len, ctype);
2734 gemm( eigenvectors, tmp_data, 1, Mat(), 0, evects1,
2735 (flags & CV_PCA_DATA_AS_COL) ? CV_GEMM_B_T : 0);
2736 eigenvectors = evects1;
2738 // normalize eigenvectors
2739 for( i = 0; i < out_count; i++ )
2741 Mat vec = eigenvectors.row(i);
2742 normalize(vec, vec);
2746 if( count > out_count )
2748 // use clone() to physically copy the data and thus deallocate the original matrices
2749 eigenvalues = eigenvalues.rowRange(0,out_count).clone();
2750 eigenvectors = eigenvectors.rowRange(0,out_count).clone();
2756 void PCA::project(const Mat& data, Mat& result) const
2758 CV_Assert( mean.data && eigenvectors.data &&
2759 ((mean.rows == 1 && mean.cols == data.cols) || (mean.cols == 1 && mean.rows == data.rows)));
2760 Mat tmp_data, tmp_mean = repeat(mean, data.rows/mean.rows, data.cols/mean.cols);
2761 int ctype = mean.type();
2762 if( data.type() != ctype || tmp_mean.data == mean.data )
2764 data.convertTo( tmp_data, ctype );
2765 subtract( tmp_data, tmp_mean, tmp_data );
2769 subtract( data, tmp_mean, tmp_mean );
2770 tmp_data = tmp_mean;
2772 if( mean.rows == 1 )
2773 gemm( tmp_data, eigenvectors, 1, Mat(), 0, result, GEMM_2_T );
2775 gemm( eigenvectors, tmp_data, 1, Mat(), 0, result, 0 );
2778 Mat PCA::project(const Mat& data) const
2781 project(data, result);
2785 void PCA::backProject(const Mat& data, Mat& result) const
2787 CV_Assert( mean.data && eigenvectors.data &&
2788 ((mean.rows == 1 && eigenvectors.rows == data.cols) ||
2789 (mean.cols == 1 && eigenvectors.rows == data.rows)));
2791 Mat tmp_data, tmp_mean;
2792 data.convertTo(tmp_data, mean.type());
2793 if( mean.rows == 1 )
2795 tmp_mean = repeat(mean, data.rows, 1);
2796 gemm( tmp_data, eigenvectors, 1, tmp_mean, 1, result, 0 );
2800 tmp_mean = repeat(mean, 1, data.cols);
2801 gemm( eigenvectors, tmp_data, 1, tmp_mean, 1, result, GEMM_1_T );
2805 Mat PCA::backProject(const Mat& data) const
2808 backProject(data, result);
/****************************************************************************************\
*                                    Earlier API (C wrappers)                            *
\****************************************************************************************/
2818 CV_IMPL void cvGEMM( const CvArr* Aarr, const CvArr* Barr, double alpha,
2819 const CvArr* Carr, double beta, CvArr* Darr, int flags )
2821 cv::Mat A = cv::cvarrToMat(Aarr), B = cv::cvarrToMat(Barr);
2822 cv::Mat C, D = cv::cvarrToMat(Darr);
2825 C = cv::cvarrToMat(Carr);
2827 CV_Assert( (D.rows == ((flags & CV_GEMM_A_T) == 0 ? A.rows : A.cols)) &&
2828 (D.cols == ((flags & CV_GEMM_B_T) == 0 ? B.cols : B.rows)) &&
2829 D.type() == A.type() );
2831 gemm( A, B, alpha, C, beta, D, flags );
2836 cvTransform( const CvArr* srcarr, CvArr* dstarr,
2837 const CvMat* transmat, const CvMat* shiftvec )
2839 cv::Mat m = cv::cvarrToMat(transmat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2843 cv::Mat v = cv::cvarrToMat(shiftvec).reshape(1,m.rows),
2844 _m(m.rows, m.cols + 1, m.type()), m1 = _m.colRange(0,m.cols), v1 = _m.col(m.cols);
2845 m.convertTo(m1, m1.type());
2846 v.convertTo(v1, v1.type());
2850 CV_Assert( dst.depth() == src.depth() && dst.channels() == m.rows );
2851 cv::transform( src, dst, m );
2856 cvPerspectiveTransform( const CvArr* srcarr, CvArr* dstarr, const CvMat* mat )
2858 cv::Mat m = cv::cvarrToMat(mat), src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr);
2860 CV_Assert( dst.type() == src.type() && dst.channels() == m.rows-1 );
2861 cv::perspectiveTransform( src, dst, m );
2865 CV_IMPL void cvScaleAdd( const CvArr* srcarr1, CvScalar scale,
2866 const CvArr* srcarr2, CvArr* dstarr )
2868 cv::Mat src1 = cv::cvarrToMat(srcarr1), dst = cv::cvarrToMat(dstarr);
2870 CV_Assert( src1.size() == dst.size() && src1.type() == dst.type() );
2871 cv::scaleAdd( src1, scale.val[0], cv::cvarrToMat(srcarr2), dst );
2876 cvCalcCovarMatrix( const CvArr** vecarr, int count,
2877 CvArr* covarr, CvArr* avgarr, int flags )
2879 cv::Mat cov0 = cv::cvarrToMat(covarr), cov = cov0, mean0, mean;
2880 CV_Assert( vecarr != 0 && count >= 1 );
2883 mean = mean0 = cv::cvarrToMat(avgarr);
2885 if( count == 1 || (flags & CV_COVAR_COLS) != 0 || (flags & CV_COVAR_ROWS) != 0 )
2887 cv::Mat data = cv::cvarrToMat(vecarr[0]);
2888 cv::calcCovarMatrix( data, cov, mean, flags, cov.type() );
2892 std::vector<cv::Mat> data(count);
2893 for( int i = 0; i < count; i++ )
2894 data[i] = cv::cvarrToMat(vecarr[i]);
2895 cv::calcCovarMatrix( &data[0], count, cov, mean, flags, cov.type() );
2898 if( mean.data != mean0.data && mean0.data )
2899 mean.convertTo(mean0, mean0.type());
2901 if( cov.data != cov0.data )
2902 cov.convertTo(cov0, cov0.type());
2907 cvMahalanobis( const CvArr* srcAarr, const CvArr* srcBarr, const CvArr* matarr )
2909 return cv::Mahalanobis(cv::cvarrToMat(srcAarr),
2910 cv::cvarrToMat(srcBarr), cv::cvarrToMat(matarr));
2914 cvMulTransposed( const CvArr* srcarr, CvArr* dstarr,
2915 int order, const CvArr* deltaarr, double scale )
2917 cv::Mat src = cv::cvarrToMat(srcarr), dst0 = cv::cvarrToMat(dstarr), dst = dst0, delta;
2919 delta = cv::cvarrToMat(deltaarr);
2920 cv::mulTransposed( src, dst, order != 0, delta, scale, dst.type());
2921 if( dst.data != dst0.data )
2922 dst.convertTo(dst0, dst0.type());
2925 CV_IMPL double cvDotProduct( const CvArr* srcAarr, const CvArr* srcBarr )
2927 return cv::cvarrToMat(srcAarr).dot(cv::cvarrToMat(srcBarr));
2932 cvCalcPCA( const CvArr* data_arr, CvArr* avg_arr, CvArr* eigenvals, CvArr* eigenvects, int flags )
2934 cv::Mat data = cv::cvarrToMat(data_arr), mean0 = cv::cvarrToMat(avg_arr);
2935 cv::Mat evals0 = cv::cvarrToMat(eigenvals), evects0 = cv::cvarrToMat(eigenvects);
2936 cv::Mat mean = mean0, evals = evals0, evects = evects0;
2940 pca.eigenvalues = evals;
2941 pca.eigenvectors = evects;
2943 pca(data, (flags & CV_PCA_USE_AVG) ? mean : cv::Mat(),
2944 flags, evals.data ? evals.rows + evals.cols - 1 : 0);
2946 if( pca.mean.size() == mean.size() )
2947 pca.mean.convertTo( mean, mean.type() );
2950 cv::Mat temp; pca.mean.convertTo( temp, mean.type() );
2951 transpose( temp, mean );
2954 if( pca.eigenvalues.size() == evals.size() )
2955 pca.eigenvalues.convertTo( evals, evals.type() );
2958 cv::Mat temp; pca.eigenvalues.convertTo( temp, evals.type() );
2959 transpose( temp, evals );
2962 pca.eigenvectors.convertTo( evects, evects.type() );
2964 // otherwise some datatype's or size's were incorrect, so the output arrays have been reallocated
2965 CV_Assert( mean0.data == mean.data && evals0.data == evals.data && evects0.data == evects.data );
2970 cvProjectPCA( const CvArr* data_arr, const CvArr* avg_arr,
2971 const CvArr* eigenvects, CvArr* result_arr )
2973 cv::Mat data = cv::cvarrToMat(data_arr), mean = cv::cvarrToMat(avg_arr);
2974 cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;
2978 pca.eigenvectors = evects;
2980 cv::Mat result = pca.project(data);
2981 if( result.cols != dst.cols )
2982 result = result.reshape(1, 1);
2983 result.convertTo(dst, dst.type());
2985 CV_Assert(dst0.data == dst.data);
// C API wrapper: reconstructs samples from their PCA projections via
// cv::PCA::backProject. (NOTE(review): the CV_IMPL return-type line and the
// closing brace lie outside this chunk of the listing.)
2990 cvBackProjectPCA( const CvArr* proj_arr, const CvArr* avg_arr,
2991 const CvArr* eigenvects, CvArr* result_arr )
2993 cv::Mat data = cv::cvarrToMat(proj_arr), mean = cv::cvarrToMat(avg_arr);
2994 cv::Mat evects = cv::cvarrToMat(eigenvects), dst0 = cv::cvarrToMat(result_arr), dst = dst0;
2998 pca.eigenvectors = evects;
3000 cv::Mat result = pca.backProject(data);
// convert directly into the caller's output storage
3001 result.convertTo(dst, dst.type());
// fires if the output array had a wrong size/type and got reallocated
3003 CV_Assert(dst0.data == dst.data);