1 /*M///////////////////////////////////////////////////////////////////////////////////////
3 // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
5 // By downloading, copying, installing or using the software you agree to this license.
6 // If you do not agree to this license, do not download, install,
7 // copy or use the software.
11 // For Open Source Computer Vision Library
13 // Copyright (C) 2000-2008, Intel Corporation, all rights reserved.
14 // Copyright (C) 2009, Willow Garage Inc., all rights reserved.
15 // Third party copyrights are property of their respective owners.
17 // Redistribution and use in source and binary forms, with or without modification,
18 // are permitted provided that the following conditions are met:
20 // * Redistribution's of source code must retain the above copyright notice,
21 // this list of conditions and the following disclaimer.
23 // * Redistribution's in binary form must reproduce the above copyright notice,
24 // this list of conditions and the following disclaimer in the documentation
25 // and/or other materials provided with the distribution.
27 // * The name of the copyright holders may not be used to endorse or promote products
28 // derived from this software without specific prior written permission.
30 // This software is provided by the copyright holders and contributors "as is" and
31 // any express or implied warranties, including, but not limited to, the implied
32 // warranties of merchantability and fitness for a particular purpose are disclaimed.
33 // In no event shall the Intel Corporation or contributors be liable for any direct,
34 // indirect, incidental, special, exemplary, or consequential damages
35 // (including, but not limited to, procurement of substitute goods or services;
36 // loss of use, data, or profits; or business interruption) however caused
37 // and on any theory of liability, whether in contract, strict liability,
38 // or tort (including negligence or otherwise) arising in any way out of
39 // the use of this software, even if advised of the possibility of such damage.
// Thresholds an 8-bit image. Dispatches on `type` (THRESH_BINARY /
// _BINARY_INV / _TRUNC / _TOZERO / _TOZERO_INV): a 256-entry lookup
// table handles the scalar tail while SSE2 intrinsics process 32 (then 8)
// pixels per iteration.
// NOTE(review): the embedded original line numbers jump, so many lines
// (function braces, the `switch` headers, LUT stores, `int i, j`
// declarations) are elided from this view; the visible code is kept
// byte-identical below and comments describe only what is shown.
49 thresh_8u( const Mat& _src, Mat& _dst, uchar thresh, uchar maxval, int type )
53 Size roi = _src.size();
// Fold channels into width: rows are processed as flat byte runs.
54 roi.width *= _src.channels();
// SSE2 has no unsigned 8-bit compare; XOR with 0x80 remaps unsigned
// bytes into signed order so _mm_cmpgt_epi8 compares them correctly.
56 __m128i _x80 = _mm_set1_epi8('\x80');
57 __m128i thresh_u = _mm_set1_epi8(thresh);
58 __m128i thresh_s = _mm_set1_epi8(thresh ^ 0x80);
59 __m128i maxval_ = _mm_set1_epi8(maxval);
// Continuous src+dst can be treated as a single long row.
62 if( _src.isContinuous() && _dst.isContinuous() )
64 roi.width *= roi.height;
// Build the 256-entry LUT per threshold type; entries for 0..thresh and
// thresh+1..255 differ (the actual table stores are elided here).
71 for( i = 0; i <= thresh; i++ )
76 case THRESH_BINARY_INV:
77 for( i = 0; i <= thresh; i++ )
83 for( i = 0; i <= thresh; i++ )
89 for( i = 0; i <= thresh; i++ )
94 case THRESH_TOZERO_INV:
95 for( i = 0; i <= thresh; i++ )
101 CV_Error( CV_StsBadArg, "Unknown threshold type" );
// Row loop: vectorized main body per threshold type, scalar LUT tail at
// the end of the function.
104 for( i = 0; i < roi.height; i++ )
106 const uchar* src = (const uchar*)(_src.data + _src.step*i);
107 uchar* dst = (uchar*)(_dst.data + _dst.step*i);
// THRESH_BINARY: dst = (src > thresh) ? maxval : 0, 32 pixels/iteration.
114 for( ; j <= roi.width - 32; j += 32 )
117 v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
118 v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
119 v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
120 v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s );
// Compare yields 0xFF/0x00 masks; AND with maxval selects the output.
121 v0 = _mm_and_si128( v0, maxval_ );
122 v1 = _mm_and_si128( v1, maxval_ );
123 _mm_storeu_si128( (__m128i*)(dst + j), v0 );
124 _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
// 8-pixel leftover chunks via 64-bit loads/stores.
127 for( ; j <= roi.width - 8; j += 8 )
129 __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
130 v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
131 v0 = _mm_and_si128( v0, maxval_ );
132 _mm_storel_epi64( (__m128i*)(dst + j), v0 );
// THRESH_BINARY_INV: same compare, mask inverted with andnot.
136 case THRESH_BINARY_INV:
137 for( ; j <= roi.width - 32; j += 32 )
140 v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
141 v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
142 v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
143 v1 = _mm_cmpgt_epi8( _mm_xor_si128(v1, _x80), thresh_s );
144 v0 = _mm_andnot_si128( v0, maxval_ );
145 v1 = _mm_andnot_si128( v1, maxval_ );
146 _mm_storeu_si128( (__m128i*)(dst + j), v0 );
147 _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
150 for( ; j <= roi.width - 8; j += 8 )
152 __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
153 v0 = _mm_cmpgt_epi8( _mm_xor_si128(v0, _x80), thresh_s );
154 v0 = _mm_andnot_si128( v0, maxval_ );
155 _mm_storel_epi64( (__m128i*)(dst + j), v0 );
// THRESH_TRUNC: min(v, thresh) using saturating unsigned subtraction:
// v - max(v - thresh, 0) == min(v, thresh) for unsigned bytes.
160 for( ; j <= roi.width - 32; j += 32 )
163 v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
164 v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
165 v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u ));
166 v1 = _mm_subs_epu8( v1, _mm_subs_epu8( v1, thresh_u ));
167 _mm_storeu_si128( (__m128i*)(dst + j), v0 );
168 _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
171 for( ; j <= roi.width - 8; j += 8 )
173 __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
174 v0 = _mm_subs_epu8( v0, _mm_subs_epu8( v0, thresh_u ));
175 _mm_storel_epi64( (__m128i*)(dst + j), v0 );
// THRESH_TOZERO: keep the pixel where it exceeds thresh, else 0.
180 for( ; j <= roi.width - 32; j += 32 )
183 v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
184 v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
185 v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ));
186 v1 = _mm_and_si128( v1, _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s ));
187 _mm_storeu_si128( (__m128i*)(dst + j), v0 );
188 _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
191 for( ; j <= roi.width - 8; j += 8 )
193 __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
194 v0 = _mm_and_si128( v0, _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ));
195 _mm_storel_epi64( (__m128i*)(dst + j), v0 );
// THRESH_TOZERO_INV: zero out pixels above thresh, keep the rest
// (andnot argument order flips the mask sense vs. TOZERO).
199 case THRESH_TOZERO_INV:
200 for( ; j <= roi.width - 32; j += 32 )
203 v0 = _mm_loadu_si128( (const __m128i*)(src + j) );
204 v1 = _mm_loadu_si128( (const __m128i*)(src + j + 16) );
205 v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 );
206 v1 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v1, _x80), thresh_s ), v1 );
207 _mm_storeu_si128( (__m128i*)(dst + j), v0 );
208 _mm_storeu_si128( (__m128i*)(dst + j + 16), v1 );
211 for( ; j <= roi.width - 8; j += 8 )
213 __m128i v0 = _mm_loadl_epi64( (const __m128i*)(src + j) );
214 v0 = _mm_andnot_si128( _mm_cmpgt_epi8(_mm_xor_si128(v0, _x80), thresh_s ), v0 );
215 _mm_storel_epi64( (__m128i*)(dst + j), v0 );
// Scalar tail: process leftover pixels through the LUT, 4 at a time
// (middle of the unroll is elided), then one by one.
221 for( ; j <= roi.width - 4; j += 4 )
223 uchar t0 = tab[src[j]];
224 uchar t1 = tab[src[j+1]];
236 for( ; j < roi.width; j++ )
237 dst[j] = tab[src[j]];
// Thresholds a 32-bit float image. Each threshold type gets its own row
// loop with an SSE body (8 floats per iteration in two 4-wide registers)
// followed by a scalar tail loop.
// NOTE(review): interior lines (the `switch( type )` header, braces,
// `int i, j` declarations, some scalar-tail statements) are elided from
// this view; visible code is kept byte-identical.
243 thresh_32f( const Mat& _src, Mat& _dst, float thresh, float maxval, int type )
246 Size roi = _src.size();
// Fold channels into width: rows are flat float runs.
247 roi.width *= _src.channels();
248 const float* src = (const float*)_src.data;
249 float* dst = (float*)_dst.data;
// Convert byte steps into element steps for pointer arithmetic below.
250 size_t src_step = _src.step/sizeof(src[0]);
251 size_t dst_step = _dst.step/sizeof(dst[0]);
// Broadcast thresh/maxval into SSE registers once.
253 __m128 thresh4 = _mm_set1_ps(thresh), maxval4 = _mm_set1_ps(maxval);
// Continuous src+dst can be treated as one long row.
256 if( _src.isContinuous() && _dst.isContinuous() )
258 roi.width *= roi.height;
// THRESH_BINARY: dst = (src > thresh) ? maxval : 0.
265 for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
269 for( ; j <= roi.width - 8; j += 8 )
272 v0 = _mm_loadu_ps( src + j );
273 v1 = _mm_loadu_ps( src + j + 4 );
// cmpgt yields all-ones/all-zeros masks; AND selects maxval or 0.
274 v0 = _mm_cmpgt_ps( v0, thresh4 );
275 v1 = _mm_cmpgt_ps( v1, thresh4 );
276 v0 = _mm_and_ps( v0, maxval4 );
277 v1 = _mm_and_ps( v1, maxval4 );
278 _mm_storeu_ps( dst + j, v0 );
279 _mm_storeu_ps( dst + j + 4, v1 );
// Scalar tail mirrors the vector body exactly.
283 for( ; j < roi.width; j++ )
284 dst[j] = src[j] > thresh ? maxval : 0;
// THRESH_BINARY_INV: dst = (src <= thresh) ? maxval : 0.
288 case THRESH_BINARY_INV:
289 for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
293 for( ; j <= roi.width - 8; j += 8 )
296 v0 = _mm_loadu_ps( src + j );
297 v1 = _mm_loadu_ps( src + j + 4 );
298 v0 = _mm_cmple_ps( v0, thresh4 );
299 v1 = _mm_cmple_ps( v1, thresh4 );
300 v0 = _mm_and_ps( v0, maxval4 );
301 v1 = _mm_and_ps( v1, maxval4 );
302 _mm_storeu_ps( dst + j, v0 );
303 _mm_storeu_ps( dst + j + 4, v1 );
307 for( ; j < roi.width; j++ )
308 dst[j] = src[j] <= thresh ? maxval : 0;
// THRESH_TRUNC: dst = min(src, thresh).
313 for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
317 for( ; j <= roi.width - 8; j += 8 )
320 v0 = _mm_loadu_ps( src + j );
321 v1 = _mm_loadu_ps( src + j + 4 );
322 v0 = _mm_min_ps( v0, thresh4 );
323 v1 = _mm_min_ps( v1, thresh4 );
324 _mm_storeu_ps( dst + j, v0 );
325 _mm_storeu_ps( dst + j + 4, v1 );
329 for( ; j < roi.width; j++ )
330 dst[j] = std::min(src[j], thresh);
// THRESH_TOZERO: dst = (src > thresh) ? src : 0 (mask keeps the pixel).
335 for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
339 for( ; j <= roi.width - 8; j += 8 )
342 v0 = _mm_loadu_ps( src + j );
343 v1 = _mm_loadu_ps( src + j + 4 );
344 v0 = _mm_and_ps(v0, _mm_cmpgt_ps(v0, thresh4));
345 v1 = _mm_and_ps(v1, _mm_cmpgt_ps(v1, thresh4));
346 _mm_storeu_ps( dst + j, v0 );
347 _mm_storeu_ps( dst + j + 4, v1 );
// Scalar tail (the `float v = src[j];` line is elided from this view).
351 for( ; j < roi.width; j++ )
354 dst[j] = v > thresh ? v : 0;
// THRESH_TOZERO_INV: dst = (src <= thresh) ? src : 0.
359 case THRESH_TOZERO_INV:
360 for( i = 0; i < roi.height; i++, src += src_step, dst += dst_step )
364 for( ; j <= roi.width - 8; j += 8 )
367 v0 = _mm_loadu_ps( src + j );
368 v1 = _mm_loadu_ps( src + j + 4 );
369 v0 = _mm_and_ps(v0, _mm_cmple_ps(v0, thresh4));
370 v1 = _mm_and_ps(v1, _mm_cmple_ps(v1, thresh4));
371 _mm_storeu_ps( dst + j, v0 );
372 _mm_storeu_ps( dst + j + 4, v1 );
375 for( ; j < roi.width; j++ )
378 dst[j] = v <= thresh ? v : 0;
// Default case of the (elided) switch: reject unknown threshold types.
383 return CV_Error( CV_StsBadArg, "" );
// Computes the Otsu threshold for an 8-bit single-channel image: builds a
// histogram, then scans all candidate thresholds maximizing the
// between-class variance q1*q2*(mu1-mu2)^2.
// NOTE(review): interior lines (histogram increments `h[v0]++`, the
// declaration of N and `max_val`, the `return`) are elided from this
// view; visible code is kept byte-identical.
389 getThreshVal_Otsu_8u( const Mat& _src )
391 Size size = _src.size();
// A continuous image is histogrammed as one long row.
392 if( _src.isContinuous() )
394 size.width *= size.height;
// N is presumably 256 (one histogram bin per 8-bit value) — the
// declaration is elided here; TODO confirm against the full file.
398 int i, j, h[N] = {0};
399 for( i = 0; i < size.height; i++ )
401 const uchar* src = _src.data + _src.step*i;
// 4-pixel unrolled histogram fill (the h[..]++ stores are elided).
402 for( j = 0; j <= size.width - 4; j += 4 )
404 int v0 = src[j], v1 = src[j+1];
406 v0 = src[j+2]; v1 = src[j+3];
409 for( ; j < size.width; j++ )
// Global mean of the image, with histogram normalized by pixel count.
413 double mu = 0, scale = 1./(size.width*size.height);
414 for( i = 0; i < N; i++ )
// q1/mu1 accumulate the weight and mean of the "below threshold" class.
418 double mu1 = 0, q1 = 0;
419 double max_sigma = 0, max_val = 0;
// Scan every candidate threshold i.
421 for( i = 0; i < N; i++ )
423 double p_i, q2, mu2, sigma;
// Skip degenerate splits where either class is (nearly) empty.
430 if( std::min(q1,q2) < FLT_EPSILON || std::max(q1,q2) > 1. - FLT_EPSILON )
433 mu1 = (mu1 + i*p_i)/q1;
434 mu2 = (mu - q1*mu1)/q2;
// Between-class variance; the best i is kept (update elided).
435 sigma = q1*q2*(mu1 - mu2)*(mu1 - mu2);
436 if( sigma > max_sigma )
// Public entry point: applies a fixed-level threshold to _src into _dst
// and returns the threshold actually used (possibly computed by Otsu).
// Handles degenerate integer thresholds (<0 or >=255) by filling dst with
// a constant instead of running the per-pixel kernels.
// NOTE(review): interior lines (opening brace, `type &= THRESH_MASK;`,
// the final `return thresh;`, some else-branches) are elided from this
// view; visible code is kept byte-identical.
447 double threshold( const Mat& _src, Mat& _dst, double thresh, double maxval, int type )
// THRESH_OTSU is a flag OR-ed into `type`; when set, the threshold value
// argument is ignored and computed from the image instead.
449 bool use_otsu = (type & THRESH_OTSU) != 0;
// Otsu is only defined here for 8-bit single-channel input.
454 CV_Assert( _src.type() == CV_8UC1 );
455 thresh = getThreshVal_Otsu_8u(_src);
458 _dst.create( _src.size(), _src.type() );
459 if( _src.depth() == CV_8U )
// Quantize the double threshold/maxval for the 8-bit kernel.
461 int ithresh = cvFloor(thresh);
463 int imaxval = cvRound(maxval);
464 if( type == THRESH_TRUNC )
466 imaxval = saturate_cast<uchar>(imaxval);
// Out-of-range threshold: for several type/threshold combinations every
// pixel maps to the same constant, so fill dst directly.
468 if( ithresh < 0 || ithresh >= 255 )
470 if( type == THRESH_BINARY || type == THRESH_BINARY_INV ||
471 ((type == THRESH_TRUNC || type == THRESH_TOZERO_INV) && ithresh < 0) ||
472 (type == THRESH_TOZERO && ithresh >= 255) )
474 int v = type == THRESH_BINARY ? (ithresh >= 255 ? 0 : imaxval) :
475 type == THRESH_BINARY_INV ? (ithresh >= 255 ? imaxval : 0) :
476 type == THRESH_TRUNC ? imaxval : 0;
477 _dst = Scalar::all(v);
// Normal path: run the specialized 8-bit kernel.
483 thresh_8u( _src, _dst, (uchar)ithresh, (uchar)imaxval, type );
485 else if( _src.depth() == CV_32F )
486 thresh_32f( _src, _dst, (float)thresh, (float)maxval, type );
// Only CV_8U and CV_32F depths are supported.
488 CV_Error( CV_StsUnsupportedFormat, "" );
// Adaptive threshold: each pixel is compared against a local mean
// (box filter) or Gaussian-weighted mean of its blockSize x blockSize
// neighborhood, offset by `delta`. Only 8-bit single-channel input and
// THRESH_BINARY / THRESH_BINARY_INV types are supported.
// NOTE(review): interior lines (opening brace, the `Mat _mean` setup,
// the `tab` declaration, the trivial empty-image early-out) are elided
// from this view; visible code is kept byte-identical.
494 void adaptiveThreshold( const Mat& _src, Mat& _dst, double maxValue,
495 int method, int type, int blockSize, double delta )
497 CV_Assert( _src.type() == CV_8UC1 );
// Neighborhood must be odd-sized and larger than 1 pixel.
498 CV_Assert( blockSize % 2 == 1 && blockSize > 1 );
499 Size size = _src.size();
501 _dst.create( size, _src.type() );
// If src and dst share data, the mean must go into a temporary
// (the branch body is elided from this view).
511 if( _src.data != _dst.data )
514 if( method == ADAPTIVE_THRESH_MEAN_C )
// Normalized box filter == local arithmetic mean.
515 boxFilter( _src, _mean, _src.type(), Size(blockSize, blockSize),
516 Point(-1,-1), true, BORDER_REPLICATE );
517 else if( method == ADAPTIVE_THRESH_GAUSSIAN_C )
518 GaussianBlur( _src, _mean, Size(blockSize, blockSize), 0, 0, BORDER_REPLICATE );
520 CV_Error( CV_StsBadFlag, "Unknown/unsupported adaptive threshold method" );
523 uchar imaxval = saturate_cast<uchar>(maxValue);
// Rounding direction depends on type so that the comparison below is
// consistent with src > mean - delta (resp. <=) for fractional delta.
524 int idelta = type == THRESH_BINARY ? cvCeil(delta) : cvFloor(delta);
// 768-entry LUT indexed by (src - mean + 255), which spans [-255, 510).
527 if( type == CV_THRESH_BINARY )
528 for( i = 0; i < 768; i++ )
529 tab[i] = (uchar)(i - 255 > -idelta ? imaxval : 0);
530 else if( type == CV_THRESH_BINARY_INV )
531 for( i = 0; i < 768; i++ )
532 tab[i] = (uchar)(i - 255 <= -idelta ? imaxval : 0);
534 CV_Error( CV_StsBadFlag, "Unknown/unsupported threshold type" );
// All three buffers continuous: process as one long row.
536 if( _src.isContinuous() && _mean.isContinuous() && _dst.isContinuous() )
538 size.width *= size.height;
542 for( i = 0; i < size.height; i++ )
544 const uchar* src = _src.data + _src.step*i;
545 const uchar* mean = _mean.data + _mean.step*i;
546 uchar* dst = _dst.data + _dst.step*i;
// Per-pixel: shift the difference into the LUT's [0,768) index range.
548 for( j = 0; j < size.width; j++ )
549 dst[j] = tab[src[j] - mean[j] + 255];
// C-API wrapper around cv::threshold. Accepts CvMat*/IplImage* inputs,
// and converts the result back to the destination's depth when
// cv::threshold produced a different-depth temporary.
// NOTE(review): the `return thresh;` / closing brace are elided from this
// view; visible code is kept byte-identical.
556 cvThreshold( const void* srcarr, void* dstarr, double thresh, double maxval, int type )
// dst0 keeps the caller's original destination header for the
// convert-back step below.
558 cv::Mat src = cv::cvarrToMat(srcarr), dst = cv::cvarrToMat(dstarr), dst0 = dst;
// dst may legally be 8-bit even when src is not (e.g. mask output).
560 CV_Assert( src.size() == dst.size() && src.channels() == dst.channels() &&
561 (src.depth() == dst.depth() || dst.depth() == CV_8U));
// cv::threshold may return the Otsu-computed threshold via `thresh`.
563 thresh = cv::threshold( src, dst, thresh, maxval, type );
// If threshold() reallocated dst, copy/convert into the caller's buffer.
564 if( dst0.data != dst.data )
565 dst.convertTo( dst0, dst0.depth() );
// C-API wrapper around cv::adaptiveThreshold; src and dst must match in
// size and type (the operation is in-place-compatible at the C++ level).
// NOTE(review): the closing brace falls past the end of this view;
// visible code is kept byte-identical.
571 cvAdaptiveThreshold( const void *srcIm, void *dstIm, double maxValue,
572 int method, int type, int blockSize, double delta )
574 cv::Mat src = cv::cvarrToMat(srcIm), dst = cv::cvarrToMat(dstIm);
575 CV_Assert( src.size() == dst.size() && src.type() == dst.type() );
576 cv::adaptiveThreshold( src, dst, maxValue, method, type, blockSize, delta );