+
+// This file implements the foreground/background pixel
+// discrimination algorithm described in
+//
+// Foreground Object Detection from Videos Containing Complex Background
+// Li, Huang, Gu, Tian 2003 9p
+// http://muq.org/~cynbe/bib/foreground-object-detection-from-videos-containing-complex-background.pdf
+
+
// parameters:
// first_frame - frame from video sequence
// parameters - (optional) if NULL default parameters of the algorithm will be used
// parameters:
// first_frame - frame from video sequence
// parameters - (optional) if NULL default parameters of the algorithm will be used
if (first_frame->nChannels != 3)
CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );
if (first_frame->nChannels != 3)
CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );
- params.Lc = CV_BGFG_FGD_LC;
- params.N1c = CV_BGFG_FGD_N1C;
- params.N2c = CV_BGFG_FGD_N2C;
- params.Lcc = CV_BGFG_FGD_LCC;
- params.N1cc = CV_BGFG_FGD_N1CC;
- params.N2cc = CV_BGFG_FGD_N2CC;
- params.delta = CV_BGFG_FGD_DELTA;
- params.alpha1 = CV_BGFG_FGD_ALPHA_1;
- params.alpha2 = CV_BGFG_FGD_ALPHA_2;
- params.alpha3 = CV_BGFG_FGD_ALPHA_3;
- params.T = CV_BGFG_FGD_T;
+ params.Lc = CV_BGFG_FGD_LC;
+ params.N1c = CV_BGFG_FGD_N1C;
+ params.N2c = CV_BGFG_FGD_N2C;
+
+ params.Lcc = CV_BGFG_FGD_LCC;
+ params.N1cc = CV_BGFG_FGD_N1CC;
+ params.N2cc = CV_BGFG_FGD_N2CC;
+
+ params.delta = CV_BGFG_FGD_DELTA;
+
+ params.alpha1 = CV_BGFG_FGD_ALPHA_1;
+ params.alpha2 = CV_BGFG_FGD_ALPHA_2;
+ params.alpha3 = CV_BGFG_FGD_ALPHA_3;
+
+ params.T = CV_BGFG_FGD_T;
buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
memset( p_model->pixel_stat[0].cctable, 0, buf_size );
CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
memset( p_model->pixel_stat[0].cctable, 0, buf_size );
- for( i = 0, k = 0; i < first_frame->height; i++ )
- for( j = 0; j < first_frame->width; j++, k++ )
+ for( i = 0, k = 0; i < first_frame->height; i++ ) {
+ for( j = 0; j < first_frame->width; j++, k++ )
{
p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
}
{
p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
}
CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
CV_CALL( p_model->storage = cvCreateMemStorage());
CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
CV_CALL( p_model->storage = cvCreateMemStorage());
int i, j, b, x, y, thres;
const int PIXELRANGE=256;
int i, j, b, x, y, thres;
const int PIXELRANGE=256;
- if( !prev_frame || !curr_frame || !change_mask ||
- prev_frame->nChannels != 3 || curr_frame->nChannels != 3 || change_mask->nChannels != 1 ||
- prev_frame->depth != IPL_DEPTH_8U || curr_frame->depth != IPL_DEPTH_8U || change_mask->depth != IPL_DEPTH_8U ||
- !CV_ARE_SIZES_EQ( prev_frame, curr_frame ) || !CV_ARE_SIZES_EQ( prev_frame, change_mask ) ) return 0;
+ if( !prev_frame
+ || !curr_frame
+ || !change_mask
+ || prev_frame->nChannels != 3
+ || curr_frame->nChannels != 3
+ || change_mask->nChannels != 1
+ || prev_frame->depth != IPL_DEPTH_8U
+ || curr_frame->depth != IPL_DEPTH_8U
+ || change_mask->depth != IPL_DEPTH_8U
+ || prev_frame->width != curr_frame->width
+ || prev_frame->height != curr_frame->height
+ || prev_frame->width != change_mask->width
+ || prev_frame->height != change_mask->height
+ ){
+ return 0;
+ }
relativeVariance[thres] = sigma;
// fprintf(stderr, "Iter %d finished\n", thres);
}
relativeVariance[thres] = sigma;
// fprintf(stderr, "Iter %d finished\n", thres);
}
- //form FG pixels candidates using image differencing with adaptive threshold [P.Rosin, Thresholding for change detection, ICCV, 1998 ]
+ // Form foreground-pixel candidates using image differencing
+ // with adaptive thresholding. The algorithm is from:
+ //
+ // Thresholding for Change Detection
+ // Paul L. Rosin 1998 6p
+ // http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
+ //
cvChangeDetection( prev_frame, curr_frame, model->Ftd );
cvChangeDetection( model->background, curr_frame, model->Fbd );
cvChangeDetection( prev_frame, curr_frame, model->Ftd );
cvChangeDetection( model->background, curr_frame, model->Fbd );
- uchar* curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
- uchar* prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;
+ uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
+ uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;
- if( !stat->is_trained_dyn_model ) val = 1;
- else
- {
- //compare with stored CCt vectors
- for( k = 0; PV_CC(k) > model->params.alpha2 && k < model->params.N1cc; k++ )
+ if( !stat->is_trained_dyn_model ) {
+
+ val = 1;
+
+ } else {
+
+ // Compare with stored CCt vectors:
+ for( k = 0; PV_CC(k) > model->params.alpha2 && k < model->params.N1cc; k++ )
{
if ( abs( V_CC(k,0) - prev_data[0]) <= deltaCC &&
abs( V_CC(k,1) - prev_data[1]) <= deltaCC &&
{
if ( abs( V_CC(k,0) - prev_data[0]) <= deltaCC &&
abs( V_CC(k,1) - prev_data[1]) <= deltaCC &&
- //compare with stored Ct vectors
- for( k = 0; PV_C(k) > model->params.alpha2 && k < model->params.N1c; k++ )
+ // Compare with stored Ct vectors:
+ for( k = 0; PV_C(k) > model->params.alpha2 && k < model->params.N1c; k++ )
{
if ( abs( V_C(k,0) - curr_data[0]) <= deltaC &&
abs( V_C(k,1) - curr_data[1]) <= deltaC &&
{
if ( abs( V_C(k,0) - curr_data[0]) <= deltaC &&
abs( V_C(k,1) - curr_data[1]) <= deltaC &&
- cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN, 1 );
- cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, 1 );
+ cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN, model->params.perform_morphing );
+ cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
for( seq = first_seq; seq; seq = seq->h_next )
{
cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
for( seq = first_seq; seq; seq = seq->h_next )
{
if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
(model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
{
if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
(model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
{
model->foreground_regions = first_seq;
cvZero(model->foreground);
cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
model->foreground_regions = first_seq;
cvZero(model->foreground);
cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);
if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
{
for( i = 0; i < model->Ftd->height; i++ )
if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
{
for( i = 0; i < model->Ftd->height; i++ )
if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
{
float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
{
float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
{
uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;
if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
{
uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;
if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
!((uchar*)model->Fbd->imageData)[i*mask_step+j] )
{
if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
!((uchar*)model->Fbd->imageData)[i*mask_step+j] )
{