Update the changelog
[opencv] / cvaux / src / cvbgfg_acmmm2003.cpp
/*M///////////////////////////////////////////////////////////////////////////////////////
//
//  IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
//  By downloading, copying, installing or using the software you agree to this license.
//  If you do not agree to this license, do not download, install,
//  copy or use the software.
//
//
//                        Intel License Agreement
//
// Copyright (C) 2000, Intel Corporation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
//   * Redistribution's of source code must retain the above copyright notice,
//     this list of conditions and the following disclaimer.
//
//   * Redistribution's in binary form must reproduce the above copyright notice,
//     this list of conditions and the following disclaimer in the documentation
//     and/or other materials provided with the distribution.
//
//   * The name of Intel Corporation may not be used to endorse or promote products
//     derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/


// This file implements the foreground/background pixel
// discrimination algorithm described in
//
//     Foreground Object Detection from Videos Containing Complex Background
//     Li, Huang, Gu, Tian 2003 9p
//     http://muq.org/~cynbe/bib/foreground-object-detection-from-videos-containing-complex-background.pdf

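// A minimal usage sketch (illustrative only, not part of this translation unit),
// assuming frames come from a CvCapture* obtained elsewhere, e.g. via
// cvCaptureFromFile()/cvQueryFrame(); the model is driven through the generic
// update/release function pointers that cvCreateFGDStatModel() fills in below:
//
//     IplImage* frame = cvQueryFrame( capture );
//     CvBGStatModel* bg_model = cvCreateFGDStatModel( frame, NULL );
//
//     while( (frame = cvQueryFrame( capture )) != NULL )
//     {
//         bg_model->update( frame, bg_model );
//         // bg_model->foreground now holds the 8-bit foreground mask,
//         // bg_model->background the current reference background image.
//     }
//     bg_model->release( &bg_model );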

#include "_cvaux.h"

#include <math.h>
#include <stdio.h>
#include <stdlib.h>
//#include <algorithm>

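// Return a pointer to the largest element in the half-open range [start, end)
// (a local stand-in for std::max_element, cf. the <algorithm> include commented
// out above); the range is assumed to be non-empty.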
static double* _cv_max_element( double* start, double* end )
{
    double* p = start++;

    for( ; start != end;  ++start) {

        if (*p < *start)   p = start;
    }

    return p;
}

static void CV_CDECL icvReleaseFGDStatModel( CvFGDStatModel** model );
static int CV_CDECL icvUpdateFGDStatModel( IplImage* curr_frame,
                                           CvFGDStatModel*  model );

// Function cvCreateFGDStatModel initializes the foreground detection process.
// Parameters:
//      first_frame - first frame from the video sequence
//      parameters  - (optional) if NULL, the default parameters of the algorithm are used
// Returns a pointer to the newly created model (a CvFGDStatModel behind the CvBGStatModel interface).
CV_IMPL CvBGStatModel*
cvCreateFGDStatModel( IplImage* first_frame, CvFGDStatModelParams* parameters )
{
    CvFGDStatModel* p_model = 0;

    CV_FUNCNAME( "cvCreateFGDStatModel" );

    __BEGIN__;

    int i, j, k, pixel_count, buf_size;
    CvFGDStatModelParams params;

    if( !CV_IS_IMAGE(first_frame) )
        CV_ERROR( CV_StsBadArg, "Invalid or NULL first_frame parameter" );

    if (first_frame->nChannels != 3)
        CV_ERROR( CV_StsBadArg, "first_frame must have 3 color channels" );

    // Initialize parameters:
    if( parameters == NULL )
    {
        params.Lc      = CV_BGFG_FGD_LC;
        params.N1c     = CV_BGFG_FGD_N1C;
        params.N2c     = CV_BGFG_FGD_N2C;

        params.Lcc     = CV_BGFG_FGD_LCC;
        params.N1cc    = CV_BGFG_FGD_N1CC;
        params.N2cc    = CV_BGFG_FGD_N2CC;

        params.delta   = CV_BGFG_FGD_DELTA;

        params.alpha1  = CV_BGFG_FGD_ALPHA_1;
        params.alpha2  = CV_BGFG_FGD_ALPHA_2;
        params.alpha3  = CV_BGFG_FGD_ALPHA_3;

        params.T       = CV_BGFG_FGD_T;
        params.minArea = CV_BGFG_FGD_MINAREA;

        params.is_obj_without_holes = 1;
        params.perform_morphing     = 1;
    }
    else
    {
        params = *parameters;
    }

    CV_CALL( p_model = (CvFGDStatModel*)cvAlloc( sizeof(*p_model) ));
    memset( p_model, 0, sizeof(*p_model) );
    p_model->type = CV_BG_MODEL_FGD;
    p_model->release = (CvReleaseBGStatModel)icvReleaseFGDStatModel;
    p_model->update = (CvUpdateBGStatModel)icvUpdateFGDStatModel;
    p_model->params = params;

    // Initialize storage pools:
    pixel_count = first_frame->width * first_frame->height;

    buf_size = pixel_count*sizeof(p_model->pixel_stat[0]);
    CV_CALL( p_model->pixel_stat = (CvBGPixelStat*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat, 0, buf_size );

    buf_size = pixel_count*params.N2c*sizeof(p_model->pixel_stat[0].ctable[0]);
    CV_CALL( p_model->pixel_stat[0].ctable = (CvBGPixelCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].ctable, 0, buf_size );

    buf_size = pixel_count*params.N2cc*sizeof(p_model->pixel_stat[0].cctable[0]);
    CV_CALL( p_model->pixel_stat[0].cctable = (CvBGPixelCCStatTable*)cvAlloc(buf_size) );
    memset( p_model->pixel_stat[0].cctable, 0, buf_size );

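    // Carve the two pooled allocations above into per-pixel slices: pixel k gets
    // N2c colour (Ct) entries and N2cc colour co-occurrence (CCt) entries.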
    for(     i = 0, k = 0; i < first_frame->height; i++ ) {
        for( j = 0;        j < first_frame->width;  j++, k++ )
        {
            p_model->pixel_stat[k].ctable = p_model->pixel_stat[0].ctable + k*params.N2c;
            p_model->pixel_stat[k].cctable = p_model->pixel_stat[0].cctable + k*params.N2cc;
        }
    }

    // Init temporary images:
    CV_CALL( p_model->Ftd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->Fbd = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));
    CV_CALL( p_model->foreground = cvCreateImage(cvSize(first_frame->width, first_frame->height), IPL_DEPTH_8U, 1));

    CV_CALL( p_model->background = cvCloneImage(first_frame));
    CV_CALL( p_model->prev_frame = cvCloneImage(first_frame));
    CV_CALL( p_model->storage = cvCreateMemStorage());

    __END__;

    if( cvGetErrStatus() < 0 )
    {
        CvBGStatModel* base_ptr = (CvBGStatModel*)p_model;

        if( p_model && p_model->release )
            p_model->release( &base_ptr );
        else
            cvFree( &p_model );
        p_model = 0;
    }

    return (CvBGStatModel*)p_model;
}


static void CV_CDECL
icvReleaseFGDStatModel( CvFGDStatModel** _model )
{
    CV_FUNCNAME( "icvReleaseFGDStatModel" );

    __BEGIN__;

    if( !_model )
        CV_ERROR( CV_StsNullPtr, "" );

    if( *_model )
    {
        CvFGDStatModel* model = *_model;
        if( model->pixel_stat )
        {
            cvFree( &model->pixel_stat[0].ctable );
            cvFree( &model->pixel_stat[0].cctable );
            cvFree( &model->pixel_stat );
        }

        cvReleaseImage( &model->Ftd );
        cvReleaseImage( &model->Fbd );
        cvReleaseImage( &model->foreground );
        cvReleaseImage( &model->background );
        cvReleaseImage( &model->prev_frame );
        cvReleaseMemStorage(&model->storage);

        cvFree( _model );
    }

    __END__;
}

// Function cvChangeDetection performs change detection for the foreground detection algorithm.
// Parameters:
//      prev_frame  - previous frame (8-bit, 3 channels)
//      curr_frame  - current frame (8-bit, 3 channels)
//      change_mask - output change mask (8-bit, 1 channel); pixels where a change is detected are set to 255
CV_IMPL int
cvChangeDetection( IplImage*  prev_frame,
                   IplImage*  curr_frame,
                   IplImage*  change_mask )
{
    int i, j, b, x, y, thres;
    const int PIXELRANGE=256;

    if( !prev_frame
    ||  !curr_frame
    ||  !change_mask
    ||   prev_frame->nChannels  != 3
    ||   curr_frame->nChannels  != 3
    ||   change_mask->nChannels != 1
    ||   prev_frame->depth  != IPL_DEPTH_8U
    ||   curr_frame->depth  != IPL_DEPTH_8U
    ||   change_mask->depth != IPL_DEPTH_8U
    ||   prev_frame->width  != curr_frame->width
    ||   prev_frame->height != curr_frame->height
    ||   prev_frame->width  != change_mask->width
    ||   prev_frame->height != change_mask->height
    ){
        return 0;
    }

    cvZero ( change_mask );

    // All operations per colour
    for (b=0 ; b<prev_frame->nChannels ; b++) {

        // Create histogram:

        long HISTOGRAM[PIXELRANGE];
        for (i=0 ; i<PIXELRANGE; i++) HISTOGRAM[i]=0;

        for (y=0 ; y<curr_frame->height ; y++)
        {
            uchar* rowStart1 = (uchar*)curr_frame->imageData + y * curr_frame->widthStep + b;
            uchar* rowStart2 = (uchar*)prev_frame->imageData + y * prev_frame->widthStep + b;
            for (x=0 ; x<curr_frame->width ; x++, rowStart1+=curr_frame->nChannels, rowStart2+=prev_frame->nChannels) {
                int diff = abs( int(*rowStart1) - int(*rowStart2) );
                HISTOGRAM[diff]++;
            }
        }

        double relativeVariance[PIXELRANGE];
        for (i=0 ; i<PIXELRANGE; i++) relativeVariance[i]=0;

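        // For every candidate threshold, compute the standard deviation of the
        // difference values lying at or above it; the adaptive threshold is then
        // derived from the largest of these deviations (clamped below at 10).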
        for (thres=PIXELRANGE-2; thres>=0 ; thres--)
        {
            //            fprintf(stderr, "Iter %d\n", thres);
            double sum=0;
            double sqsum=0;
            int count=0;
            //            fprintf(stderr, "Iter %d entering loop\n", thres);
            for (j=thres ; j<PIXELRANGE ; j++) {
                sum   += double(j)*double(HISTOGRAM[j]);
                sqsum += double(j*j)*double(HISTOGRAM[j]);
                count += HISTOGRAM[j];
            }
            count = count == 0 ? 1 : count;
            //            fprintf(stderr, "Iter %d finishing loop\n", thres);
            double my = sum / count;
            double sigma = sqrt( sqsum/count - my*my);
            //            fprintf(stderr, "Iter %d sum=%g sqsum=%g count=%d sigma = %g\n", thres, sum, sqsum, count, sigma);
            //            fprintf(stderr, "Writing to %x\n", &(relativeVariance[thres]));
            relativeVariance[thres] = sigma;
            //            fprintf(stderr, "Iter %d finished\n", thres);
        }

        // Find maximum:
        uchar bestThres = 0;

        double* pBestThres = _cv_max_element(relativeVariance, relativeVariance+PIXELRANGE);
        bestThres = (uchar)(*pBestThres);
        if (bestThres < 10) bestThres = 10;

        for (y=0 ; y<prev_frame->height ; y++)
        {
            uchar* rowStart1 = (uchar*)(curr_frame->imageData) + y * curr_frame->widthStep + b;
            uchar* rowStart2 = (uchar*)(prev_frame->imageData) + y * prev_frame->widthStep + b;
            uchar* rowStart3 = (uchar*)(change_mask->imageData) + y * change_mask->widthStep;
            for (x = 0; x < curr_frame->width; x++, rowStart1+=curr_frame->nChannels,
                rowStart2+=prev_frame->nChannels, rowStart3+=change_mask->nChannels) {
                // OR between different color channels
                int diff = abs( int(*rowStart1) - int(*rowStart2) );
                if ( diff > bestThres)
                    *rowStart3 |=255;
            }
        }
    }

    return 1;
}


#define MIN_PV 1E-10


#define V_C(k,l) ctable[k].v[l]
#define PV_C(k) ctable[k].Pv
#define PVB_C(k) ctable[k].Pvb
#define V_CC(k,l) cctable[k].v[l]
#define PV_CC(k) cctable[k].Pv
#define PVB_CC(k) cctable[k].Pvb
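
// Accessor shorthands for the per-pixel statistic tables: V_C/V_CC address the
// stored feature vectors, PV_C/PV_CC their accumulated probabilities, and
// PVB_C/PVB_CC the corresponding statistics accumulated only while the pixel was
// classified as background; _C refers to the colour (Ct) table, _CC to the
// colour co-occurrence (CCt) table.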


// Function icvUpdateFGDStatModel updates the statistical model and returns the number of foreground regions.
// Parameters:
//      curr_frame - current frame from the video sequence
//      model      - pointer to the CvFGDStatModel structure being updated
static int CV_CDECL
icvUpdateFGDStatModel( IplImage* curr_frame, CvFGDStatModel*  model )
{
    int            mask_step = model->Ftd->widthStep;
    CvSeq         *first_seq = NULL, *prev_seq = NULL, *seq = NULL;
    IplImage*      prev_frame = model->prev_frame;
    int            region_count = 0;
    int            FG_pixels_count = 0;
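    // deltaC / deltaCC below are the per-channel matching tolerances: params.delta
    // quantisation levels expressed in 8-bit intensity units (a bin is 256/Lc wide
    // for the colour table, 256/Lcc for the colour co-occurrence table).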
    int            deltaC  = cvRound(model->params.delta * 256 / model->params.Lc);
    int            deltaCC = cvRound(model->params.delta * 256 / model->params.Lcc);
    int            i, j, k, l;

    // Clear temporary storage and the foreground mask:
    cvClearMemStorage(model->storage);
    cvZero(model->foreground);

    // Form foreground pixel candidates using image differencing
    // with adaptive thresholding.  The algorithm is from:
    //
    //    Thresholding for Change Detection
    //    Paul L. Rosin 1998 6p
    //    http://www.cis.temple.edu/~latecki/Courses/CIS750-03/Papers/thresh-iccv.pdf
    //
    cvChangeDetection( prev_frame, curr_frame, model->Ftd );
    cvChangeDetection( model->background, curr_frame, model->Fbd );
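    // Ftd marks pixels that changed between the previous and the current frame
    // (temporal differencing); Fbd marks pixels that differ from the reference
    // background image.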

    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            if( ((uchar*)model->Fbd->imageData)[i*mask_step+j] || ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float Pb  = 0;
                float Pv  = 0;
                float Pvb = 0;

                CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;

                CvBGPixelCStatTable*   ctable = stat->ctable;
                CvBGPixelCCStatTable* cctable = stat->cctable;

                uchar* curr_data = (uchar*)(curr_frame->imageData) + i*curr_frame->widthStep + j*3;
                uchar* prev_data = (uchar*)(prev_frame->imageData) + i*prev_frame->widthStep + j*3;

                int val = 0;

                // Is it a motion pixel?
                if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] )
                {
                    if( !stat->is_trained_dyn_model ) {

                        val = 1;

                    } else {

                        // Compare with stored CCt vectors:
                        for( k = 0;  k < model->params.N1cc && PV_CC(k) > model->params.alpha2;  k++ )
                        {
                            if ( abs( V_CC(k,0) - prev_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,1) - prev_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,2) - prev_data[2]) <=  deltaCC &&
                                 abs( V_CC(k,3) - curr_data[0]) <=  deltaCC &&
                                 abs( V_CC(k,4) - curr_data[1]) <=  deltaCC &&
                                 abs( V_CC(k,5) - curr_data[2]) <=  deltaCC)
                            {
                                Pv += PV_CC(k);
                                Pvb += PVB_CC(k);
                            }
                        }
                        Pb = stat->Pbcc;
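                        // Bayes decision: the pixel stays background only while
                        // 2 * Pvb * Pb > Pv, i.e. while the background explains
                        // the observed co-occurrence vector (cf. Li et al. 2003).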
                        if( 2 * Pvb * Pb <= Pv ) val = 1;
                    }
                }
                else if( stat->is_trained_st_model )
                {
                    // Compare with stored Ct vectors:
                    for( k = 0;  k < model->params.N1c && PV_C(k) > model->params.alpha2;  k++ )
                    {
                        if ( abs( V_C(k,0) - curr_data[0]) <=  deltaC &&
                             abs( V_C(k,1) - curr_data[1]) <=  deltaC &&
                             abs( V_C(k,2) - curr_data[2]) <=  deltaC )
                        {
                            Pv += PV_C(k);
                            Pvb += PVB_C(k);
                        }
                    }
                    Pb = stat->Pbc;
                    if( 2 * Pvb * Pb <= Pv ) val = 1;
                }

                // Update foreground:
                ((uchar*)model->foreground->imageData)[i*mask_step+j] = (uchar)(val*255);
                FG_pixels_count += val;

            }           // end if( change detection...
        }               // for j...
    }                   // for i...
    //end BG/FG classification

    // Foreground segmentation.
    // Smooth foreground map:
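    // Note: params.perform_morphing also serves as the iteration count for the
    // opening/closing below.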
    if( model->params.perform_morphing ){
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_OPEN,  model->params.perform_morphing );
        cvMorphologyEx( model->foreground, model->foreground, 0, 0, CV_MOP_CLOSE, model->params.perform_morphing );
    }


    if( model->params.minArea > 0 || model->params.is_obj_without_holes ){

        // Discard under-size foreground regions:
        //
        cvFindContours( model->foreground, model->storage, &first_seq, sizeof(CvContour), CV_RETR_LIST );
        for( seq = first_seq; seq; seq = seq->h_next )
        {
            CvContour* cnt = (CvContour*)seq;
            if( cnt->rect.width * cnt->rect.height < model->params.minArea ||
                (model->params.is_obj_without_holes && CV_IS_SEQ_HOLE(seq)) )
            {
                // Delete under-size contour:
                prev_seq = seq->h_prev;
                if( prev_seq )
                {
                    prev_seq->h_next = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = prev_seq;
                }
                else
                {
                    first_seq = seq->h_next;
                    if( seq->h_next ) seq->h_next->h_prev = NULL;
                }
            }
            else
            {
                region_count++;
            }
        }
        model->foreground_regions = first_seq;
        cvZero(model->foreground);
        cvDrawContours(model->foreground, first_seq, CV_RGB(0, 0, 255), CV_RGB(0, 0, 255), 10, -1);

    } else {

        model->foreground_regions = NULL;
    }

    // Check ALL BG update condition:
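    // If too large a fraction of the frame was labelled foreground, a global scene
    // change is assumed and every per-pixel model is marked as trained.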
    if( ((float)FG_pixels_count/(model->Ftd->width*model->Ftd->height)) > CV_BGFG_FGD_BG_UPDATE_TRESH )
    {
         for( i = 0; i < model->Ftd->height; i++ )
             for( j = 0; j < model->Ftd->width; j++ )
             {
                 CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
                 stat->is_trained_st_model = stat->is_trained_dyn_model = 1;
             }
    }


    // Update background model:
    for( i = 0; i < model->Ftd->height; i++ )
    {
        for( j = 0; j < model->Ftd->width; j++ )
        {
            CvBGPixelStat* stat = model->pixel_stat + i * model->Ftd->width + j;
            CvBGPixelCStatTable* ctable = stat->ctable;
            CvBGPixelCCStatTable* cctable = stat->cctable;

            uchar *curr_data = (uchar*)(curr_frame->imageData)+i*curr_frame->widthStep+j*3;
            uchar *prev_data = (uchar*)(prev_frame->imageData)+i*prev_frame->widthStep+j*3;

            if( ((uchar*)model->Ftd->imageData)[i*mask_step+j] || !stat->is_trained_dyn_model )
            {
                float alpha = stat->is_trained_dyn_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbcc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbcc += alpha;
                }

                // Find best Vi match:
                for( k = 0; k < model->params.N2cc && PV_CC(k); k++ )
                {
                    // Exponential decay of memory
                    PV_CC(k)  *= (1-alpha);
                    PVB_CC(k) *= (1-alpha);
                    if( PV_CC(k) < MIN_PV )
                    {
                        PV_CC(k) = 0;
                        PVB_CC(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_CC(k,l) - prev_data[l] );
                        if( val > deltaCC ) break;
                        dist += val;
                        val = abs( V_CC(k,l+3) - curr_data[l] );
                        if( val > deltaCC) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }


                if( indx < 0 )
                {   // Replace N2th elem in the table by new feature:
                    indx = model->params.N2cc - 1;
                    PV_CC(indx) = alpha;
                    PVB_CC(indx) = alpha;
                    //update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_CC(indx,l) = prev_data[l];
                        V_CC(indx,l+3) = curr_data[l];
                    }
                }
                else
                {   // Update:
                    PV_CC(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_CC(indx) += alpha;
                    }
                }

                //re-sort CCt table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_CC(k) <= PV_CC(indx) )
                    {
                        //shift elements
                        CvBGPixelCCStatTable tmp1, tmp2 = cctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = cctable[l];
                            cctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }


                float sum1=0, sum2=0;
                //check "once-off" changes
                for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                {
                    sum1 += PV_CC(k);
                    sum2 += PVB_CC(k);
                }
                if( sum1 > model->params.T ) stat->is_trained_dyn_model = 1;

                diff = sum1 - stat->Pbcc * sum2;
                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at motion mode\n");
                    //new BG features are discovered
                    for( k = 0; k < model->params.N1cc && PV_CC(k); k++ )
                    {
                        PVB_CC(k) =
                            (PV_CC(k)-stat->Pbcc*PVB_CC(k))/(1-stat->Pbcc);
                    }
                    assert(stat->Pbcc<=1 && stat->Pbcc>=0);
                }
            }

            // Handle "stationary" pixel:
            if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] )
            {
                float alpha = stat->is_trained_st_model ? model->params.alpha2 : model->params.alpha3;
                float diff = 0;
                int dist, min_dist = 2147483647, indx = -1;

                //update Pb
                stat->Pbc *= (1.f-alpha);
                if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                {
                    stat->Pbc += alpha;
                }

                //find best Vi match
                for( k = 0; k < model->params.N2c; k++ )
                {
                    // Exponential decay of memory
                    PV_C(k) *= (1-alpha);
                    PVB_C(k) *= (1-alpha);
                    if( PV_C(k) < MIN_PV )
                    {
                        PV_C(k) = 0;
                        PVB_C(k) = 0;
                        continue;
                    }

                    dist = 0;
                    for( l = 0; l < 3; l++ )
                    {
                        int val = abs( V_C(k,l) - curr_data[l] );
                        if( val > deltaC ) break;
                        dist += val;
                    }
                    if( l == 3 && dist < min_dist )
                    {
                        min_dist = dist;
                        indx = k;
                    }
                }

                if( indx < 0 )
                {   // N2-th (last) element in the table is replaced by the new feature:
                    indx = model->params.N2c - 1;
                    PV_C(indx) = alpha;
                    PVB_C(indx) = alpha;
                    //update Vt
                    for( l = 0; l < 3; l++ )
                    {
                        V_C(indx,l) = curr_data[l];
                    }
                } else
                {//update
                    PV_C(indx) += alpha;
                    if( !((uchar*)model->foreground->imageData)[i*mask_step+j] )
                    {
                        PVB_C(indx) += alpha;
                    }
                }

                //re-sort Ct table by Pv
                for( k = 0; k < indx; k++ )
                {
                    if( PV_C(k) <= PV_C(indx) )
                    {
                        //shift elements
                        CvBGPixelCStatTable tmp1, tmp2 = ctable[indx];
                        for( l = k; l <= indx; l++ )
                        {
                            tmp1 = ctable[l];
                            ctable[l] = tmp2;
                            tmp2 = tmp1;
                        }
                        break;
                    }
                }

                // Check "once-off" changes:
                float sum1=0, sum2=0;
                for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                {
                    sum1 += PV_C(k);
                    sum2 += PVB_C(k);
                }
                diff = sum1 - stat->Pbc * sum2;
                if( sum1 > model->params.T ) stat->is_trained_st_model = 1;

                // Update stat table:
                if( diff >  model->params.T )
                {
                    //printf("once off change at stat mode\n");
                    //new BG features are discovered
                    for( k = 0; k < model->params.N1c && PV_C(k); k++ )
                    {
                        PVB_C(k) = (PV_C(k)-stat->Pbc*PVB_C(k))/(1-stat->Pbc);
                    }
                    stat->Pbc = 1 - stat->Pbc;
                }
            }           // if !(change detection) at pixel (i,j)

            // Update the reference BG image:
            if( !((uchar*)model->foreground->imageData)[i*mask_step+j])
            {
                uchar* ptr = ((uchar*)model->background->imageData) + i*model->background->widthStep+j*3;

                if( !((uchar*)model->Ftd->imageData)[i*mask_step+j] &&
                    !((uchar*)model->Fbd->imageData)[i*mask_step+j] )
                {
                    // Apply IIR filter:
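                    // B_new[l] = (1 - alpha1) * B_old[l] + alpha1 * I[l], per channel.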
                    for( l = 0; l < 3; l++ )
                    {
                        int a = cvRound(ptr[l]*(1 - model->params.alpha1) + model->params.alpha1*curr_data[l]);
                        ptr[l] = (uchar)a;
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l]*=(1 - model->params.alpha1);
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] += model->params.alpha1*curr_data[l];
                    }
                }
                else
                {
                    // Background change detected:
                    for( l = 0; l < 3; l++ )
                    {
                        //((uchar*)model->background->imageData)[i*model->background->widthStep+j*3+l] = curr_data[l];
                        ptr[l] = curr_data[l];
                    }
                }
            }
        }               // j
    }                   // i

    // Keep previous frame:
    cvCopy( curr_frame, model->prev_frame );

    return region_count;
}

/* End of file. */