pf.c

/***************************************************************************
 *  pf.c: Simple particle filter for localization
 *
 *  Created: Wed May 16 16:04:41 2012
 *  Copyright  2000  Brian Gerkey
 *             2000  Kasper Stoy
 *             2012  Tim Niemueller [www.niemueller.de]
 ****************************************************************************/

/*  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  Read the full text in the LICENSE.GPL file in the doc directory.
 */

/*  From:
 *  Player - One Hell of a Robot Server (LGPL)
 *  Copyright (C) 2000  Brian Gerkey   &  Kasper Stoy
 *                      gerkey@usc.edu    kaspers@robotics.usc.edu
 */
/**************************************************************************
 * Desc: Simple particle filter for localization.
 * Author: Andrew Howard
 * Date: 10 Dec 2002
 *************************************************************************/

#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "pf.h"
#include "pf_pdf.h"
#include "pf_kdtree.h"

/// @cond EXTERNAL

// Compute the required number of samples, given that there are k bins
// with samples in them.
static int pf_resample_limit(pf_t *pf, int k);

// Re-compute the cluster statistics for a sample set
static void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set);

// Create a new filter
pf_t *pf_alloc(int min_samples, int max_samples,
               double alpha_slow, double alpha_fast,
               pf_init_model_fn_t random_pose_fn, void *random_pose_data)
{
  int i, j;
  pf_t *pf;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  srand48(time(NULL));

  pf = calloc(1, sizeof(pf_t));

  pf->random_pose_fn = random_pose_fn;
  pf->random_pose_data = random_pose_data;

  pf->min_samples = min_samples;
  pf->max_samples = max_samples;

  // Control parameters for the population size calculation.  [err] is
  // the max error between the true distribution and the estimated
  // distribution.  [z] is the upper standard normal quantile for (1 -
  // p), where p is the probability that the error on the estimated
  // distribution will be less than [err].
  pf->pop_err = 0.01;
  pf->pop_z = 3;

  pf->current_set = 0;
  for (j = 0; j < 2; j++)
  {
    set = pf->sets + j;

    set->sample_count = max_samples;
    set->samples = calloc(max_samples, sizeof(pf_sample_t));

    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->pose.v[0] = 0.0;
      sample->pose.v[1] = 0.0;
      sample->pose.v[2] = 0.0;
      sample->weight = 1.0 / max_samples;
    }

    // HACK: is 3 times max_samples enough?
    set->kdtree = pf_kdtree_alloc(3 * max_samples);

    set->cluster_count = 0;
    set->cluster_max_count = max_samples;
    set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));

    set->mean = pf_vector_zero();
    set->cov = pf_matrix_zero();
  }

  pf->w_slow = 0.0;
  pf->w_fast = 0.0;

  pf->alpha_slow = alpha_slow;
  pf->alpha_fast = alpha_fast;

  return pf;
}


// Free an existing filter
void pf_free(pf_t *pf)
{
  int i;

  for (i = 0; i < 2; i++)
  {
    free(pf->sets[i].clusters);
    pf_kdtree_free(pf->sets[i].kdtree);
    free(pf->sets[i].samples);
  }
  free(pf);

  return;
}
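
/* Example (illustrative sketch, not part of this file's API): the typical
 * lifecycle of a filter.  The uniform_pose() callback and the bounds struct
 * are assumptions, see the pf_init() and pf_init_model() examples below;
 * alpha values of 0.001/0.1 are common choices for the recovery parameters.
 *
 *   pf_t *pf = pf_alloc(100, 10000,            // KLD sample count bounds
 *                       0.001, 0.1,            // alpha_slow, alpha_fast
 *                       uniform_pose, &bounds);
 *   pf_init(pf, mean, cov);                    // concentrate around a guess
 *   // per cycle: pf_update_action(), pf_update_sensor(), pf_update_resample()
 *   pf_free(pf);
 */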


// Initialize the filter using a Gaussian
void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  pf_pdf_gaussian_t *pdf;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  pdf = pf_pdf_gaussian_alloc(mean, cov);

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = pf_pdf_gaussian_sample(pdf);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  pf_pdf_gaussian_free(pdf);

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}
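
/* Example (sketch): initializing around a known pose with independent x, y,
 * and theta uncertainty; only the diagonal of the covariance is populated.
 * The field layout (v[0..2], m[i][j]) matches the accesses elsewhere in
 * this file.
 *
 *   pf_vector_t mean = pf_vector_zero();
 *   pf_matrix_t cov  = pf_matrix_zero();
 *   mean.v[0] = 1.0;  mean.v[1] = 2.0;  mean.v[2] = 0.0;   // x [m], y [m], theta [rad]
 *   cov.m[0][0] = 0.5 * 0.5;                   // var(x): std dev 0.5 m
 *   cov.m[1][1] = 0.5 * 0.5;                   // var(y): std dev 0.5 m
 *   cov.m[2][2] = (M_PI / 12) * (M_PI / 12);   // var(theta): std dev 15 deg
 *   pf_init(pf, mean, cov);
 */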


// Initialize the filter using some model
void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set->kdtree);

  set->sample_count = pf->max_samples;

  // Compute the new sample poses
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;
    sample->weight = 1.0 / pf->max_samples;
    sample->pose = (*init_fn) (init_data);

    // Add sample to histogram
    pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
  }

  pf->w_slow = pf->w_fast = 0.0;

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set);

  return;
}
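
/* Example (sketch): a pf_init_model_fn_t that scatters samples uniformly over
 * hypothetical rectangular map bounds passed through init_data.  The same
 * callback type serves as random_pose_fn in pf_alloc() for recovery sampling.
 *
 *   typedef struct { double x0, y0, x1, y1; } map_bounds_t;
 *
 *   static pf_vector_t uniform_pose(void *init_data)
 *   {
 *     map_bounds_t *b = (map_bounds_t *)init_data;
 *     pf_vector_t p;
 *     p.v[0] = b->x0 + drand48() * (b->x1 - b->x0);
 *     p.v[1] = b->y0 + drand48() * (b->y1 - b->y0);
 *     p.v[2] = -M_PI + drand48() * 2.0 * M_PI;
 *     return p;
 *   }
 *
 *   pf_init_model(pf, uniform_pose, &bounds);
 */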


// Update the filter with some new action
void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
{
  pf_sample_set_t *set;

  set = pf->sets + pf->current_set;

  (*action_fn) (action_data, set);

  return;
}
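
/* Example (sketch): a pf_action_model_fn_t that shifts every sample by an
 * odometry delta plus Gaussian noise.  gauss() is a local Box-Muller helper
 * and the fixed sigmas are placeholders; a real odometry model (Prob Rob
 * ch. 5) would rotate the delta into each sample's frame and scale the noise
 * with the size of the motion.
 *
 *   typedef struct { double dx, dy, dtheta; } odom_delta_t;
 *
 *   static double gauss(double sigma)
 *   {
 *     double u1 = 1.0 - drand48(), u2 = drand48();
 *     return sigma * sqrt(-2.0 * log(u1)) * cos(2.0 * M_PI * u2);
 *   }
 *
 *   static void odom_update(void *action_data, pf_sample_set_t *set)
 *   {
 *     odom_delta_t *d = (odom_delta_t *)action_data;
 *     int i;
 *     for (i = 0; i < set->sample_count; i++)
 *     {
 *       pf_sample_t *s = set->samples + i;
 *       s->pose.v[0] += d->dx + gauss(0.05);
 *       s->pose.v[1] += d->dy + gauss(0.05);
 *       s->pose.v[2] += d->dtheta + gauss(0.02);
 *     }
 *   }
 *
 *   pf_update_action(pf, odom_update, &delta);
 */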


// Update the filter with some new sensor observation
void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
{
  int i;
  pf_sample_set_t *set;
  pf_sample_t *sample;
  double total;

  set = pf->sets + pf->current_set;

  // Compute the sample weights
  total = (*sensor_fn) (sensor_data, set);

  if (total > 0.0)
  {
    // Normalize weights
    double w_avg=0.0;
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      w_avg += sample->weight;
      sample->weight /= total;
    }
    // Update running averages of likelihood of samples (Prob Rob p258)
    w_avg /= set->sample_count;
    if(pf->w_slow == 0.0)
      pf->w_slow = w_avg;
    else
      pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
    if(pf->w_fast == 0.0)
      pf->w_fast = w_avg;
    else
      pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
    //printf("w_avg: %e slow: %e fast: %e\n", w_avg, pf->w_slow, pf->w_fast);
  }
  else
  {
    //PLAYER_WARN("pdf has zero probability");

    // Handle zero total
    for (i = 0; i < set->sample_count; i++)
    {
      sample = set->samples + i;
      sample->weight = 1.0 / set->sample_count;
    }
  }

  return;
}
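
/* Example (sketch): a pf_sensor_model_fn_t scales each sample's weight by the
 * measurement likelihood and returns the sum of the resulting weights.
 * expected_range() is a hypothetical map raycast helper and the 0.2 m noise
 * sigma is a placeholder.
 *
 *   static double range_sensor(void *sensor_data, pf_sample_set_t *set)
 *   {
 *     double z = *(double *)sensor_data;           // one measured range [m]
 *     double total = 0.0;
 *     int i;
 *     for (i = 0; i < set->sample_count; i++)
 *     {
 *       pf_sample_t *s = set->samples + i;
 *       double err = z - expected_range(s->pose);  // hypothetical helper
 *       s->weight *= exp(-(err * err) / (2.0 * 0.2 * 0.2));
 *       total += s->weight;
 *     }
 *     return total;
 *   }
 *
 *   pf_update_sensor(pf, range_sensor, &measured_range);
 */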


// Resample the distribution
void pf_update_resample(pf_t *pf)
{
  int i;
  double total;
  pf_sample_set_t *set_a, *set_b;
  pf_sample_t *sample_a, *sample_b;

  //double r,c,U;
  //int m;
  //double count_inv;
  double* c;

  double w_diff;

  set_a = pf->sets + pf->current_set;
  set_b = pf->sets + (pf->current_set + 1) % 2;

  // Build up cumulative probability table for resampling.
  // TODO: Replace this with a more efficient procedure
  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
  c[0] = 0.0;
  for(i=0;i<set_a->sample_count;i++)
    c[i+1] = c[i]+set_a->samples[i].weight;

  // Create the kd tree for adaptive sampling
  pf_kdtree_clear(set_b->kdtree);

  // Draw samples from set a to create set b.
  total = 0;
  set_b->sample_count = 0;

  w_diff = 1.0 - pf->w_fast / pf->w_slow;
  if(w_diff < 0.0)
    w_diff = 0.0;
  //printf("w_diff: %9.6f\n", w_diff);

  // Can't (easily) combine low-variance sampler with KLD adaptive
  // sampling, so we'll take the more traditional route.
  /*
  // Low-variance resampler, taken from Probabilistic Robotics, p110
  count_inv = 1.0/set_a->sample_count;
  r = drand48() * count_inv;
  c = set_a->samples[0].weight;
  i = 0;
  m = 0;
  */
  while(set_b->sample_count < pf->max_samples)
  {
    sample_b = set_b->samples + set_b->sample_count++;

    if(drand48() < w_diff)
      sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
    else
    {
      // Can't (easily) combine low-variance sampler with KLD adaptive
      // sampling, so we'll take the more traditional route.
      /*
      // Low-variance resampler, taken from Probabilistic Robotics, p110
      U = r + m * count_inv;
      while(U>c)
      {
        i++;
        // Handle wrap-around by resetting counters and picking a new random
        // number
        if(i >= set_a->sample_count)
        {
          r = drand48() * count_inv;
          c = set_a->samples[0].weight;
          i = 0;
          m = 0;
          U = r + m * count_inv;
          continue;
        }
        c += set_a->samples[i].weight;
      }
      m++;
      */

      // Naive discrete event sampler
      double r;
      r = drand48();
      for(i=0;i<set_a->sample_count;i++)
      {
        if((c[i] <= r) && (r < c[i+1]))
          break;
      }
      assert(i<set_a->sample_count);

      sample_a = set_a->samples + i;

      assert(sample_a->weight > 0);

      // Add sample to list
      sample_b->pose = sample_a->pose;
    }

    sample_b->weight = 1.0;
    total += sample_b->weight;

    // Add sample to histogram
    pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);

    // See if we have enough samples yet
    if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
      break;
  }

  // Reset averages, to avoid spiraling off into complete randomness.
  if(w_diff > 0.0)
    pf->w_slow = pf->w_fast = 0.0;

  //fprintf(stderr, "\n\n");

  // Normalize weights
  for (i = 0; i < set_b->sample_count; i++)
  {
    sample_b = set_b->samples + i;
    sample_b->weight /= total;
  }

  // Re-compute cluster statistics
  pf_cluster_stats(pf, set_b);

  // Use the newly created sample set
  pf->current_set = (pf->current_set + 1) % 2;

  free(c);
  return;
}
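
/* Worked example of the recovery injection above: alpha_fast > alpha_slow, so
 * a sudden drop in measurement likelihood (e.g. after kidnapping) pulls
 * w_fast down faster than w_slow.  With w_fast = 0.018 and w_slow = 0.020,
 * w_diff = 1 - 0.018/0.020 = 0.10, so roughly 10% of the new set is drawn
 * from random_pose_fn(), as in Augmented MCL (Prob Rob, table 8.3).
 */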


// Compute the required number of samples, given that there are k bins
// with samples in them.  This is taken directly from Fox et al.
int pf_resample_limit(pf_t *pf, int k)
{
  double a, b, c, x;
  int n;

  if (k <= 1)
    return pf->max_samples;

  a = 1;
  b = 2 / (9 * ((double) k - 1));
  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
  x = a - b + c;

  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);

  if (n < pf->min_samples)
    return pf->min_samples;
  if (n > pf->max_samples)
    return pf->max_samples;

  return n;
}
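
/* The expression above is the Wilson-Hilferty approximation to the chi-square
 * quantile used in KLD-sampling (Fox, "KLD-Sampling: Adaptive Particle
 * Filters", NIPS 2001):
 *
 *   n = ceil( (k-1)/(2*err) * (1 - 2/(9(k-1)) + sqrt(2/(9(k-1))) * z)^3 )
 *
 * Worked example with the defaults set in pf_alloc() (err = 0.01, z = 3) and
 * k = 10 occupied bins: b = 2/81 ~ 0.0247, c = 3*sqrt(2/81) ~ 0.4714,
 * x ~ 1.4467, so n = ceil(450 * 1.4467^3) = 1363 samples, before clamping to
 * [min_samples, max_samples].
 */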


// Re-compute the cluster statistics for a sample set
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k, cidx;
  pf_sample_t *sample;
  pf_cluster_t *cluster;

  // Workspace
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    if (cidx >= set->cluster_max_count)
      continue;
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //       cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}
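
/* Note on the angular terms above: with C = sum(w_i * cos(theta_i)) and
 * S = sum(w_i * sin(theta_i)), the mean heading is atan2(S, C) and
 * R = sqrt(C^2 + S^2) is the resultant length, so -2*ln(R) is the
 * wrapped-normal variance estimate from circular statistics.  For example,
 * two headings 0 and pi/2 with weights 0.5 each give C = S = 0.5, a mean of
 * atan2(0.5, 0.5) = pi/4, R = sqrt(0.5) ~ 0.707, and a variance of
 * -2*ln(0.707) ~ 0.693 rad^2.
 */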


// Compute the CEP statistics (mean and variance).
void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
{
  int i;
  double mn, mx, my, mrr;
  pf_sample_set_t *set;
  pf_sample_t *sample;

  set = pf->sets + pf->current_set;

  mn = 0.0;
  mx = 0.0;
  my = 0.0;
  mrr = 0.0;

  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    mn += sample->weight;
    mx += sample->weight * sample->pose.v[0];
    my += sample->weight * sample->pose.v[1];
    mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
    mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
  }

  mean->v[0] = mx / mn;
  mean->v[1] = my / mn;
  mean->v[2] = 0.0;

  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));

  return;
}
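
/* The variance above is E[x^2 + y^2] - (E[x]^2 + E[y]^2), i.e. the trace of
 * the 2-D position covariance: a single scalar spread of the sample cloud,
 * which is what a CEP ("circular error probable") style summary needs.
 */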


// Get the statistics for a particular cluster.
int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
                         pf_vector_t *mean, pf_matrix_t *cov)
{
  pf_sample_set_t *set;
  pf_cluster_t *cluster;

  set = pf->sets + pf->current_set;

  if (clabel >= set->cluster_count)
    return 0;
  cluster = set->clusters + clabel;

  *weight = cluster->weight;
  *mean = cluster->mean;
  *cov = cluster->cov;

  return 1;
}
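
/* Example (sketch): picking the most likely pose hypothesis by scanning the
 * clusters of the current set for the largest weight; the return value of 0
 * past the last cluster terminates the loop.
 *
 *   double w, best_w = -1.0;
 *   pf_vector_t mean, best_mean = pf_vector_zero();
 *   pf_matrix_t cov;
 *   int c;
 *   for (c = 0; pf_get_cluster_stats(pf, c, &w, &mean, &cov); c++)
 *   {
 *     if (w > best_w) { best_w = w; best_mean = mean; }
 *   }
 *   // best_mean now holds the pose of the heaviest cluster
 */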

/// @endcond