Fawkes API  Fawkes Development Version
pf.c
1 
2 /***************************************************************************
3  * pf.c: Simple particle filter for localization
4  *
5  * Created: Wed May 16 16:04:41 2012
6  * Copyright 2000 Brian Gerkey
7  * 2000 Kasper Stoy
8  * 2012 Tim Niemueller [www.niemueller.de]
9  ****************************************************************************/
10 
11 /* This program is free software; you can redistribute it and/or modify
12  * it under the terms of the GNU General Public License as published by
13  * the Free Software Foundation; either version 2 of the License, or
14  * (at your option) any later version.
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU Library General Public License for more details.
20  *
21  * Read the full text in the LICENSE.GPL file in the doc directory.
22  */
23 
24 /* From:
25  * Player - One Hell of a Robot Server (LGPL)
26  * Copyright (C) 2000 Brian Gerkey & Kasper Stoy
27  * gerkey@usc.edu kaspers@robotics.usc.edu
28  */
29 /**************************************************************************
30  * Desc: Simple particle filter for localization.
31  * Author: Andrew Howard
32  * Date: 10 Dec 2002
33  *************************************************************************/
34 
#include <assert.h>
#include <float.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#include "pf.h"
#include "pf_pdf.h"
#include "pf_kdtree.h"
43 
44 /// @cond EXTERNAL
45 
46 // Compute the required number of samples, given that there are k bins
47 // with samples in them.
48 static int pf_resample_limit(pf_t *pf, int k);
49 
50 // Re-compute the cluster statistics for a sample set
51 static void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set);
52 
53 
54 // Create a new filter
55 pf_t *pf_alloc(int min_samples, int max_samples,
56  double alpha_slow, double alpha_fast,
57  pf_init_model_fn_t random_pose_fn, void *random_pose_data)
58 {
59  int i, j;
60  pf_t *pf;
61  pf_sample_set_t *set;
62  pf_sample_t *sample;
63 
64  srand48(time(NULL));
65 
66  pf = calloc(1, sizeof(pf_t));
67 
68  pf->random_pose_fn = random_pose_fn;
69  pf->random_pose_data = random_pose_data;
70 
71  pf->min_samples = min_samples;
72  pf->max_samples = max_samples;
73 
74  // Control parameters for the population size calculation. [err] is
75  // the max error between the true distribution and the estimated
76  // distribution. [z] is the upper standard normal quantile for (1 -
77  // p), where p is the probability that the error on the estimated
78  // distrubition will be less than [err].
79  pf->pop_err = 0.01;
80  pf->pop_z = 3;
81 
82  pf->current_set = 0;
83  for (j = 0; j < 2; j++)
84  {
85  set = pf->sets + j;
86 
87  set->sample_count = max_samples;
88  set->samples = calloc(max_samples, sizeof(pf_sample_t));
89 
90  for (i = 0; i < set->sample_count; i++)
91  {
92  sample = set->samples + i;
93  sample->pose.v[0] = 0.0;
94  sample->pose.v[1] = 0.0;
95  sample->pose.v[2] = 0.0;
96  sample->weight = 1.0 / max_samples;
97  }
98 
99  // HACK: is 3 times max_samples enough?
100  set->kdtree = pf_kdtree_alloc(3 * max_samples);
101 
102  set->cluster_count = 0;
103  set->cluster_max_count = max_samples;
104  set->clusters = calloc(set->cluster_max_count, sizeof(pf_cluster_t));
105 
106  set->mean = pf_vector_zero();
107  set->cov = pf_matrix_zero();
108  }
109 
110  pf->w_slow = 0.0;
111  pf->w_fast = 0.0;
112 
113  pf->alpha_slow = alpha_slow;
114  pf->alpha_fast = alpha_fast;
115 
116  return pf;
117 }
118 
119 
120 // Free an existing filter
121 void pf_free(pf_t *pf)
122 {
123  int i;
124 
125  for (i = 0; i < 2; i++)
126  {
127  free(pf->sets[i].clusters);
128  pf_kdtree_free(pf->sets[i].kdtree);
129  free(pf->sets[i].samples);
130  }
131  free(pf);
132 
133  return;
134 }
135 
136 
137 // Initialize the filter using a guassian
138 void pf_init(pf_t *pf, pf_vector_t mean, pf_matrix_t cov)
139 {
140  int i;
141  pf_sample_set_t *set;
142  pf_sample_t *sample;
143  pf_pdf_gaussian_t *pdf;
144 
145  set = pf->sets + pf->current_set;
146 
147  // Create the kd tree for adaptive sampling
148  pf_kdtree_clear(set->kdtree);
149 
150  set->sample_count = pf->max_samples;
151 
152  pdf = pf_pdf_gaussian_alloc(mean, cov);
153 
154  // Compute the new sample poses
155  for (i = 0; i < set->sample_count; i++)
156  {
157  sample = set->samples + i;
158  sample->weight = 1.0 / pf->max_samples;
159  sample->pose = pf_pdf_gaussian_sample(pdf);
160 
161  // Add sample to histogram
162  pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
163  }
164 
165  pf->w_slow = pf->w_fast = 0.0;
166 
167  pf_pdf_gaussian_free(pdf);
168 
169  // Re-compute cluster statistics
170  pf_cluster_stats(pf, set);
171 
172  return;
173 }
174 
175 
176 // Initialize the filter using some model
177 void pf_init_model(pf_t *pf, pf_init_model_fn_t init_fn, void *init_data)
178 {
179  int i;
180  pf_sample_set_t *set;
181  pf_sample_t *sample;
182 
183  set = pf->sets + pf->current_set;
184 
185  // Create the kd tree for adaptive sampling
186  pf_kdtree_clear(set->kdtree);
187 
188  set->sample_count = pf->max_samples;
189 
190  // Compute the new sample poses
191  for (i = 0; i < set->sample_count; i++)
192  {
193  sample = set->samples + i;
194  sample->weight = 1.0 / pf->max_samples;
195  sample->pose = (*init_fn) (init_data);
196 
197  // Add sample to histogram
198  pf_kdtree_insert(set->kdtree, sample->pose, sample->weight);
199  }
200 
201  pf->w_slow = pf->w_fast = 0.0;
202 
203  // Re-compute cluster statistics
204  pf_cluster_stats(pf, set);
205 
206  return;
207 }
208 
209 
210 // Update the filter with some new action
211 void pf_update_action(pf_t *pf, pf_action_model_fn_t action_fn, void *action_data)
212 {
213  pf_sample_set_t *set;
214 
215  set = pf->sets + pf->current_set;
216 
217  (*action_fn) (action_data, set);
218 
219  return;
220 }
221 
222 
223 #include <float.h>
224 // Update the filter with some new sensor observation
225 void pf_update_sensor(pf_t *pf, pf_sensor_model_fn_t sensor_fn, void *sensor_data)
226 {
227  int i;
228  pf_sample_set_t *set;
229  pf_sample_t *sample;
230  double total;
231 
232  set = pf->sets + pf->current_set;
233 
234  // Compute the sample weights
235  total = (*sensor_fn) (sensor_data, set);
236 
237  if (total > 0.0)
238  {
239  // Normalize weights
240  double w_avg=0.0;
241  for (i = 0; i < set->sample_count; i++)
242  {
243  sample = set->samples + i;
244  w_avg += sample->weight;
245  sample->weight /= total;
246  }
247  // Update running averages of likelihood of samples (Prob Rob p258)
248  w_avg /= set->sample_count;
249  if(pf->w_slow == 0.0)
250  pf->w_slow = w_avg;
251  else
252  pf->w_slow += pf->alpha_slow * (w_avg - pf->w_slow);
253  if(pf->w_fast == 0.0)
254  pf->w_fast = w_avg;
255  else
256  pf->w_fast += pf->alpha_fast * (w_avg - pf->w_fast);
257  //printf("w_avg: %e slow: %e fast: %e\n",
258  //w_avg, pf->w_slow, pf->w_fast);
259  }
260  else
261  {
262  //PLAYER_WARN("pdf has zero probability");
263 
264  // Handle zero total
265  for (i = 0; i < set->sample_count; i++)
266  {
267  sample = set->samples + i;
268  sample->weight = 1.0 / set->sample_count;
269  }
270  }
271 
272  return;
273 }
274 
275 
276 // Resample the distribution
277 void pf_update_resample(pf_t *pf)
278 {
279  int i;
280  double total;
281  pf_sample_set_t *set_a, *set_b;
282  pf_sample_t *sample_a, *sample_b;
283 
284  //double r,c,U;
285  //int m;
286  //double count_inv;
287  double* c;
288 
289  double w_diff;
290 
291  set_a = pf->sets + pf->current_set;
292  set_b = pf->sets + (pf->current_set + 1) % 2;
293 
294  // Build up cumulative probability table for resampling.
295  // TODO: Replace this with a more efficient procedure
296  // (e.g., http://www.network-theory.co.uk/docs/gslref/GeneralDiscreteDistributions.html)
297  c = (double*)malloc(sizeof(double)*(set_a->sample_count+1));
298  c[0] = 0.0;
299  for(i=0;i<set_a->sample_count;i++)
300  c[i+1] = c[i]+set_a->samples[i].weight;
301 
302  // Create the kd tree for adaptive sampling
303  pf_kdtree_clear(set_b->kdtree);
304 
305  // Draw samples from set a to create set b.
306  total = 0;
307  set_b->sample_count = 0;
308 
309  w_diff = 1.0 - pf->w_fast / pf->w_slow;
310  if(w_diff < 0.0)
311  w_diff = 0.0;
312  //printf("w_diff: %9.6f\n", w_diff);
313 
314  // Can't (easily) combine low-variance sampler with KLD adaptive
315  // sampling, so we'll take the more traditional route.
316  /*
317  // Low-variance resampler, taken from Probabilistic Robotics, p110
318  count_inv = 1.0/set_a->sample_count;
319  r = drand48() * count_inv;
320  c = set_a->samples[0].weight;
321  i = 0;
322  m = 0;
323  */
324  while(set_b->sample_count < pf->max_samples)
325  {
326  sample_b = set_b->samples + set_b->sample_count++;
327 
328  if(drand48() < w_diff)
329  sample_b->pose = (pf->random_pose_fn)(pf->random_pose_data);
330  else
331  {
332  // Can't (easily) combine low-variance sampler with KLD adaptive
333  // sampling, so we'll take the more traditional route.
334  /*
335  // Low-variance resampler, taken from Probabilistic Robotics, p110
336  U = r + m * count_inv;
337  while(U>c)
338  {
339  i++;
340  // Handle wrap-around by resetting counters and picking a new random
341  // number
342  if(i >= set_a->sample_count)
343  {
344  r = drand48() * count_inv;
345  c = set_a->samples[0].weight;
346  i = 0;
347  m = 0;
348  U = r + m * count_inv;
349  continue;
350  }
351  c += set_a->samples[i].weight;
352  }
353  m++;
354  */
355 
356  // Naive discrete event sampler
357  double r;
358  r = drand48();
359  for(i=0;i<set_a->sample_count;i++)
360  {
361  if((c[i] <= r) && (r < c[i+1]))
362  break;
363  }
364  assert(i<set_a->sample_count);
365 
366  sample_a = set_a->samples + i;
367 
368  assert(sample_a->weight > 0);
369 
370  // Add sample to list
371  sample_b->pose = sample_a->pose;
372  }
373 
374  sample_b->weight = 1.0;
375  total += sample_b->weight;
376 
377  // Add sample to histogram
378  pf_kdtree_insert(set_b->kdtree, sample_b->pose, sample_b->weight);
379 
380  // See if we have enough samples yet
381  if (set_b->sample_count > pf_resample_limit(pf, set_b->kdtree->leaf_count))
382  break;
383  }
384 
385  // Reset averages, to avoid spiraling off into complete randomness.
386  if(w_diff > 0.0)
387  pf->w_slow = pf->w_fast = 0.0;
388 
389  //fprintf(stderr, "\n\n");
390 
391  // Normalize weights
392  for (i = 0; i < set_b->sample_count; i++)
393  {
394  sample_b = set_b->samples + i;
395  sample_b->weight /= total;
396  }
397 
398  // Re-compute cluster statistics
399  pf_cluster_stats(pf, set_b);
400 
401  // Use the newly created sample set
402  pf->current_set = (pf->current_set + 1) % 2;
403 
404  free(c);
405  return;
406 }
407 
408 
409 // Compute the required number of samples, given that there are k bins
410 // with samples in them. This is taken directly from Fox et al.
411 int pf_resample_limit(pf_t *pf, int k)
412 {
413  double a, b, c, x;
414  int n;
415 
416  if (k <= 1)
417  return pf->max_samples;
418 
419  a = 1;
420  b = 2 / (9 * ((double) k - 1));
421  c = sqrt(2 / (9 * ((double) k - 1))) * pf->pop_z;
422  x = a - b + c;
423 
424  n = (int) ceil((k - 1) / (2 * pf->pop_err) * x * x * x);
425 
426  if (n < pf->min_samples)
427  return pf->min_samples;
428  if (n > pf->max_samples)
429  return pf->max_samples;
430 
431  return n;
432 }
433 
434 
// Re-compute the cluster statistics for a sample set.
//
// Clusters the samples through the kd-tree, then computes the weighted
// mean pose and covariance per cluster and for the whole set. The
// angular component (pose.v[2]) is averaged via its unit vector
// (cos, sin) and atan2 so wrap-around is handled correctly; its
// variance uses the circular-statistics form -2*ln(R), R being the
// mean resultant length.
void pf_cluster_stats(pf_t *pf, pf_sample_set_t *set)
{
  int i, j, k, cidx;
  pf_sample_t *sample;
  pf_cluster_t *cluster;

  // Workspace for the overall stats: m accumulates weighted
  // (x, y, cos(th), sin(th)); c accumulates the weighted second
  // moments of (x, y).
  double m[4], c[2][2];
  size_t count;
  double weight;

  // Cluster the samples
  pf_kdtree_cluster(set->kdtree);

  // Initialize cluster stats
  set->cluster_count = 0;

  for (i = 0; i < set->cluster_max_count; i++)
  {
    cluster = set->clusters + i;
    cluster->count = 0;
    cluster->weight = 0;
    cluster->mean = pf_vector_zero();
    cluster->cov = pf_matrix_zero();

    for (j = 0; j < 4; j++)
      cluster->m[j] = 0.0;
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->c[j][k] = 0.0;
  }

  // Initialize overall filter stats
  count = 0;
  weight = 0.0;
  set->mean = pf_vector_zero();
  set->cov = pf_matrix_zero();
  for (j = 0; j < 4; j++)
    m[j] = 0.0;
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      c[j][k] = 0.0;

  // Compute cluster stats: one accumulation pass over all samples.
  for (i = 0; i < set->sample_count; i++)
  {
    sample = set->samples + i;

    //printf("%d %f %f %f\n", i, sample->pose.v[0], sample->pose.v[1], sample->pose.v[2]);

    // Get the cluster label for this sample
    cidx = pf_kdtree_get_cluster(set->kdtree, sample->pose);
    assert(cidx >= 0);
    // Samples whose label exceeds the cluster capacity are skipped
    // entirely (they contribute neither to cluster nor overall stats).
    if (cidx >= set->cluster_max_count)
      continue;
    // cluster_count tracks the highest label seen, plus one.
    if (cidx + 1 > set->cluster_count)
      set->cluster_count = cidx + 1;

    cluster = set->clusters + cidx;

    cluster->count += 1;
    cluster->weight += sample->weight;

    count += 1;
    weight += sample->weight;

    // Compute mean: accumulate weighted x, y and the heading's unit
    // vector so angles average correctly across the +/-pi wrap.
    cluster->m[0] += sample->weight * sample->pose.v[0];
    cluster->m[1] += sample->weight * sample->pose.v[1];
    cluster->m[2] += sample->weight * cos(sample->pose.v[2]);
    cluster->m[3] += sample->weight * sin(sample->pose.v[2]);

    m[0] += sample->weight * sample->pose.v[0];
    m[1] += sample->weight * sample->pose.v[1];
    m[2] += sample->weight * cos(sample->pose.v[2]);
    m[3] += sample->weight * sin(sample->pose.v[2]);

    // Compute covariance in linear components (second moments; the
    // mean product is subtracted during normalization below)
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
      {
        cluster->c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
        c[j][k] += sample->weight * sample->pose.v[j] * sample->pose.v[k];
      }
  }

  // Normalize: convert accumulators into means and covariances.
  // NOTE(review): assumes every counted cluster has weight > 0 --
  // holds when sample weights are positive; verify for degenerate
  // weight distributions.
  for (i = 0; i < set->cluster_count; i++)
  {
    cluster = set->clusters + i;

    cluster->mean.v[0] = cluster->m[0] / cluster->weight;
    cluster->mean.v[1] = cluster->m[1] / cluster->weight;
    cluster->mean.v[2] = atan2(cluster->m[3], cluster->m[2]);

    cluster->cov = pf_matrix_zero();

    // Covariance in linear components
    for (j = 0; j < 2; j++)
      for (k = 0; k < 2; k++)
        cluster->cov.m[j][k] = cluster->c[j][k] / cluster->weight -
          cluster->mean.v[j] * cluster->mean.v[k];

    // Covariance in angular components; I think this is the correct
    // formula for circular statistics.
    cluster->cov.m[2][2] = -2 * log(sqrt(cluster->m[2] * cluster->m[2] +
                                         cluster->m[3] * cluster->m[3]));

    //printf("cluster %d %d %f (%f %f %f)\n", i, cluster->count, cluster->weight,
    //cluster->mean.v[0], cluster->mean.v[1], cluster->mean.v[2]);
    //pf_matrix_fprintf(cluster->cov, stdout, "%e");
  }

  // Compute overall filter stats
  set->mean.v[0] = m[0] / weight;
  set->mean.v[1] = m[1] / weight;
  set->mean.v[2] = atan2(m[3], m[2]);

  // Covariance in linear components
  for (j = 0; j < 2; j++)
    for (k = 0; k < 2; k++)
      set->cov.m[j][k] = c[j][k] / weight - set->mean.v[j] * set->mean.v[k];

  // Covariance in angular components; I think this is the correct
  // formula for circular statistics.
  set->cov.m[2][2] = -2 * log(sqrt(m[2] * m[2] + m[3] * m[3]));

  return;
}
565 
566 
567 // Compute the CEP statistics (mean and variance).
568 void pf_get_cep_stats(pf_t *pf, pf_vector_t *mean, double *var)
569 {
570  int i;
571  double mn, mx, my, mrr;
572  pf_sample_set_t *set;
573  pf_sample_t *sample;
574 
575  set = pf->sets + pf->current_set;
576 
577  mn = 0.0;
578  mx = 0.0;
579  my = 0.0;
580  mrr = 0.0;
581 
582  for (i = 0; i < set->sample_count; i++)
583  {
584  sample = set->samples + i;
585 
586  mn += sample->weight;
587  mx += sample->weight * sample->pose.v[0];
588  my += sample->weight * sample->pose.v[1];
589  mrr += sample->weight * sample->pose.v[0] * sample->pose.v[0];
590  mrr += sample->weight * sample->pose.v[1] * sample->pose.v[1];
591  }
592 
593  mean->v[0] = mx / mn;
594  mean->v[1] = my / mn;
595  mean->v[2] = 0.0;
596 
597  *var = mrr / mn - (mx * mx / (mn * mn) + my * my / (mn * mn));
598 
599  return;
600 }
601 
602 
603 // Get the statistics for a particular cluster.
604 int pf_get_cluster_stats(pf_t *pf, int clabel, double *weight,
605  pf_vector_t *mean, pf_matrix_t *cov)
606 {
607  pf_sample_set_t *set;
608  pf_cluster_t *cluster;
609 
610  set = pf->sets + pf->current_set;
611 
612  if (clabel >= set->cluster_count)
613  return 0;
614  cluster = set->clusters + clabel;
615 
616  *weight = cluster->weight;
617  *mean = cluster->mean;
618  *cov = cluster->cov;
619 
620  return 1;
621 }
622 
623 /// @endcond