triclops.cpp
1 
2 /***************************************************************************
3  * triclops.cpp - Stereo processor using the TriclopsSDK
4  *
5  * Created: Fri May 18 17:20:31 2007
6  * Copyright 2007 Tim Niemueller [www.niemueller.de]
7  *
8  ****************************************************************************/
9 
10 /* This program is free software; you can redistribute it and/or modify
11  * it under the terms of the GNU General Public License as published by
12  * the Free Software Foundation; either version 2 of the License, or
13  * (at your option) any later version. A runtime exception applies to
14  * this software (see LICENSE.GPL_WRE file mentioned below for details).
15  *
16  * This program is distributed in the hope that it will be useful,
17  * but WITHOUT ANY WARRANTY; without even the implied warranty of
18  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19  * GNU Library General Public License for more details.
20  *
21  * Read the full text in the LICENSE.GPL_WRE file in the doc directory.
22  */
23 
24 #include <fvstereo/triclops.h>
25 
26 #include <core/exceptions/software.h>
27 #include <core/exceptions/system.h>
28 #include <fvcams/bumblebee2.h>
29 #include <fvutils/base/roi.h>
30 #include <utils/math/angle.h>
31 #include <fvutils/color/conversions.h>
32 #include <fvutils/rectification/rectfile.h>
33 #include <fvutils/rectification/rectinfo_lut_block.h>
34 
35 // PGR Triclops SDK
36 #include <triclops.h>
37 
38 #include <cstdlib>
39 #include <unistd.h>
40 #include <errno.h>
41 
42 #include <math.h>
43 
44 using namespace fawkes;
45 
46 namespace firevision {
47 #if 0 /* just to make Emacs auto-indent happy */
48 }
49 #endif
50 
51 /// @cond INTERNALS
52 /** Data internal to the Triclops stereo processor.
53  * This class exists to hide the Triclops data structures from users of the
54  * stereo processor, so that the Triclops SDK headers do not have to be exposed.
55  */
56 class TriclopsStereoProcessorData
57 {
58  public:
59  TriclopsContext triclops;
60  TriclopsInput input;
61  TriclopsError err;
62  TriclopsImage rectified_image;
63  TriclopsImage16 disparity_image_hires;
64  TriclopsImage disparity_image_lores;
65  bool enable_subpixel_interpolation;
66 };
67 /// @endcond
68 
69 /** @class TriclopsStereoProcessor <fvstereo/triclops.h>
70  * Stereo processing using PGR Triclops SDK.
71  * This class uses the Triclops SDK provided by Point Grey Research together
72  * with Bumblebee2 cameras.
73  * @author Tim Niemueller
74  */
75 
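// Minimal usage sketch for this class, assuming `camera` points to a Bumblebee2Camera
// that has already been opened and started elsewhere; kept inside #if 0 so it is not
// compiled, and the pixel coordinates are arbitrary example values for a 640x480 image.
#if 0
void
example_process_one_frame(Bumblebee2Camera *camera)
{
  TriclopsStereoProcessor stereo(camera);

  camera->capture();                  // grab one RAW16 stereo frame
  stereo.preprocess_stereo();         // deinterlace and Bayer-decode the camera buffer
  stereo.calculate_yuv(true);         // YUV conversion for both images
  stereo.calculate_disparity(NULL);   // full-image disparity (no ROI)

  float x, y, z;
  if ( stereo.get_xyz(320, 240, &x, &y, &z) ) {
    // (x, y, z) now holds the camera-relative coordinates of that pixel
  }
  camera->dispose_buffer();
}
#endif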
76 
77 /** Constructor.
78  * This constructor initializes this triclops wrapper to work on a real camera.
79  * @param camera Must be of type Bumblebee2Camera
80  */
81 TriclopsStereoProcessor::TriclopsStereoProcessor(Camera *camera)
82 {
83  _context_file = NULL;
84  buffer_deinterlaced = NULL;
85 
86  bb2 = dynamic_cast<Bumblebee2Camera *>(camera);
87  if ( ! bb2 ) {
88  throw TypeMismatchException("Camera is not of type Bumblebee2Camera");
89  }
90  if ( ! bb2->is_bumblebee2() ) {
91  throw TypeMismatchException("Camera is not a Bumblebee 2");
92  }
93 
94  bb2->set_image_number(Bumblebee2Camera::RGB_IMAGE);
95 
96  _width = bb2->pixel_width();
97  _height = bb2->pixel_height();
98 
99  _output_image_width = _width;
100  _output_image_height = _height;
101 
102  create_buffers();
103  try {
104  setup_triclops();
105  } catch (...) {
106  throw;
107  }
108 
109  buffer_rgb = bb2->buffer();
110  buffer_rgb_right = buffer_rgb;
111  buffer_rgb_left = buffer_rgb + colorspace_buffer_size(RGB, _width, _height);
112  buffer_rgb_center = buffer_rgb_left; // strange, but this is how the PGR example code does it
113 }
114 
115 
116 /** Constructor.
117  * This constructor makes the Triclops wrapper work on saved images, given the
118  * expected size of a single image and the path to the Triclops context file of
119  * the camera that was used.
120  * @param width image width in pixels
121  * @param height image height in pixels
122  * @param context_file Triclops context file
123  */
124 TriclopsStereoProcessor::TriclopsStereoProcessor(unsigned int width, unsigned int height,
125  const char *context_file)
126 {
127  _width = width;
128  _height = height;
129  _output_image_width = _width;
130  _output_image_height = _height;
131  _context_file = strdup(context_file);
132 
133  bb2 = NULL;
134 
135  create_buffers();
136  try {
137  setup_triclops();
138  } catch (Exception &e) {
139  throw;
140  }
141 }
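// Offline usage sketch, assuming a RAW16 stereo frame that has been loaded into
// raw16_buffer and a context file previously exported from the camera; the file
// name and image size are placeholder values.
#if 0
TriclopsStereoProcessor stereo(640, 480, "/path/to/bumblebee2.context");
stereo.set_raw_buffer(raw16_buffer);   // RAW16 buffer of one stereo frame
stereo.preprocess_stereo();            // deinterlace and Bayer-decode into RGB
stereo.calculate_disparity(NULL);      // no region of interest
unsigned char *disparity = stereo.disparity_buffer();
#endif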
142 
143 
144 /** Create working buffers.
145  * buffer size calculated as: we have RAW16 format, which means two bytes per
146  * pixel, thus total buffer size must be w * h * 2
147  */
148 void
149 TriclopsStereoProcessor::create_buffers()
150 {
151  buffer_green = (unsigned char *)malloc(_width * _height * 2);
152  buffer_yuv_right = malloc_buffer(YUV422_PLANAR, _width, _height);
153  buffer_yuv_left = malloc_buffer(YUV422_PLANAR, _width, _height);
154 
155  if ( bb2 ) {
156  buffer_rgb = bb2->buffer();
157  } else {
158  buffer_rgb = (unsigned char *)malloc(colorspace_buffer_size(RGB, _width, _height) * 2);
159  buffer_deinterlaced = (unsigned char *)malloc(_width * _height * 2);
160  }
161  buffer_rgb_right = buffer_rgb;
162  buffer_rgb_left = buffer_rgb + colorspace_buffer_size(RGB, _width, _height);
163  buffer_rgb_center = buffer_rgb_left; // strange, but this is how the PGR example code does it
164 
165 }
166 
167 
168 void
169 TriclopsStereoProcessor::setup_triclops()
170 {
171  // Internal data
172  data = new TriclopsStereoProcessorData();
173  // Always the same
174  data->input.inputType = TriInp_RGB;
175  data->input.nrows = _height;
176  data->input.ncols = _width;
177  data->input.rowinc = data->input.ncols;
178  /*
179  data->input.u.rgb.red = buffer_yuv_right;
180  data->input.u.rgb.green = buffer_yuv_left;
181  data->input.u.rgb.blue = buffer_yuv_left;
182  */
183  data->input.u.rgb.red = buffer_green;
184  data->input.u.rgb.green = buffer_green + _width * _height;
185  data->input.u.rgb.blue = data->input.u.rgb.green;
186 
187  if ( bb2 ) {
188  try {
189  get_triclops_context_from_camera();
190  } catch (Exception &e) {
191  delete data;
192  throw;
193  }
194  } else {
195 
196  if ( ! _context_file ) {
197  delete data;
198  throw NullPointerException("TriclopsStereoProcessor: You must supply the path "
199  "to a valid BB2 context file");
200  }
201 
202  if ( access(_context_file, F_OK | R_OK) != 0 ) {
203  delete data;
204  throw CouldNotOpenFileException("TriclopsStereoProcessor: Cannot access context file");
205  }
206  data->err = triclopsGetDefaultContextFromFile(&(data->triclops), _context_file);
207  if ( data->err != TriclopsErrorOk ) {
208  delete data;
209  throw Exception("Loading Triclops context from file failed");
210  }
211  }
212 
213  // Set defaults
214  data->enable_subpixel_interpolation = false;
215 
216  triclopsSetSubpixelInterpolation( data->triclops, 0);
217 
218  triclopsSetResolutionAndPrepare( data->triclops,
219  _height, _width,
220  _height, _width);
221 
222  triclopsSetEdgeCorrelation( data->triclops, 1 );
223  triclopsSetLowpass( data->triclops, 1 );
224  triclopsSetDisparity( data->triclops, 5, 100);
225  triclopsSetEdgeMask(data->triclops, 11);
226  triclopsSetStereoMask(data->triclops, 23);
227  triclopsSetSurfaceValidation(data->triclops, 1);
228  triclopsSetTextureValidation(data->triclops, 0);
229 
230  triclopsSetDisparityMapping( data->triclops, 10, 85 );
231  triclopsSetDisparityMappingOn( data->triclops, 1 );
232 }
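// The defaults set above can be adjusted at runtime through the setters defined
// below; the values in this sketch are arbitrary examples, and `stereo` is assumed
// to point to an existing TriclopsStereoProcessor.
#if 0
stereo->set_subpixel_interpolation(true);
stereo->set_disparity_range(0, 64);
stereo->set_edge_masksize(9);        // valid range is [3..13]
stereo->set_stereo_masksize(15);
stereo->set_disparity_mapping(false);
#endif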
233 
234 
235 /** Destructor. */
236 TriclopsStereoProcessor::~TriclopsStereoProcessor()
237 {
238  // calling triclopsDestroyContext() here segfaults, so the context is not destroyed
239  // triclopsDestroyContext( data->triclops );
240 
241  if ( buffer_green != NULL ) free(buffer_green);
242  if ( buffer_yuv_right != NULL ) free(buffer_yuv_right);
243  if ( buffer_yuv_left != NULL ) free(buffer_yuv_left);
244  if ( _context_file != NULL) free(_context_file);
245 
246  if ( ! bb2 ) {
247  if ( buffer_rgb) free(buffer_rgb);
248  if ( buffer_deinterlaced) free(buffer_deinterlaced);
249  }
250 
251  buffer_green = NULL;
252  buffer_rgb = NULL;
253  buffer_yuv_right = NULL;
254  buffer_yuv_left = NULL;
255  _buffer = NULL;
256 
257  delete data;
258 }
259 
260 
261 /** Set raw buffer.
262  * @param raw16_buffer buffer containing the stereo image encoded as BB2 RAW16
263  */
264 void
265 TriclopsStereoProcessor::set_raw_buffer(unsigned char *raw16_buffer)
266 {
267  buffer_raw16 = raw16_buffer;
268 }
269 
270 
271 /** Set the resolution of the output images.
272  * @param width the width of the output images
273  * @param height the height of the output images
274  */
275 void
276 TriclopsStereoProcessor::set_output_image_size(unsigned int width, unsigned int height)
277 {
278  data->err = triclopsSetResolutionAndPrepare(data->triclops, height, width, _height, _width);
279 
280  if ( data->err == TriclopsErrorOk )
281  {
282  _output_image_width = width;
283  _output_image_height = height;
284  }
285 }
286 
287 
288 /** Enable or disable subpixel interpolation
289  * @param enabled true to enable, false to disable
290  */
291 void
292 TriclopsStereoProcessor::set_subpixel_interpolation(bool enabled)
293 {
294  data->enable_subpixel_interpolation = enabled;
295  triclopsSetSubpixelInterpolation(data->triclops, enabled);
296 }
297 
298 
299 /** Enable or disable edge correlation.
300  * @param enabled true to enable, false to disable
301  */
302 void
303 TriclopsStereoProcessor::set_edge_correlation(bool enabled)
304 {
305  triclopsSetEdgeCorrelation(data->triclops, enabled);
306 }
307 
308 
309 /** Enable or disable lowpass filtering before rectification.
310  * @param enabled true to enable, false to disable
311  */
312 void
313 TriclopsStereoProcessor::set_lowpass(bool enabled)
314 {
315  triclopsSetLowpass(data->triclops, enabled);
316 }
317 
318 
319 /** Set disparity range.
320  * @param min minimum disparity
321  * @param max maximum disparity
322  */
323 void
324 TriclopsStereoProcessor::set_disparity_range(int min, int max)
325 {
326  triclopsSetDisparity(data->triclops, min, max);
327 }
328 
329 
330 /** Set edge mask.
331  * Size of the kernel used for edge detection.
332  * This value must be in the range [3..13].
333  * @param mask_size mask size
334  */
335 void
336 TriclopsStereoProcessor::set_edge_masksize(unsigned int mask_size)
337 {
338  triclopsSetEdgeMask(data->triclops, mask_size);
339 }
340 
341 
342 /** Set stereo mask.
343  * Size of the mask used for stereo correlation.
344  * @param mask_size mask size
345  */
346 void
347 TriclopsStereoProcessor::set_stereo_masksize(unsigned int mask_size)
348 {
349  triclopsSetStereoMask(data->triclops, mask_size);
350 }
351 
352 
353 /** Enable or disable surface validation.
354  * @param enabled true to enable, false to disable
355  */
356 void
357 TriclopsStereoProcessor::set_surface_validation(bool enabled)
358 {
359  triclopsSetSurfaceValidation(data->triclops, enabled);
360 }
361 
362 
363 /** Enable or disable texture validation.
364  * @param enabled true to enable, false to disable
365  */
366 void
367 TriclopsStereoProcessor::set_texture_validation(bool enabled)
368 {
369  triclopsSetTextureValidation(data->triclops, enabled);
370 }
371 
372 
373 /** Set disparity mapping range.
374  * @param min minimum disparity
375  * @param max maximum disparity
376  */
377 void
378 TriclopsStereoProcessor::set_disparity_mapping_range(unsigned char min, unsigned char max)
379 {
380  triclopsSetDisparityMapping(data->triclops, min, max);
381 }
382 
383 
384 /** Enable or disable disparity mapping.
385  * @param enabled true to enable, false to disable
386  */
387 void
388 TriclopsStereoProcessor::set_disparity_mapping(bool enabled)
389 {
390  triclopsSetDisparityMappingOn(data->triclops, enabled);
391 }
392 
393 /** Check state of subpixel interpolation
394  * @return true if enabled, false otherwise
395  */
396 bool
397 TriclopsStereoProcessor::subpixel_interpolation()
398 {
399  TriclopsBool on;
400  triclopsGetSubpixelInterpolation(data->triclops, &on);
401  return on;
402 }
403 
404 
405 /** Get width of output images.
406  * @return width of output images
407  */
408 unsigned int
409 TriclopsStereoProcessor::output_image_width()
410 {
411  return _output_image_width;
412 }
413 
414 
415 /** Get height of output images.
416  * @return height of output images
417  */
418 unsigned int
419 TriclopsStereoProcessor::output_image_height()
420 {
421  return _output_image_height;
422 }
423 
424 
425 /** Check state of edge correlation.
426  * @return true if enabled, false otherwise
427  */
428 bool
429 TriclopsStereoProcessor::edge_correlation()
430 {
431  TriclopsBool on;
432  triclopsGetEdgeCorrelation(data->triclops, &on);
433  return on;
434 }
435 
436 
437 /** Check state of lowpass filtering.
438  * @return true if enabled, false otherwise
439  */
440 bool
441 TriclopsStereoProcessor::lowpass()
442 {
443  TriclopsBool on;
444  triclopsGetLowpass(data->triclops, &on);
445  return on;
446 }
447 
448 
449 /** Get disparity range min value.
450  * @return disparity range min value
451  */
452 int
453 TriclopsStereoProcessor::disparity_range_min()
454 {
455  int min, max;
456  triclopsGetDisparity( data->triclops, &min, &max );
457  return min;
458 }
459 
460 
461 /** Get disparity range max value.
462  * @return disparity range max value
463  */
464 int
465 TriclopsStereoProcessor::disparity_range_max()
466 {
467  int min, max;
468  triclopsGetDisparity( data->triclops, &min, &max );
469  return max;
470 }
471 
472 
473 /** Get edge mask size.
474  * @return size of the edge mask
475  */
476 unsigned int
477 TriclopsStereoProcessor::edge_masksize()
478 {
479  int mask_size = 0;
480  triclopsGetEdgeMask( data->triclops, &mask_size );
481  return mask_size;
482 }
483 
484 
485 /** Get stereo mask size.
486  * @return size of the stereo mask
487  */
488 unsigned int
489 TriclopsStereoProcessor::stereo_masksize()
490 {
491  int mask_size = 0;
492  triclopsGetStereoMask( data->triclops, &mask_size );
493  return mask_size;
494 }
495 
496 
497 /** Check state of surface validation.
498  * @return true if enabled, false otherwise
499  */
500 bool
501 TriclopsStereoProcessor::surface_validation()
502 {
503  TriclopsBool on;
504  triclopsGetSurfaceValidation(data->triclops, &on);
505  return on;
506 }
507 
508 
509 /** Check state of texture validation.
510  * @return true if enabled, false otherwise
511  */
512 bool
513 TriclopsStereoProcessor::texture_validation()
514 {
515  TriclopsBool on;
516  triclopsGetTextureValidation(data->triclops, &on);
517  return on;
518 }
519 
520 
521 /** Get disparity mapping min value.
522  * @return min value for disparity mapping
523  */
524 unsigned char
525 TriclopsStereoProcessor::disparity_mapping_min()
526 {
527  unsigned char min, max;
528  triclopsGetDisparityMapping( data->triclops, &min, &max );
529  return min;
530 }
531 
532 
533 /** Get disparity mapping max value.
534  * @return max value for disparity mapping
535  */
536 unsigned char
537 TriclopsStereoProcessor::disparity_mapping_max()
538 {
539  unsigned char min, max;
540  triclopsGetDisparityMapping( data->triclops, &min, &max );
541  return max;
542 }
543 
544 
545 /** Check state of disparity mapping
546  * @return true if enabled, false otherwise
547  */
548 bool
549 TriclopsStereoProcessor::disparity_mapping()
550 {
551  TriclopsBool on;
552  triclopsGetDisparityMappingOn(data->triclops, &on);
553  return on;
554 }
555 
556 
557 void
558 TriclopsStereoProcessor::preprocess_stereo()
559 {
560  if ( bb2 ) {
561  bb2->deinterlace_stereo();
562  bb2->decode_bayer();
563  } else {
564  Bumblebee2Camera::deinterlace_stereo(buffer_raw16, buffer_deinterlaced,
565  _width, _height);
566  Bumblebee2Camera::decode_bayer(buffer_deinterlaced, buffer_rgb,
567  _width, _height, BAYER_PATTERN_BGGR);
568  }
569 }
570 
571 void
572 TriclopsStereoProcessor::calculate_yuv(bool both)
573 {
574  // RGB -> YUV
575  convert(RGB, YUV422_PLANAR, buffer_rgb_right, buffer_yuv_right, _width, _height);
576  if ( both ) {
577  convert(RGB, YUV422_PLANAR, buffer_rgb_left, buffer_yuv_left, _width, _height);
578  }
579 }
580 
581 
582 void
583 TriclopsStereoProcessor::calculate_disparity(ROI *roi)
584 {
585  TriclopsROI *rois;
586  int max_rois;
587 
588  if ( NULL != roi ) {
589  if ( TriclopsErrorOk == triclopsGetROIs( data->triclops, &rois, &max_rois ) ) {
590  // assume there is always at least one ROI
591  rois[0].col = roi->start.x;
592  rois[0].row = roi->start.y;
593  rois[0].ncols = roi->width;
594  rois[0].nrows = roi->height;
595 
596  triclopsSetNumberOfROIs( data->triclops, 1 );
597  } else {
598  triclopsSetNumberOfROIs( data->triclops, 0 );
599  }
600  } else {
601  triclopsSetNumberOfROIs( data->triclops, 0 );
602  }
603 
604  // now deinterlace the RGB Buffer
605  deinterlace_green( buffer_rgb, buffer_green, _width, 6 * _height );
606 
607  // rectify
608  if ( (data->err = triclopsRectify( data->triclops, &(data->input))) != TriclopsErrorOk ) {
609  throw Exception("Rectifying the image failed");
610  }
611 
612  // disparity
613  if ( (data->err = triclopsStereo( data->triclops )) != TriclopsErrorOk ) {
614  throw Exception("Calculating the disparity image failed");
615  }
616 
617  triclopsGetImage(data->triclops, TriImg_RECTIFIED, TriCam_REFERENCE, &(data->rectified_image));
618 
619  if ( data->enable_subpixel_interpolation ) {
620  triclopsGetImage16( data->triclops, TriImg16_DISPARITY, TriCam_REFERENCE, &(data->disparity_image_hires) );
621  } else {
622  triclopsGetImage( data->triclops, TriImg_DISPARITY, TriCam_REFERENCE, &(data->disparity_image_lores) );
623  }
624  /*
625  if ( data->enable_subpixel_interpolation ) {
626  if ( data->disparity_image_hires.data[640 * 240 + 320] < 0xFF00 ) {
627  float x, y, z;
628  triclopsRCD16ToXYZ(data->triclops, _height, _width, data->disparity_image_hires.data[640 * 240 + 320], &x, &y, &z);
629  cout << "Pixel coordinate: (x, y, z) = (" << x << "," << y << "," << z << ")" << endl;
630  } else {
631  cout << "Pixel has invalid disparity value" << endl;
632  }
633  if ( ! done ) {
634  triclopsSaveImage16( &(data->disparity_image_hires), "disparity.pgm" );
635  triclopsSetDisparityMappingOn( data->triclops, 0 );
636  done = true;
637  }
638  } else {
639  if ( (data->disparity_image_lores.data + data->disparity_image_lores.rowinc * 240 + 320) != 0 ) {
640  float x, y, z;
641  triclopsRCD8ToXYZ(data->triclops, 240, 320, *(data->disparity_image_lores.data + data->disparity_image_lores.rowinc * 240 + 320), &x, &y, &z);
642  cout << "Pixel coordinate: (x, y, z) = (" << x << "," << y << "," << z << ")" << endl;
643  } else {
644  cout << "Pixel has invalid disparity value" << endl;
645  }
646  if ( ! done ) {
647  PNMWriter pnm(PNM_PGM, "disparity_own.pgm", _width, _height);
648  pnm.set_buffer(YUV422_PLANAR, data->disparity_image_lores.data);
649  pnm.write();
650  triclopsSaveImage( &(data->disparity_image_lores), "disparity_lores.pgm" );
651  triclopsSetDisparityMappingOn( data->triclops, 0 );
652  done = true;
653  }
654  }
655  */
656 }
657 
658 
659 /** Get the disparity image buffer.
660  * Access method to the disparity image buffer.
661  * @return pointer to the internal disparity image buffer
662  */
663 unsigned char *
664 TriclopsStereoProcessor::disparity_buffer()
665 {
666  if ( data->enable_subpixel_interpolation ) {
667  return (unsigned char *)data->disparity_image_hires.data;
668  } else {
669  return data->disparity_image_lores.data;
670  }
671 }
672 
673 
674 size_t
675 TriclopsStereoProcessor::disparity_buffer_size() const
676 {
677  if ( data->enable_subpixel_interpolation ) {
678  return _width * _height * 2;
679  } else {
680  return _width * _height;
681  }
682 }
683 
684 
685 unsigned char *
686 TriclopsStereoProcessor::yuv_buffer_left()
687 {
688  return buffer_yuv_right;
689 }
690 
691 
692 unsigned char *
693 TriclopsStereoProcessor::yuv_buffer_right()
694 {
695  return buffer_yuv_left;
696 }
697 
698 
699 /** Get Triclops context.
700  * This retrieves calibration information from the camera and stores it into a
701  * temporary file. With this file the Triclops context is initialized. Afterwards
702  * the temporary file is removed.
703  */
704 void
705 TriclopsStereoProcessor::get_triclops_context_from_camera()
706 {
707  char *tmpname = (char *)malloc(strlen("triclops_cal_XXXXXX") + 1);
708  strcpy(tmpname, "triclops_cal_XXXXXX");
709  char *tmpfile = mktemp(tmpname);
710  bb2->write_triclops_config_from_camera_to_file(tmpfile);
711 
712  data->err = triclopsGetDefaultContextFromFile(&(data->triclops), tmpfile);
713  if ( data->err != TriclopsErrorOk ) {
714  free(tmpfile);
715  throw Exception("Fetching Triclops context from camera failed");
716  }
717  unlink(tmpfile);
718  free(tmpfile);
719 }
720 
721 
722 /** Deinterlace green buffer.
723  * Method used in stereo processing. This follows the Point Grey example code; it
724  * seems unnecessary if we have YUV planar and thus greyscale images anyway.
725  * @param src source buffer
726  * @param dest destination buffer
727  * @param width width of the image
728  * @param height height of the image
729  */
730 void
731 TriclopsStereoProcessor::deinterlace_green( unsigned char* src,
732  unsigned char* dest,
733  unsigned int width,
734  unsigned int height)
735 {
736  register int i = (width*height)-2;
737  register int g = ((width*height)/3)-1;
738 
739  while (i >= 0) {
740  dest[g--] = src[i]; i -= 3; // copy every third byte (green channel)
741  }
742 }
743 
744 
745 
746 
747 /** Get camera-relative coordinates of a point.
748  * Use this method to get the coordinates in the camera coordinate system of a given
749  * point in the image. It may not be possible to provide such a coordinate if no valid
750  * disparity information could be calculated for the given point.
751  * @param px x coordinate in image
752  * @param py y coordinate in image
753  * @param x contains the x coordinate in the camera-relative coordinate system upon
754  * successful return
755  * @param y contains the y coordinate in the camera-relative coordinate system upon
756  * successful return
757  * @param z contains the z coordinate in the camera-relative coordinate system upon
758  * successful return
759  * @return true, if valid information could be retrieved. In that case (x, y, z) is filled
760  * with the coordinates, false otherwise (x, y, and z are not modified).
761  */
762 bool
763 TriclopsStereoProcessor::get_xyz(unsigned int px, unsigned int py, float *x, float *y, float *z)
764 {
765  if ( data->enable_subpixel_interpolation ) {
766  unsigned short disparity = data->disparity_image_hires.data[_width * py + px];
767  if ( disparity < 0xFF00 ) {
768  triclopsRCD16ToXYZ(data->triclops, py, px, disparity, x, y, z);
769  return true;
770  }
771  } else {
772  unsigned char disparity = data->disparity_image_lores.data[_width * py + px];
773  if ( disparity != 0 ) {
774  triclopsRCD8ToXYZ(data->triclops, py, px, disparity, x, y, z);
775  return true;
776  }
777  }
778 
779  return false;
780 }
781 
782 
783 /** Get transformed coordinates of a point.
784  * Use this method to get the coordinates in the transformed coordinate system of a given
785  * point in the image. It may not be possible to provide such a coordinate if no valid
786  * disparity information could be calculated for the given point.
787  * @param px x coordinate in image
788  * @param py y coordinate in image
789  * @param x contains the x coordinate in the transformed coordinate system upon
790  * successful return
791  * @param y contains the y coordinate in the transformed coordinate system upon
792  * successful return
793  * @param z contains the z coordinate in the transformed coordinate system upon
794  * successful return
795  * @return true, if valid information could be retrieved. In that case (x, y, z) is filled
796  * with the coordinates, false otherwise (x, y, and z are not modified).
797  */
798 bool
799 TriclopsStereoProcessor::get_world_xyz(unsigned int px, unsigned int py, float *x, float *y, float *z)
800 {
801  float cam_angle = deg2rad(57);
802  float trans_x = -0.1;
803  float trans_y = 0.05;
804  float trans_z = -0.78;
805  float tx, ty, tz;
806  if ( get_xyz(px, py, &tx, &ty, &tz) ) {
807  /* transform around x axis
808  *x = tx;
809  *y = cos(cam_angle) * ty + sin(cam_angle) * tz;
810  *z = -sin(cam_angle) * ty + cos(cam_angle) * tz;
811  */
812  float x1, y1, z1, x2, y2, z2, x3, y3, z3;
813  x1 = tx;
814  y1 = cos(cam_angle) * ty + sin(cam_angle) * tz;
815  z1 = -sin(cam_angle) * ty + cos(cam_angle) * tz;
816  // cout << "Transform 1: (" << tx << "," << ty << "," << tz << ") -> (" << x1 << "," << y1 << "," << z1 << ")" << endl;
817  // *x = y1;
818  // *y = cos(cam_angle) * x1 + sin(cam_angle) * z1;
819  // *z = -sin(cam_angle) * x1 + cos(cam_angle) * z1;
820  x2 = y1;
821  y2 = x1;
822  z2 = z1;
823  // cout << "Transform 2: (" << x1 << "," << y1 << "," << z1 << ") -> (" << x2 << "," << y2 << "," << z2 << ")" << endl;
824 
825  x3 = z2;
826  y3 = y2;
827  z3 = x2;
828  // cout << "Transform 3: (" << x2 << "," << y2 << "," << z2 << ") -> (" << x3 << "," << y3 << "," << z3 << ")" << endl;
829 
830  *x = x3 + trans_x;
831  *y = y3 + trans_y;
832  *z = z3 + trans_z;
833 
834  // cout << "Transform 4: (" << x3 << "," << y3 << "," << z3 << ") -> (" << *x << "," << *y << "," << *z << ")" << endl;
835 
836  /*
837  *x = ty + trans_x;
838  *y = -sin(cam_angle) * tx + cos(cam_angle) * tz + trans_y;
839  *z = cos(cam_angle) * tx + sin(cam_angle) * tz + trans_z;
840  */
841  return true;
842  } else {
843  return false;
844  }
845 }
846 
847 
848 /** Generate rectification LUT.
849  * This will generate a lookup table that can be used for fast rectification of
850  * an image. The LUT will have the dimensions of the currently specified image
851  * (either given to the offline context file constructor or as defined by the
852  * supplied BB2 camera). The file is written via RectificationInfoFile and contains
853  * two RectificationLutInfoBlock entities, one for each image.
854  * @param lut_file name of the file to write to. The file will be created if
855  * it does not exist and truncated otherwise. The directory in which the file is
856  * stored must already exist.
857  */
858 void
859 TriclopsStereoProcessor::generate_rectification_lut(const char *lut_file)
860 {
861  uint64_t guid = 0;
862  const char *model;
863 
864  if ( bb2 ) {
865  guid = bb2->guid();
866  model = bb2->model();
867  } else {
868  int serial_no;
869  triclopsGetSerialNumber(data->triclops, &serial_no);
870  guid = 0xFFFFFFFF;
871  guid <<= 32;
872  guid |= serial_no;
873 
874  model = "Bumblebee2";
875  }
876 
877  RectificationInfoFile *rif = new RectificationInfoFile(guid, model);
878 
879  RectificationLutInfoBlock *lib_left = new RectificationLutInfoBlock(_width, _height,
880  FIREVISION_RECTINFO_CAMERA_LEFT);
881  RectificationLutInfoBlock *lib_right = new RectificationLutInfoBlock(_width, _height,
882  FIREVISION_RECTINFO_CAMERA_RIGHT);
883 
884  register float row, col;
885  for (unsigned int h = 0; h < _height; ++h) {
886  for (unsigned int w = 0; w < _width; ++w) {
887  if ( triclopsUnrectifyPixel(data->triclops, TriCam_LEFT, h, w, &row, &col) != TriclopsErrorOk ) {
888  throw Exception("Failed to get unrectified position from Triclops SDK");
889  }
890  lib_left->set_mapping(w, h, (int)roundf(col), (int)roundf(row));
891 
892  if ( triclopsUnrectifyPixel(data->triclops, TriCam_RIGHT, h, w, &row, &col) != TriclopsErrorOk ) {
893  throw Exception("Failed to get unrectified position from Triclops SDK");
894  }
895  lib_right->set_mapping(w, h, (int)roundf(col), (int)roundf(row));
896  }
897  }
898 
899  rif->add_rectinfo_block(lib_left);
900  rif->add_rectinfo_block(lib_right);
901 
902  rif->write(lut_file);
903 
904  delete rif;
905 }
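// Sketch of how LUT generation and verification are typically combined; the file
// name is an arbitrary example and `stereo` is assumed to be a configured
// TriclopsStereoProcessor.
#if 0
stereo.generate_rectification_lut("bumblebee2.rectlut");
if ( ! stereo.verify_rectification_lut("bumblebee2.rectlut") ) {
  // the written LUT does not match the current camera calibration / context
}
#endif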
906 
907 
908 /** Verify rectification LUT.
909  * @param lut_file Rectification LUT to verify
910  * @return true if the LUT matches the current camera/context file, false otherwise.
911  */
912 bool
913 TriclopsStereoProcessor::verify_rectification_lut(const char *lut_file)
914 {
915  RectificationInfoFile *rif = new RectificationInfoFile();
916  rif->read(lut_file);
917 
918  if ( bb2 ) {
919  if ( ! bb2->verify_guid(rif->guid()) ) {
920  return false;
921  }
922  } else {
923  int serial_no;
924  uint64_t guid = 0;
925  triclopsGetSerialNumber(data->triclops, &serial_no);
926  guid = 0xFFFFFFFF;
927  guid <<= 32;
928  guid |= serial_no;
929 
930  if ( rif->guid() != guid ) {
931  return false;
932  }
933  }
934 
935  if ( rif->num_blocks() != 2 ) {
936  printf("Insufficient blocks, we only have %zu\n", rif->num_blocks());
937  return false;
938  }
939 
940  bool left_ok = false;
941  bool right_ok = false;
942 
943  RectificationInfoFile::RectInfoBlockVector *blocks = rif->rectinfo_blocks();
944  printf("We have %zu blocks\n", blocks->size());
945  RectificationInfoFile::RectInfoBlockVector::const_iterator i;
946  for (i = blocks->begin(); (i != blocks->end() && ! (left_ok && right_ok)); ++i) {
947  printf("Verifying block\n");
948  RectificationInfoBlock *rib = *i;
949 
950  if ( (rib->camera() != FIREVISION_RECTINFO_CAMERA_LEFT) &&
951  (rib->camera() != FIREVISION_RECTINFO_CAMERA_RIGHT) ) {
952  continue;
953  }
954 
955  if ( rib->type() == FIREVISION_RECTINFO_TYPE_LUT_16x16 ) {
956  RectificationLutInfoBlock *rlib = dynamic_cast<RectificationLutInfoBlock *>(rib);
957  if ( rlib == NULL ) {
958  continue;
959  }
960 
961  TriclopsCamera cam;
962  if ( rib->camera() == FIREVISION_RECTINFO_CAMERA_LEFT ) {
963  cam = TriCam_LEFT;
964  if ( left_ok ) continue;
965  } else {
966  cam = TriCam_RIGHT;
967  if ( right_ok ) continue;
968  }
969 
970  register float row, col;
971  register uint16_t rx, ry;
972  bool lut_ok = true;
973  for (unsigned int h = 0; (h < _height) && lut_ok; ++h) {
974  for (unsigned int w = 0; w < _width; ++w) {
975  if ( triclopsUnrectifyPixel(data->triclops, cam, h, w, &row, &col) != TriclopsErrorOk ) {
976  throw Exception("Failed to get unrectified position from Triclops SDK");
977  }
978  rlib->mapping(w, h, &rx, &ry);
979  if ( (rx != (int)roundf(col)) || (ry != (int)roundf(row)) ) {
980  printf("Value at (%u,%u) not ok\n", rx, ry);
981  lut_ok = false;
982  break;
983  }
984  }
985  }
986 
987  if ( lut_ok ) {
988  if ( rib->camera() == FIREVISION_RECTINFO_CAMERA_LEFT ) {
989  left_ok = true;
990  } else {
991  right_ok = true;
992  }
993  }
994  }
995  }
996  delete blocks;
997 
998  delete rif;
999  return (left_ok && right_ok);
1000 }
1001 
1002 /** Calculate all three Cartesian coordinates of the entire disparity map.
1003  * The values transformed are given by the window (specified by hoff, voff, width and height). The settings vector contains all further information needed, in that order: the angle of the camera, the position of the camera relative to the robot (x, y, z) and the maximum allowed distance of points. Points filtered out by distance are marked as NaN in all three coordinates; unknown regions are marked the same way with INFINITY.
1004  * @param buffer buffer for the coordinates; the 1st index selects the coordinate (0: x, 1: y, 2: z), the 2nd index the row, the 3rd index the column
1005  * @param hoff horizontal offset of the window
1006  * @param voff vertical offset of the window
1007  * @param width width of the window
1008  * @param height height of the window
1009  * @param settings a vector of settings in floating-point format (camera angle, camera position x, y, z, maximum distance of points)
1010  */
1011 void
1012 TriclopsStereoProcessor::getall_world_xyz(float ***buffer, int hoff, int voff, int width, int height, float *settings){
1013 
1014  float fx,fy,fz,fed,rho;
1015  int displine, /*zline,*/ px, py;
1016 
1017  float **x = buffer[0], **y = buffer[1], **z = buffer[2];
1018 
1019  const float dnc = NAN;
1020  const float ur = INFINITY;
1021  const float cos_angle = cos(deg2rad(settings[0]));
1022  const float sin_angle = sin(deg2rad(settings[0]));
1023  const float trans_x = settings[1];
1024  const float trans_y = settings[2];
1025  const float trans_z = settings[3];
1026  const float mqd = settings[4]*settings[4];
1027 
1028  if( data->enable_subpixel_interpolation ){
1029  unsigned short *disp = data->disparity_image_hires.data;
1030 
1031  for(py = 0; py < height; py++){
1032  displine = (py + voff) * _width;
1033  //zline = py * width;
1034  for(px = 0; px < width; px++){
1035  if( disp[displine+px+hoff] >= 0xFF00 ){
1036  z[py][px] = x[py][px] = y[py][px] = ur;
1037  }
1038  else{
1039  triclopsRCD16ToXYZ(data->triclops,py+voff,px+hoff,disp[displine+px+hoff],&fx,&fy,&fz);
1040  fed = x[py][px] = -sin_angle * fy + cos_angle * fz + trans_x;
1041  fz = z[py][px] = cos_angle * fy + sin_angle * fz + trans_z;
1042  fy = y[py][px] = fx + trans_y;
1043  fx = fed;
1044  if(fz > 0.0f){
1045  rho = trans_z / ( fz - trans_z );
1046  x[py][px] = fx = fx - rho * ( fx - trans_x );
1047  y[py][px] = fy = fy - rho * ( fy - trans_y );
1048  }
1049  fed = fx*fx + fy*fy;
1050  if(fed > mqd){
1051  z[py][px] = x[py][px] = y[py][px] = dnc;
1052  }
1053  }
1054  }
1055  }
1056  }
1057  else{
1058  unsigned char *disp = data->disparity_image_lores.data;
1059 
1060  for(py = 0; py < height; py++){
1061  displine = (py + voff) * _width;
1062  //zline = py * width;
1063  for(px = 0; px < width; px++){
1064  if( disp[displine+px+hoff] == 0 ){
1065  z[py][px] = x[py][px] = y[py][px] = ur;
1066  }
1067  else{
1068  triclopsRCD8ToXYZ(data->triclops,py+voff,px+hoff,disp[displine+px+hoff],&fx,&fy,&fz);
1069  fed = x[py][px] = -sin_angle * fy + cos_angle * fz + trans_x;
1070  fz = z[py][px] = cos_angle * fy + sin_angle * fz + trans_z;
1071  fy = y[py][px] = fx + trans_y;
1072  fx = fed;
1073  if(fz > 0.0f){
1074  rho = trans_z / ( fz - trans_z );
1075  x[py][px] = fx = fx - rho * ( fx - trans_x );
1076  y[py][px] = fy = fy - rho * ( fy - trans_y );
1077  }
1078  fed = fx*fx + fy*fy;
1079  if(fed > mqd){
1080  z[py][px] = x[py][px] = y[py][px] = dnc;
1081  }
1082  }
1083  }
1084  }
1085  }
1086 }
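// Allocation sketch for the coordinate buffer expected by getall_world_xyz(): three
// planes (x, y, z), each with `height` rows of `width` floats. The window covers the
// full image here, and the settings values reuse the constants from get_world_xyz()
// plus an assumed 5 m maximum point distance.
#if 0
float ***xyz = (float ***)malloc(3 * sizeof(float **));
for (int c = 0; c < 3; ++c) {
  xyz[c] = (float **)malloc(height * sizeof(float *));
  for (int row = 0; row < height; ++row) {
    xyz[c][row] = (float *)malloc(width * sizeof(float));
  }
}

float settings[5] = { 57.0f,                 // camera tilt angle in degrees
                      -0.1f, 0.05f, -0.78f,  // camera position relative to the robot
                      5.0f };                // maximum point distance in meters
stereo->getall_world_xyz(xyz, /* hoff */ 0, /* voff */ 0, width, height, settings);
#endif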
1087 
1088 } // end namespace firevision