/* Intel® RealSense™ Cross Platform API — rsutil.h
   (Doxygen page header: "Go to the documentation of this file.") */
1 /* License: Apache 2.0. See LICENSE file in root directory.
2  Copyright(c) 2015 Intel Corporation. All Rights Reserved. */
3 
4 #ifndef LIBREALSENSE_RSUTIL2_H
5 #define LIBREALSENSE_RSUTIL2_H
6 
7 #include "h/rs_types.h"
8 #include "h/rs_sensor.h"
9 #include "h/rs_frame.h"
10 #include "rs.h"
11 #include "assert.h"
12 #include <stdlib.h>
13 #include <stdbool.h>
14 #include <stdint.h>
15 #include <math.h>
16 #include <float.h>
17 
18 /* Given a point in 3D space, compute the corresponding pixel coordinates in an image with no distortion or forward distortion coefficients produced by the same camera */
19 static void rs2_project_point_to_pixel(float pixel[2], const struct rs2_intrinsics * intrin, const float point[3])
20 {
21  float x = point[0] / point[2], y = point[1] / point[2];
22 
25  {
26 
27  float r2 = x*x + y*y;
28  float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
29  x *= f;
30  y *= f;
31  float dx = x + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
32  float dy = y + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
33  x = dx;
34  y = dy;
35  }
36  if (intrin->model == RS2_DISTORTION_FTHETA)
37  {
38  float r = sqrtf(x*x + y*y);
39  if (r < FLT_EPSILON)
40  {
41  r = FLT_EPSILON;
42  }
43  float rd = (float)(1.0f / intrin->coeffs[0] * atan(2 * r* tan(intrin->coeffs[0] / 2.0f)));
44  x *= rd / r;
45  y *= rd / r;
46  }
48  {
49  float r = sqrtf(x*x + y*y);
50  if (r < FLT_EPSILON)
51  {
52  r = FLT_EPSILON;
53  }
54  float theta = atan(r);
55  float theta2 = theta*theta;
56  float series = 1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])));
57  float rd = theta*series;
58  x *= rd / r;
59  y *= rd / r;
60  }
61 
62  pixel[0] = x * intrin->fx + intrin->ppx;
63  pixel[1] = y * intrin->fy + intrin->ppy;
64 }
65 
66 /* Given pixel coordinates and depth in an image with no distortion or inverse distortion coefficients, compute the corresponding point in 3D space relative to the same camera */
67 static void rs2_deproject_pixel_to_point(float point[3], const struct rs2_intrinsics * intrin, const float pixel[2], float depth)
68 {
69  assert(intrin->model != RS2_DISTORTION_MODIFIED_BROWN_CONRADY); // Cannot deproject from a forward-distorted image
70  //assert(intrin->model != RS2_DISTORTION_BROWN_CONRADY); // Cannot deproject to an brown conrady model
71 
72  float x = (pixel[0] - intrin->ppx) / intrin->fx;
73  float y = (pixel[1] - intrin->ppy) / intrin->fy;
75  {
76  float r2 = x*x + y*y;
77  float f = 1 + intrin->coeffs[0]*r2 + intrin->coeffs[1]*r2*r2 + intrin->coeffs[4]*r2*r2*r2;
78  float ux = x*f + 2*intrin->coeffs[2]*x*y + intrin->coeffs[3]*(r2 + 2*x*x);
79  float uy = y*f + 2*intrin->coeffs[3]*x*y + intrin->coeffs[2]*(r2 + 2*y*y);
80  x = ux;
81  y = uy;
82  }
84  {
85  float rd = sqrtf(x*x + y*y);
86  if (rd < FLT_EPSILON)
87  {
88  rd = FLT_EPSILON;
89  }
90 
91  float theta = rd;
92  float theta2 = rd*rd;
93  for (int i = 0; i < 4; i++)
94  {
95  float f = theta*(1 + theta2*(intrin->coeffs[0] + theta2*(intrin->coeffs[1] + theta2*(intrin->coeffs[2] + theta2*intrin->coeffs[3])))) - rd;
96  if (abs(f) < FLT_EPSILON)
97  {
98  break;
99  }
100  float df = 1 + theta2*(3 * intrin->coeffs[0] + theta2*(5 * intrin->coeffs[1] + theta2*(7 * intrin->coeffs[2] + 9 * theta2*intrin->coeffs[3])));
101  theta -= f / df;
102  theta2 = theta*theta;
103  }
104  float r = tan(theta);
105  x *= r / rd;
106  y *= r / rd;
107  }
108  if (intrin->model == RS2_DISTORTION_FTHETA)
109  {
110  float rd = sqrtf(x*x + y*y);
111  if (rd < FLT_EPSILON)
112  {
113  rd = FLT_EPSILON;
114  }
115  float r = (float)(tan(intrin->coeffs[0] * rd) / atan(2 * tan(intrin->coeffs[0] / 2.0f)));
116  x *= r / rd;
117  y *= r / rd;
118  }
119 
120  point[0] = depth * x;
121  point[1] = depth * y;
122  point[2] = depth;
123 }
124 
125 /* Transform 3D coordinates relative to one sensor to 3D coordinates relative to another viewpoint */
126 static void rs2_transform_point_to_point(float to_point[3], const struct rs2_extrinsics * extrin, const float from_point[3])
127 {
128  to_point[0] = extrin->rotation[0] * from_point[0] + extrin->rotation[3] * from_point[1] + extrin->rotation[6] * from_point[2] + extrin->translation[0];
129  to_point[1] = extrin->rotation[1] * from_point[0] + extrin->rotation[4] * from_point[1] + extrin->rotation[7] * from_point[2] + extrin->translation[1];
130  to_point[2] = extrin->rotation[2] * from_point[0] + extrin->rotation[5] * from_point[1] + extrin->rotation[8] * from_point[2] + extrin->translation[2];
131 }
132 
133 /* Calculate horizontal and vertical feild of view, based on video intrinsics */
134 static void rs2_fov(const struct rs2_intrinsics * intrin, float to_fov[2])
135 {
136  to_fov[0] = (atan2f(intrin->ppx + 0.5f, intrin->fx) + atan2f(intrin->width - (intrin->ppx + 0.5f), intrin->fx)) * 57.2957795f;
137  to_fov[1] = (atan2f(intrin->ppy + 0.5f, intrin->fy) + atan2f(intrin->height - (intrin->ppy + 0.5f), intrin->fy)) * 57.2957795f;
138 }
139 
/* Advance curr by one pixel along the line from start to end, stepping along
   the dominant axis and solving the line equation for the other coordinate.
   curr  [in/out] current pixel, advanced toward end
   start [in]     line start pixel
   end   [in]     line end pixel */
static void next_pixel_in_line(float curr[2], const float start[2], const float end[2])
{
    float line_slope = (end[1] - start[1]) / (end[0] - start[0]);
    if (fabs(end[0] - curr[0]) > fabs(end[1] - curr[1]))
    {
        // x is the dominant remaining axis: step x by one, solve for y.
        curr[0] = end[0] > curr[0] ? curr[0] + 1 : curr[0] - 1;
        curr[1] = end[1] - line_slope * (end[0] - curr[0]);
    }
    else
    {
        /* y is the dominant remaining axis: step y by one, solve for x.
           BUG FIX: was (end[1] + curr[1]), which does not satisfy the line
           equation — the x offset from end is (end_y - curr_y) / slope. */
        curr[1] = end[1] > curr[1] ? curr[1] + 1 : curr[1] - 1;
        curr[0] = end[0] - ((end[1] - curr[1]) / line_slope);
    }
}
154 
/* Return true when curr lies within the axis-aligned span between start and
   end on both axes, regardless of the line's direction. */
static bool is_pixel_in_line(const float curr[2], const float start[2], const float end[2])
{
    for (int axis = 0; axis < 2; axis++)
    {
        bool ascending  = end[axis] >= start[axis] && end[axis] >= curr[axis] && curr[axis] >= start[axis];
        bool descending = end[axis] <= start[axis] && end[axis] <= curr[axis] && curr[axis] <= start[axis];
        if (!ascending && !descending)
        {
            return false;
        }
    }
    return true;
}
160 
/* Clamp a 2D pixel coordinate in place into the rectangle [0, width] x [0, height]. */
static void adjust_2D_point_to_boundary(float p[2], int width, int height)
{
    const float limit[2] = { (float)width, (float)height };
    for (int axis = 0; axis < 2; axis++)
    {
        if (p[axis] < 0) p[axis] = 0;
        if (p[axis] > limit[axis]) p[axis] = limit[axis];
    }
}
168 
169 /* Find projected pixel with unknown depth search along line. */
170 static void rs2_project_color_pixel_to_depth_pixel(float to_pixel[2],
171  const uint16_t* data, float depth_scale,
172  float depth_min, float depth_max,
173  const struct rs2_intrinsics* depth_intrin,
174  const struct rs2_intrinsics* color_intrin,
175  const struct rs2_extrinsics* color_to_depth,
176  const struct rs2_extrinsics* depth_to_color,
177  const float from_pixel[2])
178 {
179  //Find line start pixel
180  float start_pixel[2] = { 0 }, min_point[3] = { 0 }, min_transformed_point[3] = { 0 };
181  rs2_deproject_pixel_to_point(min_point, color_intrin, from_pixel, depth_min);
182  rs2_transform_point_to_point(min_transformed_point, color_to_depth, min_point);
183  rs2_project_point_to_pixel(start_pixel, depth_intrin, min_transformed_point);
184  adjust_2D_point_to_boundary(start_pixel, depth_intrin->width, depth_intrin->height);
185 
186  //Find line end depth pixel
187  float end_pixel[2] = { 0 }, max_point[3] = { 0 }, max_transformed_point[3] = { 0 };
188  rs2_deproject_pixel_to_point(max_point, color_intrin, from_pixel, depth_max);
189  rs2_transform_point_to_point(max_transformed_point, color_to_depth, max_point);
190  rs2_project_point_to_pixel(end_pixel, depth_intrin, max_transformed_point);
191  adjust_2D_point_to_boundary(end_pixel, depth_intrin->width, depth_intrin->height);
192 
193  //search along line for the depth pixel that it's projected pixel is the closest to the input pixel
194  float min_dist = -1;
195  for (float p[2] = { start_pixel[0], start_pixel[1] }; is_pixel_in_line(p, start_pixel, end_pixel); next_pixel_in_line(p, start_pixel, end_pixel))
196  {
197  float depth = depth_scale * data[(int)p[1] * depth_intrin->width + (int)p[0]];
198  if (depth == 0)
199  continue;
200 
201  float projected_pixel[2] = { 0 }, point[3] = { 0 }, transformed_point[3] = { 0 };
202  rs2_deproject_pixel_to_point(point, depth_intrin, p, depth);
203  rs2_transform_point_to_point(transformed_point, depth_to_color, point);
204  rs2_project_point_to_pixel(projected_pixel, color_intrin, transformed_point);
205 
206  float new_dist = pow((projected_pixel[1] - from_pixel[1]), 2) + pow((projected_pixel[0] - from_pixel[0]), 2);
207  if (new_dist < min_dist || min_dist < 0)
208  {
209  min_dist = new_dist;
210  to_pixel[0] = p[0];
211  to_pixel[1] = p[1];
212  }
213  }
214 }
215 
216 #endif
/* Doxygen cross-reference residue (hyperlink targets for struct members used
   above; preserved verbatim from the extracted page):
   Definition: rs_types.h:47
   float translation[3]
   Definition: rs_sensor.h:90
   Definition: rs_types.h:51
   float coeffs[5]
   Definition: rs_types.h:66
   Exposes librealsense functionality for C compilers.
   float rotation[9]
   Definition: rs_sensor.h:89
   float ppx
   Definition: rs_types.h:61
   Exposes RealSense frame functionality for C compilers.
   Exposes RealSense structs.
   Definition: rs_types.h:49
   int width
   Definition: rs_types.h:59
   Exposes RealSense sensor functionality for C compilers.
   Definition: rs_types.h:48
   Cross-stream extrinsics: encodes the topology describing how the different devices are oriented.
   Definition: rs_sensor.h:87
   rs2_distortion model
   Definition: rs_types.h:65
   float fy
   Definition: rs_types.h:64
   float fx
   Definition: rs_types.h:63
   Video stream intrinsics.
   Definition: rs_types.h:57
   int height
   Definition: rs_types.h:60
   float ppy
   Definition: rs_types.h:62
*/