LibMultiSense
LibMultiSense Documentation
Loading...
Searching...
No Matches
MultiSenseUtilities.hh
Go to the documentation of this file.
1
37#pragma once
38
#include <array>
#include <cstddef>
#include <cstdint>
#include <filesystem>
#include <fstream>
#include <optional>
#include <sstream>
#include <stdexcept>
#include <string>
#include <type_traits>
#include <vector>

#include "MultiSenseTypes.hh"
46
47namespace multisense
48{
49
54#pragma pack(push, 1)
55
///
/// @brief A single 3D point with an associated color sample
///
/// Packed with no padding (see the surrounding #pragma pack(push, 1)) so
/// point clouds can be written to disk or handed to other applications as
/// raw memory.
///
/// @tparam Color The per-point color payload (e.g. uint8_t, uint16_t, or
///         std::array<uint8_t, 3>). Use void (the specialization below) for
///         colorless points.
///
template<typename Color>
struct Point
{
    float x = 0;
    float y = 0;
    float z = 0;
    Color color;
};
67
///
/// @brief Specialization of Point for geometry-only (colorless) points
///
template<>
struct Point<void>
{
    float x = 0;
    float y = 0;
    float z = 0;
};
78
82template<typename Color = void>
84{
85 std::vector<Point<Color>> cloud;
86};
87
///
/// @brief Pixel coordinates in an image
///
/// u is the column and v is the row (callers construct Pixel{w, h} while
/// iterating width-major over an image below).
///
struct Pixel
{
    size_t u = 0;
    size_t v = 0;
};
96
97#pragma pack(pop)
98
100{
101public:
102
110 QMatrix(const CameraCalibration &reference_cal, const CameraCalibration &matching_cal):
111 fx_(reference_cal.P[0][0]),
112 fy_(reference_cal.P[1][1]),
113 cx_(reference_cal.P[0][2]),
114 cy_(reference_cal.P[1][2]),
115 tx_(matching_cal.P[0][3] / matching_cal.P[0][0]),
116 cx_prime_(matching_cal.P[0][2]),
117 fytx_(fy_ * tx_),
118 fxtx_(fx_ * tx_),
119 fycxtx_(fy_ * cx_ * tx_),
120 fxcytx_(fx_ * cy_ * tx_),
121 fxfytx_(fx_ * fy_ * tx_),
122 fycxcxprime_(fy_ * (cx_ - cx_prime_))
123 {
124 }
125
126 Point<void> reproject(const Pixel &pixel, double disparity) const
127 {
128 const double inversebeta = 1.0 / (-fy_ * disparity + fycxcxprime_);
129 const double x = ((fytx_ * pixel.u) + (-fycxtx_)) * inversebeta;
130 const double y = ((fxtx_ * pixel.v) + (-fxcytx_)) * inversebeta;
131 const double z = fxfytx_ * inversebeta;
132
133 return Point<void>{static_cast<float>(x), static_cast<float>(y), static_cast<float>(z)};
134 }
135
136private:
137 double fx_ = 0.0;
138 double fy_ = 0.0;
139 double cx_ = 0.0;
140 double cy_ = 0.0;
141 double tx_ = 0.0;
142 double cx_prime_ = 0.0;
143
144 double fytx_ = 0.0;
145 double fxtx_ = 0.0;
146
147 double fycxtx_ = 0.0;
148 double fxcytx_ = 0.0;
149 double fxfytx_ = 0.0;
150 double fycxcxprime_ = 0.0;
151};
152
///
/// @brief Convert a status object to a user readable string
///
MULTISENSE_API std::string to_string(const Status &status);

///
/// @brief Write an image to a specific path on disk
///
/// NOTE(review): the on-disk format is chosen by the implementation
/// (presumably from the path's extension) -- confirm in the .cc file.
///
/// @return true if the image was written successfully
///
MULTISENSE_API bool write_image(const Image &image, const std::filesystem::path &path);

///
/// @brief Create a depth image from an image frame
///
/// @param frame The frame containing the disparity image to convert
/// @param depth_format The pixel format to use for the output depth image
/// @param disparity_source Which disparity image in the frame to convert
/// @param compute_in_aux_frame Presumably computes depth in the aux camera's
///        frame when true -- confirm in the implementation
/// @param invalid_value The value written for invalid depth measurements
/// @return The depth image, or std::nullopt on failure
///
MULTISENSE_API std::optional<Image> create_depth_image(const ImageFrame &frame,
                                                       const Image::PixelFormat &depth_format,
                                                       const DataSource &disparity_source = DataSource::LEFT_DISPARITY_RAW,
                                                       bool compute_in_aux_frame = false,
                                                       float invalid_value = 0);

///
/// @brief For a given pixel in the rectified aux image, return the
///        corresponding 3D point associated with that aux pixel
///
/// @param max_pixel_search_window Size of the search window used to associate
///        the aux pixel with a disparity measurement -- see implementation
/// @param pixel_epsilon Matching tolerance in pixels -- see implementation
/// @return The 3D point, or std::nullopt if no valid point exists
///
MULTISENSE_API std::optional<Point<void>> get_aux_3d_point(const ImageFrame &frame,
                                                           const Pixel &rectified_aux_pixel,
                                                           size_t max_pixel_search_window,
                                                           double pixel_epsilon,
                                                           const DataSource &disparity_source = DataSource::LEFT_DISPARITY_RAW);

///
/// @brief Convert a YCbCr420 luma + chroma image pair into a BGR color image
///
MULTISENSE_API std::optional<Image> create_bgr_from_ycbcr420(const Image &luma,
                                                             const Image &chroma,
                                                             const DataSource &output_source);

///
/// @brief Create a BGR color image from the luma/chroma images in a frame
///
MULTISENSE_API std::optional<Image> create_bgr_image(const ImageFrame &frame,
                                                     const DataSource &output_source);
220
231template<typename Color>
232MULTISENSE_API std::optional<PointCloud<Color>> create_color_pointcloud(const Image &disparity,
233 const std::optional<Image> &color,
234 double max_range,
235 const StereoCalibration &calibration)
236{
237 size_t color_step = 0;
238 double color_disparity_scale = 0.0;
239
240 if constexpr (std::is_same_v<Color, void>)
241 {
242 if (disparity.format != Image::PixelFormat::MONO16 || disparity.width < 0 || disparity.height < 0)
243 {
244 return std::nullopt;
245 }
246 }
247 else
248 {
249 if (!color)
250 {
251 return std::nullopt;
252 }
253
254 color_step = sizeof(Color);
255
256 if (disparity.format != Image::PixelFormat::MONO16 ||
257 color->width != disparity.width ||
258 color->height != disparity.height ||
259 disparity.width < 0 ||
260 disparity.height < 0)
261 {
262 return std::nullopt;
263 }
264
265 const double tx = calibration.right.P[0][3] / calibration.right.P[0][0];
266 const double color_tx = color->calibration.P[0][3] / color->calibration.P[0][0];
267 color_disparity_scale = color_tx / tx;
268 }
269
270 constexpr double scale = 1.0 / 16.0;
271
272 const double squared_range = max_range * max_range;
273
274 const QMatrix Q(disparity.calibration, calibration.right);
275
276 PointCloud<Color> output;
277 output.cloud.reserve(disparity.width * disparity.height);
278
279 for (size_t h = 0 ; h < static_cast<size_t>(disparity.height) ; ++h)
280 {
281 for (size_t w = 0 ; w < static_cast<size_t>(disparity.width) ; ++w)
282 {
283 const size_t index = disparity.image_data_offset +
284 (h * disparity.width * sizeof(uint16_t)) +
285 (w * sizeof(uint16_t));
286
287 const double d =
288 static_cast<double>(*reinterpret_cast<const uint16_t*>(disparity.raw_data->data() + index)) * scale;
289
290 if (d == 0.0)
291 {
292 continue;
293 }
294
295 const auto &[x, y, z] = Q.reproject(Pixel{w, h}, d);
296
297 if ((x*x + y*y + z*z) > squared_range)
298 {
299 continue;
300 }
301
302 if constexpr (std::is_same_v<Color, void>)
303 {
304 output.cloud.push_back(Point<Color>{static_cast<float>(x), static_cast<float>(y), static_cast<float>(z)});
305 }
306 else
307 {
308 //
309 // Use the approximation that color_pixel_u = disp_u - (tx_color/ tx) * d
310 //
311 const size_t color_index = color->image_data_offset +
312 (h * color->width * color_step) +
313 static_cast<size_t>((static_cast<double>(w) - (color_disparity_scale * d))) * color_step;
314
315 const Color color_pixel = *reinterpret_cast<const Color*>(color->raw_data->data() + color_index);
316
317 output.cloud.push_back(Point<Color>{static_cast<float>(x), static_cast<float>(y), static_cast<float>(z),
318 color_pixel});
319 }
320 }
321 }
322
323 return output;
324}
325
334template<typename Color>
335MULTISENSE_API std::optional<PointCloud<Color>> create_color_pointcloud(const ImageFrame &frame,
336 double max_range,
337 const DataSource &color_source = DataSource::UNKNOWN,
338 const DataSource &disparity_source = DataSource::LEFT_DISPARITY_RAW)
339{
340 if constexpr (std::is_same_v<Color, void>)
341 {
342 if (!frame.has_image(disparity_source))
343 {
344 return std::nullopt;
345 }
346
347 return create_color_pointcloud<Color>(frame.get_image(disparity_source), std::nullopt, max_range, frame.calibration);
348 }
349 else
350 {
351 if (!frame.has_image(color_source) || !frame.has_image(disparity_source))
352 {
353 return std::nullopt;
354 }
355
357 frame.get_image(color_source),
358 max_range,
359 frame.calibration);
360 }
361}
362
363MULTISENSE_API std::optional<PointCloud<void>> create_pointcloud(const ImageFrame &frame,
364 double max_range,
366
374template <typename Color>
375MULTISENSE_API bool write_pointcloud_ply(const PointCloud<Color> &point_cloud, const std::filesystem::path &path)
376{
377 std::ofstream ply(path, std::ios::binary);
378 if (!ply.good())
379 {
380 return false;
381 }
382
383 std::ostringstream header;
384 header << "ply\n";
385 header << "format binary_little_endian 1.0\n";
386 header << "element vertex " << point_cloud.cloud.size() << "\n";
387 header << "property float x\n";
388 header << "property float y\n";
389 header << "property float z\n";
390
391 if constexpr (std::is_same_v<Color, uint8_t>)
392 {
393 header << "property uchar intensity\n";
394 }
395 else if constexpr (std::is_same_v<Color, uint16_t>)
396 {
397 header << "property ushort intensity\n";
398 }
399 else if constexpr (std::is_same_v<Color, std::array<uint8_t, 3>>)
400 {
401 header << "property uchar red\n";
402 header << "property uchar green\n";
403 header << "property uchar blue\n";
404 }
405 else if (!std::is_same_v<Color, void>)
406 {
407 throw std::runtime_error("Unsupported color type");
408 }
409
410 header << "end_header\n";
411
412 std::string header_str = header.str();
413 ply.write(header_str.c_str(), header_str.size());
414
415 for (const auto &point : point_cloud.cloud)
416 {
417 ply.write(reinterpret_cast<const char*>(&point.x), sizeof(point.x));
418 ply.write(reinterpret_cast<const char*>(&point.y), sizeof(point.y));
419 ply.write(reinterpret_cast<const char*>(&point.z), sizeof(point.z));
420
421 if constexpr (std::is_same_v<Color, std::array<uint8_t, 3>>)
422 {
423 uint8_t red = point.color[2];
424 uint8_t green = point.color[1];
425 uint8_t blue = point.color[0];
426 ply.write(reinterpret_cast<const char*>(&red), sizeof(red));
427 ply.write(reinterpret_cast<const char*>(&green), sizeof(green));
428 ply.write(reinterpret_cast<const char*>(&blue), sizeof(blue));
429 }
430 else if constexpr (std::is_same_v<Color, void>)
431 {
432 // No color data to write.
433 }
434 else
435 {
436 // Write the color value directly (works for uint8_t or uint16_t).
437 ply.write(reinterpret_cast<const char*>(&point.color), sizeof(point.color));
438 }
439 }
440
441 return true;
442}
443
444}
Copyright 2013-2025 Carnegie Robotics, LLC 4501 Hatfield Street, Pittsburgh, PA 15201 http://www....
#define MULTISENSE_API
QMatrix(const CameraCalibration &reference_cal, const CameraCalibration &matching_cal)
Construct a minimal Q matrix from calibrations.
Point< void > reproject(const Pixel &pixel, double disparity) const
MULTISENSE_API std::optional< Point< void > > get_aux_3d_point(const ImageFrame &frame, const Pixel &rectified_aux_pixel, size_t max_pixel_search_window, double pixel_epsilon, const DataSource &disparity_source=DataSource::LEFT_DISPARITY_RAW)
for a given pixel in the aux image, return the corresponding 3D point associated with the aux pixel
MULTISENSE_API std::optional< PointCloud< Color > > create_color_pointcloud(const Image &disparity, const std::optional< Image > &color, double max_range, const StereoCalibration &calibration)
Create a point cloud from a image frame and a color source.
MULTISENSE_API std::optional< Image > create_bgr_from_ycbcr420(const Image &luma, const Image &chroma, const DataSource &output_source)
Convert a YCbCr420 luma + chroma image into a BGR color image.
DataSource
Identifies which camera or data source the image is from.
MULTISENSE_API std::string to_string(const Status &status)
Convert a status object to a user readable string.
MULTISENSE_API bool write_pointcloud_ply(const PointCloud< Color > &point_cloud, const std::filesystem::path &path)
Write a point cloud to a ply file.
MULTISENSE_API std::optional< Image > create_bgr_image(const ImageFrame &frame, const DataSource &output_source)
Convert a YCbCr420 luma + chroma image into a BGR color image.
MULTISENSE_API bool write_image(const Image &image, const std::filesystem::path &path)
Write a image to a specific path on disk.
MULTISENSE_API std::optional< Image > create_depth_image(const ImageFrame &frame, const Image::PixelFormat &depth_format, const DataSource &disparity_source=DataSource::LEFT_DISPARITY_RAW, bool compute_in_aux_frame=false, float invalid_value=0)
Create a depth image from a image frame.
MULTISENSE_API std::optional< PointCloud< void > > create_pointcloud(const ImageFrame &frame, double max_range, const DataSource &disparity_source=DataSource::LEFT_DISPARITY_RAW)
std::array< std::array< float, 4 >, 3 > P
Rectified projection matrix which takes points in the origin camera coordinate frame and projects the...
A frame containing multiple images (indexed by DataSource).
bool has_image(const DataSource &source) const
Check if we have an image for a given data source.
Represents a single image plus metadata.
int width
Width of the image in pixels.
PixelFormat format
The format of the image data stored in the raw_data stored in the raw_data buffer.
PixelFormat
Pixel formats.
CameraCalibration calibration
The scaled calibration associated with the image.
int height
Height of the image in pixels.
int64_t image_data_offset
An offset into the raw_data pointer where the image data starts.
std::shared_ptr< const std::vector< uint8_t > > raw_data
A pointer to the raw image data sent from the camera.
Pixel coordinates in a image.
A pointcloud containing a collection of colorized points.
std::vector< Point< Color > > cloud
Make sure our Points and point clouds are packed for applications which might need to handle the unde...
CameraCalibration right
Calibration information for the right camera.