diff --git a/.github/workflows/basic-build-ci.yaml b/.github/workflows/basic-build-ci.yaml index e308066fb..759927eef 100644 --- a/.github/workflows/basic-build-ci.yaml +++ b/.github/workflows/basic-build-ci.yaml @@ -15,7 +15,7 @@ jobs: image: osrf/ros2:testing steps: - name: Checkout repo - uses: actions/checkout@v2 + uses: actions/checkout@v4 - name: Create Workspace run: | mkdir src_tmp @@ -35,3 +35,36 @@ jobs: bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \ colcon test; \ colcon test-result --verbose' + build-rolling-testing: + runs-on: ubuntu-latest + strategy: + fail-fast: false + container: + image: osrf/ros2:testing + steps: + - name: Checkout repo + uses: actions/checkout@v4 + - name: Create Workspace + run: | + mkdir src_tmp + mv `find -maxdepth 1 -not -name . -not -name src_tmp` src_tmp/ + mv src_tmp/ src/ + - name: Install Prerequisites + run: | + apt update -qq + apt install -qq -y lsb-release wget curl gnupg2 git + curl -sSL https://raw.githubusercontent.com/ros/rosdistro/master/ros.key -o /usr/share/keyrings/ros2-testing-archive-keyring.gpg + echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/ros2-testing-archive-keyring.gpg] http://packages.ros.org/ros2-testing/ubuntu $(. /etc/os-release && echo $UBUNTU_CODENAME) main" | tee /etc/apt/sources.list.d/ros2.list > /dev/null + apt-get update && apt-get upgrade -q -y + bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \ + apt-get update && apt-get upgrade -y && rosdep update; \ + rosdep install --from-paths src --ignore-src -y' + - name: Build Workspace + run: | + bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \ + colcon build' + - name: Run Tests + run: | + bash -c 'source /opt/ros/$ROS_DISTRO/setup.bash; \ + colcon test; \ + colcon test-result --verbose' diff --git a/README.md b/README.md index f261bb717..177975a8b 100644 --- a/README.md +++ b/README.md @@ -1,7 +1,7 @@ image_pipeline ============== -[![Build Status](https://build.ros2.org/buildStatus/icon?job=Rdev__image_pipeline__ubuntu_jammy_amd64)](https://build.ros2.org/job/Rdev__image_pipeline__ubuntu_jammy_amd64/) +[![Build Status](https://build.ros2.org/buildStatus/icon?job=Rdev__image_pipeline__ubuntu_noble_amd64)](https://build.ros2.org/job/Rdev__image_pipeline__ubuntu_noble_amd64/) This package fills the gap between getting raw images from a camera driver and higher-level vision processing. diff --git a/camera_calibration/CHANGELOG.rst b/camera_calibration/CHANGELOG.rst index cbe2394d8..c70d55a99 100644 --- a/camera_calibration/CHANGELOG.rst +++ b/camera_calibration/CHANGELOG.rst @@ -2,6 +2,105 @@ Changelog for package camera_calibration ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Refactoring calibration code (`#1000 `_) + Co-authored-by: Alejandro Hernández Cordero +* Contributors: Myron Rodrigues + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ +* Change camera info message to lower case (`#1005 `_) + Change camera info message to lower case since message type had been + change in rolling and humble. + [](https://github.com/ros2/common_interfaces/blob/rolling/sensor_msgs/msg/CameraInfo.msg) +* Formatting calib code before refactoring (`#999 `_) + As discussed in `#975 `_ and `#973 `_ + doing the linting first. + using style from + [here](https://github.com/ament/ament_lint/blob/rolling/ament_pycodestyle/ament_pycodestyle/configuration/ament_pycodestyle.ini) +* Added stereo calibration using charuco board (`#976 `_) + From `#972 `_ + Doing this first for rolling. 
+ This was a TODO in the repository, opening this PR to add this feature. + - The main issue why this wasn't possible imo is the way `mk_obj_points` + works. I'm using the inbuilt opencv function to get the points there. + - The other is a condition when aruco markers are detected they are + added as good points, This is fine in case of mono but in stereo these + have to be the same number as the object points to find matches although + this should be possible with aruco. +* Contributors: Myron Rodrigues, SFhmichael + +6.0.0 (2024-05-27) +------------------ +* fix: cv2.aruco.interpolateCornersCharuco is deprecated (`#979 `_) + There has been API Changes in the newer releases of opencv2 (from + 4.8.0). The PR addresses this by supporting both the old and new APIs. + updated Syntax + ``` + charucodetector = cv2.aruco.CharucoDetector(board) + charuco_corners, charuco_ids, marker_corners, marker_ids = charucodetector.detectBoard(image) + ``` + before 4.8.0 + ``` + marker_corners, marker_ids, rejectedImgPoints = cv2.aruco.detectMarkers( image, dictionary) + retval, charuco_corners, charuco_ids = cv2.aruco.interpolateCornersCharuco( marker_corners, marker_ids, image, board) + ``` + See the changed examples in the main opencv2 repo: + https://github.com/opencv/opencv/blob/f9a59f2592993d3dcc080e495f4f5e02dd8ec7ef/samples/python/calibrate.py#L110 +* Update for compatibility with image_pipeline 4.1.0 (`#968 `_) + This is a PR to fix: + - `#966 `_ + As noted in `#966 `_, as of writing image_pipeline [4.1.0 has been + released](https://github.com/ros-perception/vision_opencv/releases/tag/4.1.0), + is updated on + [index.ros.org](https://index.ros.org/p/image_geometry/github-ros-perception-vision_opencv/#rolling), + but it has not yet been migrated to + [packages.ros.org](http://packages.ros.org/ros2/ubuntu/dists/noble/main/binary-amd64/Packages). + As such `camera_calibration` will also require the source of + [image_pipeline + 4.1.0](https://github.com/ros-perception/vision_opencv/releases/tag/4.1.0) + or higher to successfully build. + I tested to ensure successful build with colcon build & colcon test. + Note that colcon test has the following warning that is out of scope of + this PR: + ``` + =============================== warnings summary =============================== + src/camera_calibration/calibrator.py:47 + Warning: The distutils package is deprecated and slated for removal in Python 3.12. Use setuptools or check PEP 632 for potential alternatives + ``` + Please let me know if there are any questions, concerns, or requested + changes. +* replace disutils with python3-semver (`#970 `_) + Fix for + - `#969 `_ + I added a dependency for `python3-semver` to replace version parsing + with `disutils`. + Please let me know if you have any questions, concerns, or additional + requested changes. +* Contributors: Földi Tamás, Scott Monaghan + +5.0.1 (2024-03-26) +------------------ +* Fix spelling error for cv2.aruco.DICT from 6x6_50 to 7x7_1000 (`#961 `_) + There was mismatch of capitalisation of "X" for OpenCV + cv2.aruco.DICT_n**X**n\_ in camera_calibration package for dicts 6x6_50 + to 7x7_1000 + Co-authored-by: Vishal Balaji +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. 
Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* migrate camera_calibration documentation (`#937 `_) +* install tarfile_calibration (`#923 `_) + otherwise, it can't be run easily +* calibration: better warnings around board configuration `#713 `_ (`#724 `_) +* Contributors: Michael Ferguson, Vishal Balaji, jonathanTIE + 5.0.0 (2024-01-24) ------------------ * ROS 2: Added more aruco dicts, fixed aruco linerror bug (`#873 `_) diff --git a/camera_calibration/doc/api.rst b/camera_calibration/doc/api.rst index b898197c5..d998cc7e6 100644 --- a/camera_calibration/doc/api.rst +++ b/camera_calibration/doc/api.rst @@ -1,8 +1,8 @@ API Documentation ================= -.. autoclass:: camera_calibration.calibrator.MonoCalibrator +.. autoclass:: camera_calibration.mono_calibrator.MonoCalibrator :members: -.. autoclass:: camera_calibration.calibrator.StereoCalibrator +.. autoclass:: camera_calibration.stereo_calibrator.StereoCalibrator :members: diff --git a/camera_calibration/package.xml b/camera_calibration/package.xml index 84c742bc5..37ce60925 100644 --- a/camera_calibration/package.xml +++ b/camera_calibration/package.xml @@ -2,7 +2,7 @@ camera_calibration - 5.0.0 + 6.0.3 camera_calibration allows easy calibration of monocular or stereo cameras using a checkerboard calibration target. @@ -27,6 +27,7 @@ rclpy std_srvs sensor_msgs + python3-semver ament_copyright ament_flake8 diff --git a/camera_calibration/setup.py b/camera_calibration/setup.py index f5639a0f6..0a6bbb2ed 100644 --- a/camera_calibration/setup.py +++ b/camera_calibration/setup.py @@ -5,12 +5,12 @@ setup( name=PACKAGE_NAME, - version='5.0.0', + version='6.0.3', packages=["camera_calibration", "camera_calibration.nodes"], data_files=[ - ('share/ament_index/resource_index/packages', - ['resource/' + PACKAGE_NAME]), - ('share/' + PACKAGE_NAME, ['package.xml']), + ('share/ament_index/resource_index/packages', + ['resource/' + PACKAGE_NAME]), + ('share/' + PACKAGE_NAME, ['package.xml']), ], py_modules=[], package_dir={'': 'src'}, diff --git a/camera_calibration/src/camera_calibration/calibrator.py b/camera_calibration/src/camera_calibration/calibrator.py index c35bf3b46..306df57b9 100644 --- a/camera_calibration/src/camera_calibration/calibrator.py +++ b/camera_calibration/src/camera_calibration/calibrator.py @@ -32,80 +32,91 @@ # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. -from io import BytesIO import cv2 import cv_bridge -import image_geometry import math import numpy.linalg -import pickle -import random import sensor_msgs.msg -import sys import tarfile -import time -from distutils.version import LooseVersion from enum import Enum # Supported camera models + + class CAMERA_MODEL(Enum): PINHOLE = 0 FISHEYE = 1 # Supported calibration patterns + + class Patterns: Chessboard, Circles, ACircles, ChArUco = list(range(4)) + class CalibrationException(Exception): pass # TODO: Make pattern per-board? 
+ + class ChessboardInfo(): - def __init__(self, pattern="chessboard", n_cols = 0, n_rows = 0, dim = 0.0, marker_size = 0.0, aruco_dict = None): + def __init__( + self, pattern="chessboard", n_cols=0, n_rows=0, dim=0.0, marker_size=0.0, + aruco_dict=None): self.pattern = pattern self.n_cols = n_cols self.n_rows = n_rows self.dim = dim self.marker_size = marker_size self.aruco_dict = None - self.charuco_board = None; - if pattern=="charuco": + self.charuco_board = None + if pattern == "charuco": self.aruco_dict = cv2.aruco.getPredefinedDictionary({ - "aruco_orig" : cv2.aruco.DICT_ARUCO_ORIGINAL, - "4x4_50" : cv2.aruco.DICT_4X4_50, - "4x4_100" : cv2.aruco.DICT_4X4_100, - "4x4_250" : cv2.aruco.DICT_4X4_250, - "4x4_1000" : cv2.aruco.DICT_4X4_1000, - "5x5_50" : cv2.aruco.DICT_5X5_50, - "5x5_100" : cv2.aruco.DICT_5X5_100, - "5x5_250" : cv2.aruco.DICT_5X5_250, - "5x5_1000" : cv2.aruco.DICT_5X5_1000, - "6x6_50" : cv2.aruco.DICT_6X6_50, - "6x6_100" : cv2.aruco.DICT_6X6_100, - "6x6_250" : cv2.aruco.DICT_6X6_250, - "6x6_1000" : cv2.aruco.DICT_6X6_1000, - "7x7_50" : cv2.aruco.DICT_7X7_50, - "7x7_100" : cv2.aruco.DICT_7X7_100, - "7x7_250" : cv2.aruco.DICT_7X7_250, - "7x7_1000" : cv2.aruco.DICT_7X7_1000}[aruco_dict]) - self.charuco_board = cv2.aruco.CharucoBoard_create(self.n_cols, self.n_rows, self.dim, self.marker_size, - self.aruco_dict) + "aruco_orig": cv2.aruco.DICT_ARUCO_ORIGINAL, + "4x4_50": cv2.aruco.DICT_4X4_50, + "4x4_100": cv2.aruco.DICT_4X4_100, + "4x4_250": cv2.aruco.DICT_4X4_250, + "4x4_1000": cv2.aruco.DICT_4X4_1000, + "5x5_50": cv2.aruco.DICT_5X5_50, + "5x5_100": cv2.aruco.DICT_5X5_100, + "5x5_250": cv2.aruco.DICT_5X5_250, + "5x5_1000": cv2.aruco.DICT_5X5_1000, + "6x6_50": cv2.aruco.DICT_6X6_50, + "6x6_100": cv2.aruco.DICT_6X6_100, + "6x6_250": cv2.aruco.DICT_6X6_250, + "6x6_1000": cv2.aruco.DICT_6X6_1000, + "7x7_50": cv2.aruco.DICT_7X7_50, + "7x7_100": cv2.aruco.DICT_7X7_100, + "7x7_250": cv2.aruco.DICT_7X7_250, + "7x7_1000": cv2.aruco.DICT_7X7_1000}[aruco_dict]) + if cv2.__version__ >= '4.8.0': + self.charuco_board = cv2.aruco.CharucoBoard( + (self.n_cols, self.n_rows), + self.dim, self.marker_size, self.aruco_dict) + else: + self.charuco_board = cv2.aruco.CharucoBoard_create( + self.n_cols, self.n_rows, self.dim, self.marker_size, self.aruco_dict) + # Make all private!!!!! def lmin(seq1, seq2): """ Pairwise minimum of two sequences """ return [min(a, b) for (a, b) in zip(seq1, seq2)] + def lmax(seq1, seq2): """ Pairwise maximum of two sequences """ return [max(a, b) for (a, b) in zip(seq1, seq2)] + def _pdist(p1, p2): """ Distance bwt two points. p1 = (x, y), p2 = (x, y) """ return math.sqrt(math.pow(p1[0] - p2[0], 2) + math.pow(p1[1] - p2[1], 2)) + def _get_outside_corners(corners, board): """ Return the four corners of the board as a whole, as (up_left, up_right, down_right, down_left). @@ -114,20 +125,23 @@ def _get_outside_corners(corners, board): ydim = board.n_rows if board.pattern != "charuco" and corners.shape[1] * corners.shape[0] != xdim * ydim: - raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % (corners.shape[1] * corners.shape[0], - xdim, ydim)) + raise Exception("Invalid number of corners! %d corners. X: %d, Y: %d" % + (corners.shape[1] * corners.shape[0], xdim, ydim)) if board.pattern == "charuco" and corners.shape[1] * corners.shape[0] != (xdim-1) * (ydim-1): - raise Exception(("Invalid number of corners! %d corners. 
X: %d, Y: %d\n for ChArUco boards, " + - "_get_largest_rectangle_corners handles partial views of the target") % (corners.shape[1] * - corners.shape[0], xdim-1, ydim-1)) + raise Exception( + ("Invalid number of corners! %d corners. X: %d, Y: %d\n for ChArUco boards, " + + "_get_largest_rectangle_corners handles partial views of the target") % + (corners.shape[1] * corners.shape[0], + xdim - 1, ydim - 1)) - up_left = corners[0,0] - up_right = corners[xdim - 1,0] - down_right = corners[-1,0] - down_left = corners[-xdim,0] + up_left = corners[0, 0] + up_right = corners[xdim - 1, 0] + down_right = corners[-1, 0] + down_left = corners[-xdim, 0] return (up_left, up_right, down_right, down_left) + def _get_largest_rectangle_corners(corners, ids, board): """ Return the largest rectangle with all four corners visible in a partial view of a ChArUco board, as (up_left, @@ -147,7 +161,8 @@ def _get_largest_rectangle_corners(corners, ids, board): # xdim and ydim are number of squares, but we're working with inner corners xdim = board.n_cols - 1 ydim = board.n_rows - 1 - board_vis = [[[i*xdim + j] in ids for j in range(xdim)] for i in range(ydim)] + board_vis = [ + [[i*xdim + j] in ids for j in range(xdim)] for i in range(ydim)] best_area = 0 best_rect = [-1, -1, -1, -1] @@ -162,10 +177,12 @@ def _get_largest_rectangle_corners(corners, ids, board): best_rect = [x1, x2, y1, y2] (x1, x2, y1, y2) = best_rect corner_ids = (y2*xdim+x1, y2*xdim+x2, y1*xdim+x2, y1*xdim + x1) - corners = tuple(corners[numpy.where(ids == corner_id)[0]][0][0] for corner_id in corner_ids) + corners = tuple(corners[numpy.where(ids == corner_id)[0]][0][0] + for corner_id in corner_ids) return corners + def _calculate_skew(corners): """ Get skew for given checkerboard detection. @@ -182,11 +199,13 @@ def angle(a, b, c): """ ab = a - b cb = c - b - return math.acos(numpy.dot(ab,cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb))) + return math.acos(numpy.dot(ab, cb) / (numpy.linalg.norm(ab) * numpy.linalg.norm(cb))) - skew = min(1.0, 2. * abs((math.pi / 2.) - angle(up_left, up_right, down_right))) + skew = min(1.0, 2. * abs((math.pi / 2.) - + angle(up_left, up_right, down_right))) return skew + def _calculate_area(corners): """ Get 2d image area of the detected checkerboard. @@ -201,7 +220,8 @@ def _calculate_area(corners): q = a + b return abs(p[0]*q[1] - p[1]*q[0]) / 2. -def _get_corners(img, board, refine = True, checkerboard_flags=0): + +def _get_corners(img, board, refine=True, checkerboard_flags=0): """ Get corners for a particular chessboard for an image """ @@ -211,8 +231,8 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) else: mono = img - (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), flags = cv2.CALIB_CB_ADAPTIVE_THRESH | - cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags) + (ok, corners) = cv2.findChessboardCorners(mono, (board.n_cols, board.n_rows), + flags=cv2.CALIB_CB_ADAPTIVE_THRESH | cv2.CALIB_CB_NORMALIZE_IMAGE | checkerboard_flags) if not ok: return (ok, corners) @@ -220,23 +240,28 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): # NOTE: This may cause problems with very low-resolution cameras, where 8 pixels is a non-negligible fraction # of the image size. 
See http://answers.ros.org/question/3155/how-can-i-calibrate-low-resolution-cameras BORDER = 8 - if not all([(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) for i in range(corners.shape[0])]): + if not all( + [(BORDER < corners[i, 0, 0] < (w - BORDER)) and (BORDER < corners[i, 0, 1] < (h - BORDER)) + for i in range(corners.shape[0])]): ok = False # Ensure that all corner-arrays are going from top to bottom. - if board.n_rows!=board.n_cols: + if board.n_rows != board.n_cols: if corners[0, 0, 1] > corners[-1, 0, 1]: corners = numpy.copy(numpy.flipud(corners)) else: - direction_corners=(corners[-1]-corners[0])>=numpy.array([[0.0,0.0]]) + direction_corners = (corners[-1]-corners[0] + ) >= numpy.array([[0.0, 0.0]]) if not numpy.all(direction_corners): if not numpy.any(direction_corners): corners = numpy.copy(numpy.flipud(corners)) elif direction_corners[0][0]: - corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2)).reshape(board.n_cols*board.n_rows,1,2) + corners = numpy.rot90(corners.reshape(board.n_rows, board.n_cols, 2)).reshape( + board.n_cols*board.n_rows, 1, 2) else: - corners=numpy.rot90(corners.reshape(board.n_rows,board.n_cols,2),3).reshape(board.n_cols*board.n_rows,1,2) + corners = numpy.rot90(corners.reshape(board.n_rows, board.n_cols, 2), 3).reshape( + board.n_cols*board.n_rows, 1, 2) if refine and ok: # Use a radius of half the minimum distance between corners. This should be large enough to snap to the @@ -245,17 +270,20 @@ def _get_corners(img, board, refine = True, checkerboard_flags=0): for row in range(board.n_rows): for col in range(board.n_cols - 1): index = row*board.n_rows + col - min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + 1, 0])) + min_distance = min(min_distance, _pdist( + corners[index, 0], corners[index + 1, 0])) for row in range(board.n_rows - 1): for col in range(board.n_cols): index = row*board.n_rows + col - min_distance = min(min_distance, _pdist(corners[index, 0], corners[index + board.n_cols, 0])) + min_distance = min(min_distance, _pdist( + corners[index, 0], corners[index + board.n_cols, 0])) radius = int(math.ceil(min_distance * 0.5)) - cv2.cornerSubPix(mono, corners, (radius,radius), (-1,-1), - ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + cv2.cornerSubPix(mono, corners, (radius, radius), (-1, -1), + (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)) return (ok, corners) + def _get_charuco_corners(img, board, refine): """ Get chessboard corners from image of ChArUco board @@ -268,12 +296,22 @@ def _get_charuco_corners(img, board, refine): else: mono = img - marker_corners, marker_ids, _ = cv2.aruco.detectMarkers(img, board.aruco_dict) - if len(marker_corners) == 0: - return (False, None, None) - _, square_corners, ids = cv2.aruco.interpolateCornersCharuco(marker_corners, marker_ids, img, board.charuco_board) + if cv2.__version__ >= '4.8.0': + charucodetector = cv2.aruco.CharucoDetector(board.charuco_board) + square_corners, ids, marker_corners, marker_ids = charucodetector.detectBoard( + mono) + else: + marker_corners, marker_ids, _ = cv2.aruco.detectMarkers( + img, board.aruco_dict) + + if len(marker_corners) == 0: + return (False, None, None) + _, square_corners, ids = cv2.aruco.interpolateCornersCharuco( + marker_corners, marker_ids, img, board.charuco_board, minMarkers=1) + return ((square_corners is not None) and (len(square_corners) > 5), square_corners, ids) + def _get_circles(img, board, pattern): """ Get circle centers for a 
symmetric or asymmetric grid @@ -289,16 +327,19 @@ def _get_circles(img, board, pattern): if pattern == Patterns.ACircles: flag = cv2.CALIB_CB_ASYMMETRIC_GRID mono_arr = numpy.array(mono) - (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_cols, board.n_rows), flags=flag) + (ok, corners) = cv2.findCirclesGrid( + mono_arr, (board.n_cols, board.n_rows), flags=flag) # In symmetric case, findCirclesGrid does not detect the target if it's turned sideways. So we try # again with dimensions swapped - not so efficient. # TODO Better to add as second board? Corner ordering will change. if not ok and pattern == Patterns.Circles: - (ok, corners) = cv2.findCirclesGrid(mono_arr, (board.n_rows, board.n_cols), flags=flag) + (ok, corners) = cv2.findCirclesGrid( + mono_arr, (board.n_rows, board.n_cols), flags=flag) return (ok, corners) + def _get_dist_model(dist_params, cam_model): # Select dist model if CAMERA_MODEL.PINHOLE == cam_model: @@ -313,27 +354,32 @@ def _get_dist_model(dist_params, cam_model): return dist_model # TODO self.size needs to come from CameraInfo, full resolution + + class Calibrator(): """ Base class for calibration system """ - def __init__(self, boards, flags=0, fisheye_flags = 0, pattern=Patterns.Chessboard, name='', - checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed = -1.0): + + def __init__(self, boards, flags=0, fisheye_flags=0, pattern=Patterns.Chessboard, name='', + checkerboard_flags=cv2.CALIB_CB_FAST_CHECK, max_chessboard_speed=-1.0): # Ordering the dimensions for the different detectors is actually a minefield... if pattern == Patterns.Chessboard: # Make sure n_cols > n_rows to agree with OpenCV CB detector output - self._boards = [ChessboardInfo("chessboard", max(i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards] + self._boards = [ChessboardInfo("chessboard", max( + i.n_cols, i.n_rows), min(i.n_cols, i.n_rows), i.dim) for i in boards] elif pattern == Patterns.ChArUco: self._boards = boards elif pattern == Patterns.ACircles: # 7x4 and 4x7 are actually different patterns. Assume square-ish pattern, so n_rows > n_cols. - self._boards = [ChessboardInfo("acircles", min(i.n_cols, i.n_rows), max(i.n_cols, i.n_rows), i.dim) for i in boards] + self._boards = [ChessboardInfo("acircles", min(i.n_cols, i.n_rows), max( + i.n_cols, i.n_rows), i.dim) for i in boards] elif pattern == Patterns.Circles: # We end up having to check both ways anyway self._boards = boards else: - raise CalibratorException('pattern must be one of: Chessboard, Circles, ACircles, or ChArUco') - + raise CalibrationException( + 'pattern must be one of: Chessboard, Circles, ACircles, or ChArUco') # Set to true after we perform calibration self.calibrated = False @@ -386,10 +432,11 @@ def get_parameters(self, corners, ids, board, size): Return list of parameters [X, Y, size, skew] describing the checkerboard view. """ (width, height) = size - Xs = corners[:,:,0] - Ys = corners[:,:,1] + Xs = corners[:, :, 0] + Ys = corners[:, :, 1] if board.pattern == 'charuco': - outside_corners = _get_largest_rectangle_corners(corners, ids, board) + outside_corners = _get_largest_rectangle_corners( + corners, ids, board) else: outside_corners = _get_outside_corners(corners, board) area = _calculate_area(outside_corners) @@ -397,7 +444,7 @@ def get_parameters(self, corners, ids, board, size): border = math.sqrt(area) # For X and Y, we "shrink" the image all around by approx. half the board size. # Otherwise large boards are penalized because you can't get much X/Y variation. 
- p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border))) + p_x = min(1.0, max(0.0, (numpy.mean(Xs) - border / 2) / (width - border))) p_y = min(1.0, max(0.0, (numpy.mean(Ys) - border / 2) / (height - border))) p_size = math.sqrt(area / (width * height)) params = [p_x, p_y, p_size, skew] @@ -416,19 +463,23 @@ def is_slow_moving(self, corners, ids, last_frame_corners, last_frame_ids): return False if ids is None: num_corners = len(corners) - corner_deltas = (corners - last_frame_corners).reshape(num_corners, 2) + corner_deltas = ( + corners - last_frame_corners).reshape(num_corners, 2) else: corner_deltas = [] last_frame_ids = list(last_frame_ids.transpose()[0]) for i, c_id in enumerate(ids): try: last_i = last_frame_ids.index(c_id) - corner_deltas.append(corners[i] - last_frame_corners[last_i]) - except ValueError: pass + corner_deltas.append( + corners[i] - last_frame_corners[last_i]) + except ValueError: + pass corner_deltas = numpy.concatenate(corner_deltas) # Average distance travelled overall for all corners - average_motion = numpy.average(numpy.linalg.norm(corner_deltas, axis = 1)) + average_motion = numpy.average( + numpy.linalg.norm(corner_deltas, axis=1)) return average_motion <= self.max_chessboard_speed def is_good_sample(self, params, corners, ids, last_frame_corners, last_frame_ids): @@ -439,11 +490,11 @@ def is_good_sample(self, params, corners, ids, last_frame_corners, last_frame_id return True def param_distance(p1, p2): - return sum([abs(a-b) for (a,b) in zip(p1, p2)]) + return sum([abs(a-b) for (a, b) in zip(p1, p2)]) db_params = [sample[0] for sample in self.db] d = min([param_distance(params, p) for p in db_params]) - #print "d = %.3f" % d #DEBUG + # print "d = %.3f" % d #DEBUG # TODO What's a good threshold here? Should it be configurable? if d <= 0.2: return False @@ -472,22 +523,28 @@ def compute_goodenough(self): min_params = [min_params[0], min_params[1], 0., 0.] 
# For each parameter, judge how much progress has been made toward adequate variation - progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) in zip(min_params, max_params, self.param_ranges)] + progress = [min((hi - lo) / r, 1.0) for (lo, hi, r) + in zip(min_params, max_params, self.param_ranges)] # If we have lots of samples, allow calibration even if not all parameters are green # TODO Awkward that we update self.goodenough instead of returning it - self.goodenough = (len(self.db) >= 40) or all([p == 1.0 for p in progress]) + self.goodenough = (len(self.db) >= 40) or all( + [p == 1.0 for p in progress]) return list(zip(self._param_names, min_params, max_params, progress)) - def mk_object_points(self, boards, use_board_size = False): + def mk_object_points(self, boards, use_board_size=False): + if self.pattern == Patterns.ChArUco: + opts = [board.charuco_board.chessboardCorners for board in boards] + return opts opts = [] - for i, b in enumerate(boards): + for b in boards: num_pts = b.n_cols * b.n_rows opts_loc = numpy.zeros((num_pts, 1, 3), numpy.float32) for j in range(num_pts): opts_loc[j, 0, 0] = (j // b.n_cols) if self.pattern == Patterns.ACircles: - opts_loc[j, 0, 1] = 2*(j % b.n_cols) + (opts_loc[j, 0, 0] % 2) + opts_loc[j, 0, 1] = 2 * \ + (j % b.n_cols) + (opts_loc[j, 0, 0] % 2) else: opts_loc[j, 0, 1] = (j % b.n_cols) opts_loc[j, 0, 2] = 0 @@ -496,7 +553,7 @@ def mk_object_points(self, boards, use_board_size = False): opts.append(opts_loc) return opts - def get_corners(self, img, refine = True): + def get_corners(self, img, refine=True): """ Use cvFindChessboardCorners to find corners of chessboard in image. @@ -511,7 +568,8 @@ def get_corners(self, img, refine = True): for b in self._boards: if self.pattern == Patterns.Chessboard: - (ok, corners) = _get_corners(img, b, refine, self.checkerboard_flags) + (ok, corners) = _get_corners( + img, b, refine, self.checkerboard_flags) ids = None elif self.pattern == Patterns.ChArUco: (ok, corners, ids) = _get_charuco_corners(img, b, refine) @@ -536,12 +594,7 @@ def downsample_and_detect(self, img): # Scale the input image down to ~VGA size height = img.shape[0] width = img.shape[1] - vga_scale_param = self.get_parameter('vga_scale').get_parameter_value() - if vga_scale_param == 0: - scale = math.sqrt((width*height) / (640.*480.)) - else: - scale = vga_scale_param - + scale = math.sqrt((width*height) / (640.*480.)) if scale > 1.0: scrib = cv2.resize(img, (int(width / scale), int(height / scale))) else: @@ -552,7 +605,8 @@ def downsample_and_detect(self, img): if self.pattern == Patterns.Chessboard: # Detect checkerboard - (ok, downsampled_corners, ids, board) = self.get_corners(scrib, refine = True) + (ok, downsampled_corners, ids, board) = self.get_corners( + scrib, refine=True) # Scale corners back to full size image corners = None @@ -568,8 +622,8 @@ def downsample_and_detect(self, img): mono = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) else: mono = img - cv2.cornerSubPix(mono, corners_unrefined, (radius,radius), (-1,-1), - ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1 )) + cv2.cornerSubPix(mono, corners_unrefined, (radius, radius), (-1, -1), + (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.1)) corners = corners_unrefined else: corners = downsampled_corners @@ -581,8 +635,8 @@ def downsample_and_detect(self, img): if ok: if scale > 1.0: downsampled_corners = corners.copy() - downsampled_corners[:,:,0] /= x_scale - downsampled_corners[:,:,1] /= y_scale + downsampled_corners[:, :, 0] /= x_scale + 
downsampled_corners[:, :, 1] /= y_scale else: downsampled_corners = corners @@ -628,34 +682,36 @@ def lrost(name, d, k, r, p, size): "[%s]" % name, "", "camera matrix", - " ".join("%8f" % k[0,i] for i in range(3)), - " ".join("%8f" % k[1,i] for i in range(3)), - " ".join("%8f" % k[2,i] for i in range(3)), + " ".join("%8f" % k[0, i] for i in range(3)), + " ".join("%8f" % k[1, i] for i in range(3)), + " ".join("%8f" % k[2, i] for i in range(3)), "", "distortion", " ".join("%8f" % x for x in d.flat), "", "rectification", - " ".join("%8f" % r[0,i] for i in range(3)), - " ".join("%8f" % r[1,i] for i in range(3)), - " ".join("%8f" % r[2,i] for i in range(3)), + " ".join("%8f" % r[0, i] for i in range(3)), + " ".join("%8f" % r[1, i] for i in range(3)), + " ".join("%8f" % r[2, i] for i in range(3)), "", "projection", - " ".join("%8f" % p[0,i] for i in range(4)), - " ".join("%8f" % p[1,i] for i in range(4)), - " ".join("%8f" % p[2,i] for i in range(4)), + " ".join("%8f" % p[0, i] for i in range(4)), + " ".join("%8f" % p[1, i] for i in range(4)), + " ".join("%8f" % p[2, i] for i in range(4)), "" ]) - assert len(calmessage) < 525, "Calibration info must be less than 525 bytes" + assert len( + calmessage) < 525, "Calibration info must be less than 525 bytes" return calmessage @staticmethod def lryaml(name, d, k, r, p, size, cam_model): def format_mat(x, precision): return ("[%s]" % ( - numpy.array2string(x, precision=precision, suppress_small=True, separator=", ") + numpy.array2string(x, precision=precision, + suppress_small=True, separator=", ") .replace("[", "").replace("]", "").replace("\n", "\n ") - )) + )) dist_model = _get_dist_model(d, cam_model) @@ -690,10 +746,11 @@ def format_mat(x, precision): def do_save(self): filename = '/tmp/calibrationdata.tar.gz' tf = tarfile.open(filename, 'w:gz') - self.do_tarfile_save(tf) # Must be overridden in subclasses + self.do_tarfile_save(tf) # Must be overridden in subclasses tf.close() print(("Wrote calibration data to", filename)) + def image_from_archive(archive, name): """ Load image PGM file from tar archive. @@ -701,758 +758,17 @@ def image_from_archive(archive, name): Used for tarfile loading and unit test. """ member = archive.getmember(name) - imagefiledata = numpy.frombuffer(archive.extractfile(member).read(), numpy.uint8) + imagefiledata = numpy.frombuffer( + archive.extractfile(member).read(), numpy.uint8) imagefiledata.resize((1, imagefiledata.size)) return cv2.imdecode(imagefiledata, cv2.IMREAD_COLOR) + class ImageDrawable(): """ Passed to CalibrationNode after image handled. 
Allows plotting of images with detected corner points """ - def __init__(self): - self.params = None -class MonoDrawable(ImageDrawable): def __init__(self): - ImageDrawable.__init__(self) - self.scrib = None - self.linear_error = -1.0 - -class StereoDrawable(ImageDrawable): - def __init__(self): - ImageDrawable.__init__(self) - self.lscrib = None - self.rscrib = None - self.epierror = -1 - self.dim = -1 - - -class MonoCalibrator(Calibrator): - """ - Calibration class for monocular cameras:: - - images = [cv2.imread("mono%d.png") for i in range(8)] - mc = MonoCalibrator() - mc.cal(images) - print mc.as_message() - """ - - is_mono = True # TODO Could get rid of is_mono - - def __init__(self, *args, **kwargs): - if 'name' not in kwargs: - kwargs['name'] = 'narrow_stereo/left' - super(MonoCalibrator, self).__init__(*args, **kwargs) - - def cal(self, images): - """ - Calibrate camera from given images - """ - goodcorners = self.collect_corners(images) - self.cal_fromcorners(goodcorners) - self.calibrated = True - - def collect_corners(self, images): - """ - :param images: source images containing chessboards - :type images: list of :class:`cvMat` - - Find chessboards in all images. - - Return [ (corners, ids, ChessboardInfo) ] - """ - self.size = (images[0].shape[1], images[0].shape[0]) - corners = [self.get_corners(i) for i in images] - - goodcorners = [(co, ids, b) for (ok, co, ids, b) in corners if ok] - if not goodcorners: - raise CalibrationException("No corners found in images!") - return goodcorners - - def cal_fromcorners(self, good): - """ - :param good: Good corner positions and boards - :type good: [(corners, ChessboardInfo)] - """ - - (ipts, ids, boards) = zip(*good) - opts = self.mk_object_points(boards) - # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio - intrinsics_in = numpy.eye(3, dtype=numpy.float64) - - if self.pattern == Patterns.ChArUco: - if self.camera_model == CAMERA_MODEL.FISHEYE: - raise NotImplemented("Can't perform fisheye calibration with ChArUco board") - - reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco( - ipts, ids, boards[0].charuco_board, self.size, intrinsics_in, None) - - elif self.camera_model == CAMERA_MODEL.PINHOLE: - print("mono pinhole calibration...") - reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera( - opts, ipts, - self.size, - intrinsics_in, - None, - flags = self.calib_flags) - # OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set. - # The extra ones include e.g. thin prism coefficients, which we are not interested in. 
- if self.calib_flags & cv2.CALIB_RATIONAL_MODEL: - self.distortion = dist_coeffs.flat[:8].reshape(-1, 1) # rational polynomial - else: - self.distortion = dist_coeffs.flat[:5].reshape(-1, 1) # plumb bob - elif self.camera_model == CAMERA_MODEL.FISHEYE: - print("mono fisheye calibration...") - # WARNING: cv2.fisheye.calibrate wants float64 points - ipts64 = numpy.asarray(ipts, dtype=numpy.float64) - ipts = ipts64 - opts64 = numpy.asarray(opts, dtype=numpy.float64) - opts = opts64 - reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate( - opts, ipts, self.size, - intrinsics_in, None, flags = self.fisheye_calib_flags) - - # R is identity matrix for monocular calibration - self.R = numpy.eye(3, dtype=numpy.float64) - self.P = numpy.zeros((3, 4), dtype=numpy.float64) - - self.set_alpha(0.0) - - def set_alpha(self, a): - """ - Set the alpha value for the calibrated camera solution. The alpha - value is a zoom, and ranges from 0 (zoomed in, all pixels in - calibrated image are valid) to 1 (zoomed out, all pixels in - original image are in calibrated image). - """ - - if self.camera_model == CAMERA_MODEL.PINHOLE: - # NOTE: Prior to Electric, this code was broken such that we never actually saved the new - # camera matrix. In effect, this enforced P = [K|0] for monocular cameras. - # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix) - ncm, _ = cv2.getOptimalNewCameraMatrix(self.intrinsics, self.distortion, self.size, a) - for j in range(3): - for i in range(3): - self.P[j,i] = ncm[j, i] - self.mapx, self.mapy = cv2.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - # NOTE: cv2.fisheye.estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead: - self.P[:3,:3] = self.intrinsics[:3,:3] - self.P[0,0] /= (1. + a) - self.P[1,1] /= (1. 
+ a) - self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap(self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1) - - - def remap(self, src): - """ - :param src: source image - :type src: :class:`cvMat` - - Apply the post-calibration undistortion to the source image - """ - return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR) - - def undistort_points(self, src): - """ - :param src: N source pixel points (u,v) as an Nx2 matrix - :type src: :class:`cvMat` - - Apply the post-calibration undistortion to the source points - """ - if self.camera_model == CAMERA_MODEL.PINHOLE: - return cv2.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - return cv2.fisheye.undistortPoints(src, self.intrinsics, self.distortion, R = self.R, P = self.P) - - def as_message(self): - """ Return the camera calibration as a CameraInfo message """ - return self.lrmsg(self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) - - def from_message(self, msg, alpha = 0.0): - """ Initialize the camera calibration from a CameraInfo message """ - - self.size = (msg.width, msg.height) - self.intrinsics = numpy.array(msg.k, dtype=numpy.float64, copy=True).reshape((3, 3)) - self.distortion = numpy.array(msg.d, dtype=numpy.float64, copy=True).reshape((len(msg.d), 1)) - self.R = numpy.array(msg.r, dtype=numpy.float64, copy=True).reshape((3, 3)) - self.P = numpy.array(msg.p, dtype=numpy.float64, copy=True).reshape((3, 4)) - - self.set_alpha(0.0) - - def report(self): - self.lrreport(self.distortion, self.intrinsics, self.R, self.P) - - def ost(self): - return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size) - - def yaml(self): - return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) - - def linear_error_from_image(self, image): - """ - Detect the checkerboard and compute the linear error. - Mainly for use in tests. - """ - _, corners, _, ids, board, _ = self.downsample_and_detect(image) - if corners is None: - return None - - undistorted = self.undistort_points(corners) - return self.linear_error(undistorted, ids, board) - - @staticmethod - def linear_error(corners, ids, b): - - """ - Returns the linear error for a set of corners detected in the unrectified image. 
- """ - - if corners is None: - return None - - corners = numpy.squeeze(corners) - - def pt2line(x0, y0, x1, y1, x2, y2): - """ point is (x0, y0), line is (x1, y1, x2, y2) """ - return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + (y2 - y1) ** 2) - - n_cols = b.n_cols - n_rows = b.n_rows - if b.pattern == 'charuco': - n_cols -= 1 - n_rows -= 1 - n_pts = n_cols * n_rows - - if ids is None: - ids = numpy.arange(n_pts).reshape((n_pts, 1)) - - ids_to_idx = dict((ids[i, 0], i) for i in range(len(ids))) - - errors = [] - for row in range(n_rows): - row_min = row * n_cols - row_max = (row+1) * n_cols - pts_in_row = [x for x in ids if row_min <= x < row_max] - - # not enough points to calculate error - if len(pts_in_row) <= 2: continue - - left_pt = min(pts_in_row)[0] - right_pt = max(pts_in_row)[0] - x_left = corners[ids_to_idx[left_pt], 0] - y_left = corners[ids_to_idx[left_pt], 1] - x_right = corners[ids_to_idx[right_pt], 0] - y_right = corners[ids_to_idx[right_pt], 1] - - for pt in pts_in_row: - if pt[0] in (left_pt, right_pt): continue - x = corners[ids_to_idx[pt[0]], 0] - y = corners[ids_to_idx[pt[0]], 1] - errors.append(pt2line(x, y, x_left, y_left, x_right, y_right)) - - if errors: - return math.sqrt(sum([e**2 for e in errors]) / len(errors)) - else: - return None - - - def handle_msg(self, msg): - """ - Detects the calibration target and, if found and provides enough new information, - adds it to the sample database. - - Returns a MonoDrawable message with the display image and progress info. - """ - gray = self.mkgray(msg) - linear_error = -1 - - # Get display-image-to-be (scrib) and detection of the calibration target - scrib_mono, corners, downsampled_corners, ids, board, (x_scale, y_scale) = self.downsample_and_detect(gray) - - if self.calibrated: - # Show rectified image - # TODO Pull out downsampling code into function - gray_remap = self.remap(gray) - gray_rect = gray_remap - if x_scale != 1.0 or y_scale != 1.0: - gray_rect = cv2.resize(gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0])) - - scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR) - - if corners is not None: - # Report linear error - undistorted = self.undistort_points(corners) - linear_error = self.linear_error(undistorted, ids, board) - - # Draw rectified corners - scrib_src = undistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), scrib_src, True) - - else: - scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR) - if corners is not None: - # Draw (potentially downsampled) corners onto display image - if board.pattern == "charuco": - cv2.aruco.drawDetectedCornersCharuco(scrib, downsampled_corners, ids) - else: - cv2.drawChessboardCorners(scrib, (board.n_cols, board.n_rows), downsampled_corners, True) - - # Add sample to database only if it's sufficiently different from any previous sample. 
- params = self.get_parameters(corners, ids, board, (gray.shape[1], gray.shape[0])) - if self.is_good_sample(params, corners, ids, self.last_frame_corners, self.last_frame_ids): - self.db.append((params, gray)) - if self.pattern == Patterns.ChArUco: - self.good_corners.append((corners, ids, board)) - else: - self.good_corners.append((corners, None, board)) - print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) - - self.last_frame_corners = corners - self.last_frame_ids = ids - rv = MonoDrawable() - rv.scrib = scrib - rv.params = self.compute_goodenough() - rv.linear_error = linear_error - return rv - - def do_calibration(self, dump = False): - if not self.good_corners: - print("**** Collecting corners for all images! ****") #DEBUG - images = [i for (p, i) in self.db] - self.good_corners = self.collect_corners(images) - self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally - # Dump should only occur if user wants it - if dump: - pickle.dump((self.is_mono, self.size, self.good_corners), - open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) - self.cal_fromcorners(self.good_corners) - self.calibrated = True - # DEBUG - print((self.report())) - print((self.ost())) - - def do_tarfile_save(self, tf): - """ Write images and calibration solution to a tarfile object """ - - def taradd(name, buf): - if isinstance(buf, str): - s = BytesIO(buf.encode('utf-8')) - else: - s = BytesIO(buf) - ti = tarfile.TarInfo(name) - ti.size = len(s.getvalue()) - ti.uname = 'calibrator' - ti.mtime = int(time.time()) - tf.addfile(tarinfo=ti, fileobj=s) - - ims = [("left-%04d.png" % i, im) for i,(_, im) in enumerate(self.db)] - for (name, im) in ims: - taradd(name, cv2.imencode(".png", im)[1].tostring()) - taradd('ost.yaml', self.yaml()) - taradd('ost.txt', self.ost()) - - def do_tarfile_calibration(self, filename): - archive = tarfile.open(filename, 'r') - - limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('.pgm') or f.endswith('png'))) ] - - self.cal(limages) - -# TODO Replicate MonoCalibrator improvements in stereo -class StereoCalibrator(Calibrator): - """ - Calibration class for stereo cameras:: - - limages = [cv2.imread("left%d.png") for i in range(8)] - rimages = [cv2.imread("right%d.png") for i in range(8)] - sc = StereoCalibrator() - sc.cal(limages, rimages) - print sc.as_message() - """ - - is_mono = False - - def __init__(self, *args, **kwargs): - if 'name' not in kwargs: - kwargs['name'] = 'narrow_stereo' - super(StereoCalibrator, self).__init__(*args, **kwargs) - self.l = MonoCalibrator(*args, **kwargs) - self.r = MonoCalibrator(*args, **kwargs) - # Collecting from two cameras in a horizontal stereo rig, can't get - # full X range in the left camera. - self.param_ranges[0] = 0.4 - - #override - def set_cammodel(self, modeltype): - super(StereoCalibrator, self).set_cammodel(modeltype) - self.l.set_cammodel(modeltype) - self.r.set_cammodel(modeltype) - - def cal(self, limages, rimages): - """ - :param limages: source left images containing chessboards - :type limages: list of :class:`cvMat` - :param rimages: source right images containing chessboards - :type rimages: list of :class:`cvMat` - - Find chessboards in images, and runs the OpenCV calibration solver. 
- """ - goodcorners = self.collect_corners(limages, rimages) - self.size = (limages[0].shape[1], limages[0].shape[0]) - self.l.size = self.size - self.r.size = self.size - self.cal_fromcorners(goodcorners) - self.calibrated = True - - def collect_corners(self, limages, rimages): - """ - For a sequence of left and right images, find pairs of images where both - left and right have a chessboard, and return their corners as a list of pairs. - """ - # Pick out (corners, ids, board) tuples - lcorners = [] - rcorners = [] - for img in limages: - (_, corners, _, ids, board, _) = self.downsample_and_detect(img) - lcorners.append((corners, ids, board)) - for img in rimages: - (_, corners, _, ids, board, _) = self.downsample_and_detect(img) - rcorners.append((corners, ids, board)) - - good = [(lco, rco, lid, rid, b) for ((lco, lid, b), (rco, rid, br)) in zip( lcorners, rcorners) - if (lco is not None and rco is not None)] - - if len(good) == 0: - raise CalibrationException("No corners found in images!") - return good - - def cal_fromcorners(self, good): - # Perform monocular calibrations - lcorners = [(lco, lid, b) for (lco, rco, lid, rid, b) in good] - rcorners = [(rco, rid, b) for (lco, rco, lid, rid, b) in good] - self.l.cal_fromcorners(lcorners) - self.r.cal_fromcorners(rcorners) - - (lipts, ripts, _, _, boards) = zip(*good) - - opts = self.mk_object_points(boards, True) - - flags = cv2.CALIB_FIX_INTRINSIC - - self.T = numpy.zeros((3, 1), dtype=numpy.float64) - self.R = numpy.eye(3, dtype=numpy.float64) - - if self.pattern == Patterns.ChArUco: - # TODO: implement stereo ChArUco calibration - raise NotImplemented("Stereo calibration not implemented for ChArUco boards") - - if self.camera_model == CAMERA_MODEL.PINHOLE: - print("stereo pinhole calibration...") - if LooseVersion(cv2.__version__).version[0] == 2: - cv2.stereoCalibrate(opts, lipts, ripts, self.size, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), - flags = flags) - else: - cv2.stereoCalibrate(opts, lipts, ripts, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), - flags = flags) - elif self.camera_model == CAMERA_MODEL.FISHEYE: - print("stereo fisheye calibration...") - if LooseVersion(cv2.__version__).version[0] == 2: - print("ERROR: You need OpenCV >3 to use fisheye camera model") - sys.exit() - else: - # WARNING: cv2.fisheye.stereoCalibrate wants float64 points - lipts64 = numpy.asarray(lipts, dtype=numpy.float64) - lipts = lipts64 - ripts64 = numpy.asarray(ripts, dtype=numpy.float64) - ripts = ripts64 - opts64 = numpy.asarray(opts, dtype=numpy.float64) - opts = opts64 - - cv2.fisheye.stereoCalibrate(opts, lipts, ripts, - self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, # R - self.T, # T - criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), # 30, 1e-6 - flags = flags) - - self.set_alpha(0.0) - - def set_alpha(self, a): - """ - Set the alpha value for the calibrated camera solution. The - alpha value is a zoom, and ranges from 0 (zoomed in, all pixels - in calibrated image are valid) to 1 (zoomed out, all pixels in - original image are in calibrated image). 
- """ - if self.camera_model == CAMERA_MODEL.PINHOLE: - cv2.stereoRectify(self.l.intrinsics, - self.l.distortion, - self.r.intrinsics, - self.r.distortion, - self.size, - self.R, - self.T, - self.l.R, self.r.R, self.l.P, self.r.P, - alpha = a) - - cv2.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1, - self.l.mapx, self.l.mapy) - cv2.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1, - self.r.mapx, self.r.mapy) - - elif self.camera_model == CAMERA_MODEL.FISHEYE: - self.Q = numpy.zeros((4,4), dtype=numpy.float64) - - flags = cv2.CALIB_ZERO_DISPARITY # Operation flags that may be zero or CALIB_ZERO_DISPARITY . - # If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. - # And if the flag is not set, the function may still shift the images in the horizontal or vertical direction - # (depending on the orientation of epipolar lines) to maximize the useful image area. - - cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion, - self.r.intrinsics, self.r.distortion, - self.size, - self.R, self.T, - flags, - self.l.R, self.r.R, - self.l.P, self.r.P, - self.Q, - self.size, - a, - 1.0 ) - self.l.P[:3,:3] = numpy.dot(self.l.intrinsics,self.l.R) - self.r.P[:3,:3] = numpy.dot(self.r.intrinsics,self.r.R) - cv2.fisheye.initUndistortRectifyMap(self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2.CV_32FC1, - self.l.mapx, self.l.mapy) - cv2.fisheye.initUndistortRectifyMap(self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2.CV_32FC1, - self.r.mapx, self.r.mapy) - - def as_message(self): - """ - Return the camera calibration as a pair of CameraInfo messages, for left - and right cameras respectively. - """ - - return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model), - self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model)) - - def from_message(self, msgs, alpha = 0.0): - """ Initialize the camera calibration from a pair of CameraInfo messages. """ - self.size = (msgs[0].width, msgs[0].height) - - self.T = numpy.zeros((3, 1), dtype=numpy.float64) - self.R = numpy.eye(3, dtype=numpy.float64) - - self.l.from_message(msgs[0]) - self.r.from_message(msgs[1]) - # Need to compute self.T and self.R here, using the monocular parameters above - if False: - self.set_alpha(0.0) - - def report(self): - print("\nLeft:") - self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) - print("\nRight:") - self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P) - print("self.T =", numpy.ravel(self.T).tolist()) - print("self.R =", numpy.ravel(self.R).tolist()) - - def ost(self): - return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) + - self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size)) - - def yaml(self, suffix, info): - return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, self.size, self.camera_model) - - # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners - def epipolar_error_from_images(self, limage, rimage): - """ - Detect the checkerboard in both images and compute the epipolar error. - Mainly for use in tests. 
- """ - lcorners = self.downsample_and_detect(limage)[1] - rcorners = self.downsample_and_detect(rimage)[1] - if lcorners is None or rcorners is None: - return None - - lundistorted = self.l.undistort_points(lcorners) - rundistorted = self.r.undistort_points(rcorners) - - return self.epipolar_error(lundistorted, rundistorted) - - def epipolar_error(self, lcorners, rcorners): - """ - Compute the epipolar error from two sets of matching undistorted points - """ - d = lcorners[:,:,1] - rcorners[:,:,1] - return numpy.sqrt(numpy.square(d).sum() / d.size) - - def chessboard_size_from_images(self, limage, rimage): - _, lcorners, _, _, board, _ = self.downsample_and_detect(limage) - _, rcorners, _, _, board, _ = self.downsample_and_detect(rimage) - if lcorners is None or rcorners is None: - return None - - lundistorted = self.l.undistort_points(lcorners) - rundistorted = self.r.undistort_points(rcorners) - - return self.chessboard_size(lundistorted, rundistorted, board) - - def chessboard_size(self, lcorners, rcorners, board, msg = None): - """ - Compute the square edge length from two sets of matching undistorted points - given the current calibration. - :param msg: a tuple of (left_msg, right_msg) - """ - # Project the points to 3d - cam = image_geometry.StereoCameraModel() - if msg == None: - msg = self.as_message() - cam.fromCameraInfo(*msg) - disparities = lcorners[:,:,0] - rcorners[:,:,0] - pt3d = [cam.projectPixelTo3d((lcorners[i,0,0], lcorners[i,0,1]), disparities[i,0]) for i in range(lcorners.shape[0]) ] - def l2(p0, p1): - return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)])) - - # Compute the length from each horizontal and vertical line, and return the mean - cc = board.n_cols - cr = board.n_rows - lengths = ( - [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] + - [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)]) - return sum(lengths) / len(lengths) - - def handle_msg(self, msg): - # TODO Various asserts that images have same dimension, same board detected... 
- (lmsg, rmsg) = msg - lgray = self.mkgray(lmsg) - rgray = self.mkgray(rmsg) - epierror = -1 - - # Get display-images-to-be and detections of the calibration target - lscrib_mono, lcorners, ldownsampled_corners, lids, lboard, (x_scale, y_scale) = self.downsample_and_detect(lgray) - rscrib_mono, rcorners, rdownsampled_corners, rids, rboard, _ = self.downsample_and_detect(rgray) - - if self.calibrated: - # Show rectified images - lremap = self.l.remap(lgray) - rremap = self.r.remap(rgray) - lrect = lremap - rrect = rremap - if x_scale != 1.0 or y_scale != 1.0: - lrect = cv2.resize(lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0])) - rrect = cv2.resize(rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0])) - - lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR) - rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR) - - # Draw rectified corners - if lcorners is not None: - lundistorted = self.l.undistort_points(lcorners) - scrib_src = lundistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True) - - if rcorners is not None: - rundistorted = self.r.undistort_points(rcorners) - scrib_src = rundistorted.copy() - scrib_src[:,:,0] /= x_scale - scrib_src[:,:,1] /= y_scale - cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True) - - # Report epipolar error - if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): - epierror = self.epipolar_error(lundistorted, rundistorted) - - else: - lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR) - rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR) - # Draw any detected chessboards onto display (downsampled) images - if lcorners is not None: - cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows), - ldownsampled_corners, True) - if rcorners is not None: - cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows), - rdownsampled_corners, True) - - # Add sample to database only if it's sufficiently different from any previous sample - if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners): - params = self.get_parameters(lcorners, lids, lboard, (lgray.shape[1], lgray.shape[0])) - if self.is_good_sample(params, lcorners, lids, self.last_frame_corners, self.last_frame_ids): - self.db.append( (params, lgray, rgray) ) - self.good_corners.append( (lcorners, rcorners, lids, rids, lboard) ) - print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % tuple([len(self.db)] + params))) - - self.last_frame_corners = lcorners - self.last_frame_ids = lids - rv = StereoDrawable() - rv.lscrib = lscrib - rv.rscrib = rscrib - rv.params = self.compute_goodenough() - rv.epierror = epierror - return rv - - def do_calibration(self, dump = False): - # TODO MonoCalibrator collects corners if needed here - self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) # TODO Needs to be set externally - # Dump should only occur if user wants it - if dump: - pickle.dump((self.is_mono, self.size, self.good_corners), - open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "w")) - self.l.size = self.size - self.r.size = self.size - self.cal_fromcorners(self.good_corners) - self.calibrated = True - # DEBUG - print((self.report())) - print((self.ost())) - - def do_tarfile_save(self, tf): - """ Write images and calibration solution to a tarfile object """ - ims = ([("left-%04d.png" % i, im) for i,(_, im, _) in enumerate(self.db)] + - [("right-%04d.png" 
% i, im) for i,(_, _, im) in enumerate(self.db)]) - - def taradd(name, buf): - if isinstance(buf, str): - s = BytesIO(buf.encode('utf-8')) - else: - s = BytesIO(buf) - ti = tarfile.TarInfo(name) - ti.size = len(s.getvalue()) - ti.uname = 'calibrator' - ti.mtime = int(time.time()) - tf.addfile(tarinfo=ti, fileobj=s) - - for (name, im) in ims: - taradd(name, cv2.imencode(".png", im)[1].tostring()) - taradd('left.yaml', self.yaml("/left", self.l)) - taradd('right.yaml', self.yaml("/right", self.r)) - taradd('ost.txt', self.ost()) - - def do_tarfile_calibration(self, filename): - archive = tarfile.open(filename, 'r') - limages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] - rimages = [ image_from_archive(archive, f) for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] - - if not len(limages) == len(rimages): - raise CalibrationException("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) - - ##\todo Check that the filenames match and stuff - - self.cal(limages, rimages) + self.params = None diff --git a/camera_calibration/src/camera_calibration/camera_calibrator.py b/camera_calibration/src/camera_calibration/camera_calibrator.py index 4fecabcc0..90357bb2d 100755 --- a/camera_calibration/src/camera_calibration/camera_calibrator.py +++ b/camera_calibration/src/camera_calibration/camera_calibrator.py @@ -31,29 +31,34 @@ # POSSIBILITY OF SUCH DAMAGE. import cv2 -import message_filters import numpy import os import rclpy -from rclpy.node import Node -import sensor_msgs.msg -import sensor_msgs.srv -import threading import time -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, Patterns +import threading +import message_filters + try: from queue import Queue except ImportError: from Queue import Queue -from camera_calibration.calibrator import CAMERA_MODEL + +from rclpy.node import Node from rclpy.qos import qos_profile_system_default from rclpy.qos import QoSProfile +import sensor_msgs.msg +import sensor_msgs.srv + +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import Patterns, CAMERA_MODEL class BufferQueue(Queue): """Slight modification of the standard Queue that discards the oldest item when adding an item and the queue is full. 
""" + def put(self, item, *args, **kwargs): # The base implementation, for reference: # https://github.com/python/cpython/blob/2.7/Lib/Queue.py#L107 @@ -92,19 +97,25 @@ def run(self): class CalibrationNode(Node): - def __init__(self, name, boards, service_check = True, synchronizer = message_filters.TimeSynchronizer, flags = 0, - fisheye_flags = 0, pattern=Patterns.Chessboard, camera_name='', checkerboard_flags = 0, - max_chessboard_speed = -1, queue_size = 1): + def __init__(self, name, boards, service_check=True, + synchronizer=message_filters.TimeSynchronizer, flags=0, fisheye_flags=0, + pattern=Patterns.Chessboard, camera_name='', checkerboard_flags=0, + max_chessboard_speed=-1, queue_size=1): super().__init__(name) - self.set_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "camera/set_camera_info") - self.set_left_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "left_camera/set_camera_info") - self.set_right_camera_info_service = self.create_client(sensor_msgs.srv.SetCameraInfo, "right_camera/set_camera_info") + self.set_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "camera/set_camera_info") + self.set_left_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "left_camera/set_camera_info") + self.set_right_camera_info_service = self.create_client( + sensor_msgs.srv.SetCameraInfo, "right_camera/set_camera_info") if service_check: available = False # assume any non-default service names have been set. Wait for the service to become ready - for cli in [self.set_camera_info_service, self.set_left_camera_info_service, self.set_right_camera_info_service]: + for cli in [ + self.set_camera_info_service, self.set_left_camera_info_service, self. + set_right_camera_info_service]: print("Waiting for service", cli.srv_name, "...") # check all services so they are ready. 
if cli.wait_for_service(timeout_sec=1): @@ -123,12 +134,15 @@ def __init__(self, name, boards, service_check = True, synchronizer = message_fi self._pattern = pattern self._camera_name = camera_name self._max_chessboard_speed = max_chessboard_speed - lsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'left', qos_profile=self.get_topic_qos("left")) - rsub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'right', qos_profile=self.get_topic_qos("right")) + lsub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'left', qos_profile=self.get_topic_qos("left")) + rsub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'right', qos_profile=self.get_topic_qos("right")) ts = synchronizer([lsub, rsub], 4) ts.registerCallback(self.queue_stereo) - msub = message_filters.Subscriber(self, sensor_msgs.msg.Image, 'image', qos_profile=self.get_topic_qos("image")) + msub = message_filters.Subscriber( + self, sensor_msgs.msg.Image, 'image', qos_profile=self.get_topic_qos("image")) msub.registerCallback(self.queue_monocular) self.q_mono = BufferQueue(queue_size) @@ -148,6 +162,7 @@ def __init__(self, name, boards, service_check = True, synchronizer = message_fi def redraw_stereo(self, *args): pass + def redraw_monocular(self, *args): pass @@ -160,13 +175,15 @@ def queue_stereo(self, lmsg, rmsg): def handle_monocular(self, msg): if self.c == None: if self._camera_name: - self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, name=self._camera_name, - checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + self.c = MonoCalibrator( + self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, + name=self._camera_name, checkerboard_flags=self._checkerboard_flags, + max_chessboard_speed=self._max_chessboard_speed) else: - self.c = MonoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, - checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + self.c = MonoCalibrator( + self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, + checkerboard_flags=self._checkerboard_flags, + max_chessboard_speed=self._max_chessboard_speed) # This should just call the MonoCalibrator drawable = self.c.handle_msg(msg) @@ -176,19 +193,20 @@ def handle_monocular(self, msg): def handle_stereo(self, msg): if self.c == None: if self._camera_name: - self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, name=self._camera_name, - checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + self.c = StereoCalibrator( + self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, + name=self._camera_name, checkerboard_flags=self._checkerboard_flags, + max_chessboard_speed=self._max_chessboard_speed) else: - self.c = StereoCalibrator(self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, - checkerboard_flags=self._checkerboard_flags, - max_chessboard_speed = self._max_chessboard_speed) + self.c = StereoCalibrator( + self._boards, self._calib_flags, self._fisheye_calib_flags, self._pattern, + checkerboard_flags=self._checkerboard_flags, + max_chessboard_speed=self._max_chessboard_speed) drawable = self.c.handle_msg(msg) self.displaywidth = drawable.lscrib.shape[1] + drawable.rscrib.shape[1] self.redraw_stereo(drawable) - def check_set_camera_info(self, response): if response.success: return 
True @@ -196,12 +214,14 @@ def check_set_camera_info(self, response): for i in range(10): print("!" * 80) print() - print("Attempt to set camera info failed: " + response.result() if response.result() is not None else "Not available") + print("Attempt to set camera info failed: " + (response.result() + if response.result() is not None else "Not available")) print() for i in range(10): print("!" * 80) print() - self.get_logger().error('Unable to set camera info for calibration. Failure message: %s' % response.result() if response.result() is not None else "Not available") + self.get_logger().error('Unable to set camera info for calibration. Failure message: %s' % + (response.result() if response.result() is not None else "Not available")) return False def do_upload(self): @@ -240,7 +260,8 @@ def get_topic_qos(self, topic_name: str) -> QoSProfile: qos_profile.depth = qos_profile_system_default.depth return qos_profile else: - self.get_logger().warn(f"No publishers available for topic {topic_name}. Using system default QoS for subscriber.") + self.get_logger().warn( + f"No publishers available for topic {topic_name}. Using system default QoS for subscriber.") return qos_profile_system_default @@ -276,12 +297,14 @@ def spin(self): def initWindow(self): cv2.namedWindow("display", cv2.WINDOW_NORMAL) cv2.setMouseCallback("display", self.on_mouse) - cv2.createTrackbar("Camera type: \n 0 : pinhole \n 1 : fisheye", "display", 0,1, self.on_model_change) + cv2.createTrackbar("Camera type: \n 0 : pinhole \n 1 : fisheye", + "display", 0, 1, self.on_model_change) cv2.createTrackbar("scale", "display", 0, 100, self.on_scale) @classmethod - def putText(cls, img, text, org, color = (0,0,0)): - cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, color, thickness = cls.FONT_THICKNESS) + def putText(cls, img, text, org, color=(0, 0, 0)): + cv2.putText(img, text, org, cls.FONT_FACE, cls.FONT_SCALE, + color, thickness=cls.FONT_THICKNESS) @classmethod def getTextSize(cls, text): @@ -293,7 +316,8 @@ def on_mouse(self, event, x, y, flags, param): if 180 <= y < 280: print("**** Calibrating ****") # Perform calibration in another thread to prevent UI blocking - threading.Thread(target=self.c.do_calibration, name="Calibration").start() + threading.Thread(target=self.c.do_calibration, + name="Calibration").start() self.buttons(self._last_display) self.queue_display.put(self._last_display) if self.c.calibrated: @@ -303,15 +327,17 @@ def on_mouse(self, event, x, y, flags, param): # Only shut down if we set camera info correctly, #3993 if self.do_upload(): rclpy.shutdown() + def on_model_change(self, model_select_val): if self.c == None: print("Cannot change camera model until the first image has been received") return - self.c.set_cammodel( CAMERA_MODEL.PINHOLE if model_select_val < 0.5 else CAMERA_MODEL.FISHEYE) + self.c.set_cammodel( + CAMERA_MODEL.PINHOLE if model_select_val < 0.5 else CAMERA_MODEL.FISHEYE) def on_scale(self, scalevalue): - if self.c.calibrated: + if self.c and self.c.calibrated: self.c.set_alpha(scalevalue / 100.0) def button(self, dst, label, enable): @@ -321,15 +347,17 @@ def button(self, dst, label, enable): color = (155, 155, 80) else: color = (224, 224, 224) - cv2.circle(dst, (size[0] // 2, size[1] // 2), min(size) // 2, color, -1) + cv2.circle(dst, (size[0] // 2, size[1] // 2), + min(size) // 2, color, -1) (w, h) = self.getTextSize(label) - self.putText(dst, label, ((size[0] - w) // 2, (size[1] + h) // 2), (255,255,255)) + self.putText( + dst, label, ((size[0] - w) // 2, (size[1] + h) // 2),
(255, 255, 255)) def buttons(self, display): x = self.displaywidth - self.button(display[180:280,x:x+100], "CALIBRATE", self.c.goodenough) - self.button(display[280:380,x:x+100], "SAVE", self.c.calibrated) - self.button(display[380:480,x:x+100], "COMMIT", self.c.calibrated) + self.button(display[180:280, x:x+100], "CALIBRATE", self.c.goodenough) + self.button(display[280:380, x:x+100], "SAVE", self.c.calibrated) + self.button(display[380:480, x:x+100], "COMMIT", self.c.calibrated) def y(self, i): """Set up right-size images""" @@ -346,23 +374,25 @@ def redraw_monocular(self, drawable): height = drawable.scrib.shape[0] width = drawable.scrib.shape[1] - display = numpy.zeros((max(480, height), width + 100, 3), dtype=numpy.uint8) - display[0:height, 0:width,:] = drawable.scrib - display[0:height, width:width+100,:].fill(255) + display = numpy.zeros( + (max(480, height), width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width, :] = drawable.scrib + display[0:height, width:width+100, :].fill(255) self.buttons(display) if not self.c.calibrated: if drawable.params: - for i, (label, lo, hi, progress) in enumerate(drawable.params): - (w,_) = self.getTextSize(label) - self.putText(display, label, (width + (100 - w) // 2, self.y(i))) - color = (0,255,0) + for i, (label, lo, hi, progress) in enumerate(drawable.params): + (w, _) = self.getTextSize(label) + self.putText(display, label, + (width + (100 - w) // 2, self.y(i))) + color = (0, 255, 0) if progress < 1.0: color = (0, int(progress*255.), 255) cv2.line(display, - (int(width + lo * 100), self.y(i) + 20), - (int(width + hi * 100), self.y(i) + 20), - color, 4) + (int(width + lo * 100), self.y(i) + 20), + (int(width + hi * 100), self.y(i) + 20), + color, 4) else: self.putText(display, "lin.", (width, self.y(0))) @@ -371,7 +401,7 @@ def redraw_monocular(self, drawable): msg = "?" else: msg = "%.2f" % linerror - #print "linear", linerror + # print "linear", linerror self.putText(display, msg, (width, self.y(1))) self._last_display = display @@ -381,25 +411,27 @@ def redraw_stereo(self, drawable): height = drawable.lscrib.shape[0] width = drawable.lscrib.shape[1] - display = numpy.zeros((max(480, height), 2 * width + 100, 3), dtype=numpy.uint8) - display[0:height, 0:width,:] = drawable.lscrib - display[0:height, width:2*width,:] = drawable.rscrib - display[0:height, 2*width:2*width+100,:].fill(255) + display = numpy.zeros( + (max(480, height), 2 * width + 100, 3), dtype=numpy.uint8) + display[0:height, 0:width, :] = drawable.lscrib + display[0:height, width:2*width, :] = drawable.rscrib + display[0:height, 2*width:2*width+100, :].fill(255) self.buttons(display) if not self.c.calibrated: if drawable.params: for i, (label, lo, hi, progress) in enumerate(drawable.params): - (w,_) = self.getTextSize(label) - self.putText(display, label, (2 * width + (100 - w) // 2, self.y(i))) - color = (0,255,0) + (w, _) = self.getTextSize(label) + self.putText(display, label, (2 * width + + (100 - w) // 2, self.y(i))) + color = (0, 255, 0) if progress < 1.0: color = (0, int(progress*255.), 255) cv2.line(display, - (int(2 * width + lo * 100), self.y(i) + 20), - (int(2 * width + hi * 100), self.y(i) + 20), - color, 4) + (int(2 * width + lo * 100), self.y(i) + 20), + (int(2 * width + hi * 100), self.y(i) + 20), + color, 4) else: self.putText(display, "epi.", (2 * width, self.y(0))) @@ -411,7 +443,8 @@ def redraw_stereo(self, drawable): # TODO dim is never set anywhere. Supposed to be observed chessboard size? 
if drawable.dim != -1: self.putText(display, "dim", (2 * width, self.y(2))) - self.putText(display, "%.3f" % drawable.dim, (2 * width, self.y(3))) + self.putText(display, "%.3f" % + drawable.dim, (2 * width, self.y(3))) self._last_display = display self.queue_display.put(display) diff --git a/camera_calibration/src/camera_calibration/camera_checker.py b/camera_calibration/src/camera_calibration/camera_checker.py index 19149b5ec..10b0d86eb 100755 --- a/camera_calibration/src/camera_calibration/camera_checker.py +++ b/camera_calibration/src/camera_calibration/camera_checker.py @@ -35,34 +35,39 @@ import cv2 import cv_bridge import functools -import message_filters import numpy -import rclpy -from rclpy.node import Node -import sensor_msgs.msg -import sensor_msgs.srv import threading - -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, ChessboardInfo -from message_filters import ApproximateTimeSynchronizer - try: from queue import Queue except ImportError: from Queue import Queue +import message_filters +from message_filters import ApproximateTimeSynchronizer +import rclpy +from rclpy.node import Node +import sensor_msgs.msg +import sensor_msgs.srv + +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo + def mean(seq): return sum(seq) / len(seq) + def lmin(seq1, seq2): """ Pairwise minimum of two sequences """ return [min(a, b) for (a, b) in zip(seq1, seq2)] + def lmax(seq1, seq2): """ Pairwise maximum of two sequences """ return [max(a, b) for (a, b) in zip(seq1, seq2)] + class ConsumerThread(threading.Thread): def __init__(self, queue, function): threading.Thread.__init__(self) @@ -76,6 +81,7 @@ def run(self): break self.function(m) + class CameraCheckerNode(Node): def __init__(self, name, chess_size, dim, approximate=0): @@ -100,9 +106,11 @@ def __init__(self, name, chess_size, dim, approximate=0): if approximate <= 0: sync = message_filters.TimeSynchronizer else: - sync = functools.partial(ApproximateTimeSynchronizer, slop=approximate) + sync = functools.partial( + ApproximateTimeSynchronizer, slop=approximate) - tsm = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_mono], 10) + tsm = sync([message_filters.Subscriber(self, type, topic) + for (topic, type) in tosync_mono], 10) tsm.registerCallback(self.queue_monocular) left_topic = "stereo/left/image_rect" @@ -117,7 +125,8 @@ def __init__(self, name, chess_size, dim, approximate=0): (right_camera_topic, sensor_msgs.msg.CameraInfo) ] - tss = sync([message_filters.Subscriber(self, type, topic) for (topic, type) in tosync_stereo], 10) + tss = sync([message_filters.Subscriber(self, type, topic) + for (topic, type) in tosync_stereo], 10) tss.registerCallback(self.queue_stereo) self.br = cv_bridge.CvBridge() @@ -162,24 +171,29 @@ def handle_monocular(self, msg): # Add in reprojection check image_points = C - object_points = self.mc.mk_object_points([self.board], use_board_size=True)[0] + object_points = self.mc.mk_object_points( + [self.board], use_board_size=True)[0] dist_coeffs = numpy.zeros((4, 1)) - camera_matrix = numpy.array( [ [ camera.P[0], camera.P[1], camera.P[2] ], - [ camera.P[4], camera.P[5], camera.P[6] ], - [ camera.P[8], camera.P[9], camera.P[10] ] ] ) - ok, rot, trans = cv2.solvePnP(object_points, image_points, camera_matrix, dist_coeffs) + camera_matrix = numpy.array([[camera.p[0], camera.p[1], camera.p[2]], + [camera.p[4], camera.p[5], 
camera.p[6]], + [camera.p[8], camera.p[9], camera.p[10]]]) + ok, rot, trans = cv2.solvePnP( + object_points, image_points, camera_matrix, dist_coeffs) # Convert rotation into a 3x3 Rotation Matrix rot3x3, _ = cv2.Rodrigues(rot) # Reproject model points into image - object_points_world = numpy.asmatrix(rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans) + object_points_world = numpy.asmatrix( + rot3x3) * numpy.asmatrix(object_points.squeeze().T) + numpy.asmatrix(trans) reprojected_h = camera_matrix * object_points_world - reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :]) + reprojected = (reprojected_h[0:2, :] / reprojected_h[2, :]) reprojection_errors = image_points.squeeze().T - reprojected - reprojection_rms = numpy.sqrt(numpy.sum(numpy.array(reprojection_errors) ** 2) / numpy.product(reprojection_errors.shape)) + reprojection_rms = numpy.sqrt(numpy.sum(numpy.array( + reprojection_errors) ** 2) / numpy.prod(reprojection_errors.shape)) # Print the results - print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % (linearity_rms, reprojection_rms)) + print("Linearity RMS Error: %.3f Pixels Reprojection RMS Error: %.3f Pixels" % ( + linearity_rms, reprojection_rms)) else: print('no chessboard') @@ -194,8 +208,10 @@ def handle_stereo(self, msg): if L is not None and R is not None: epipolar = self.sc.epipolar_error(L, R) - dimension = self.sc.chessboard_size(L, R, self.board, msg=(lcmsg, rcmsg)) + dimension = self.sc.chessboard_size( + L, R, self.board, msg=(lcmsg, rcmsg)) - print("epipolar error: %f pixels dimension: %f m" % (epipolar, dimension)) + print("epipolar error: %f pixels dimension: %f m" % + (epipolar, dimension)) else: print("no chessboard") diff --git a/camera_calibration/src/camera_calibration/mono_calibrator.py b/camera_calibration/src/camera_calibration/mono_calibrator.py new file mode 100644 index 000000000..326462a20 --- /dev/null +++ b/camera_calibration/src/camera_calibration/mono_calibrator.py @@ -0,0 +1,414 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. +# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from io import BytesIO +import cv2 +import math +import numpy.linalg +import pickle +import random +import tarfile +import time +from camera_calibration.calibrator import ( + Calibrator, + CalibrationException, + CAMERA_MODEL, + Patterns, + ImageDrawable, + image_from_archive +) + + +class MonoDrawable(ImageDrawable): + def __init__(self): + ImageDrawable.__init__(self) + self.scrib = None + self.linear_error = -1.0 + + +class MonoCalibrator(Calibrator): + """ + Calibration class for monocular cameras:: + + images = [cv2.imread("mono%d.png" % i) for i in range(8)] + mc = MonoCalibrator() + mc.cal(images) + print(mc.as_message()) + """ + + is_mono = True # TODO Could get rid of is_mono + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo/left' + super(MonoCalibrator, self).__init__(*args, **kwargs) + + def cal(self, images): + """ + Calibrate camera from given images + """ + goodcorners = self.collect_corners(images) + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, images): + """ + :param images: source images containing chessboards + :type images: list of :class:`cvMat` + + Find chessboards in all images. + + Return [ (corners, ids, ChessboardInfo) ] + """ + self.size = (images[0].shape[1], images[0].shape[0]) + corners = [self.get_corners(i) for i in images] + + goodcorners = [(co, ids, b) for (ok, co, ids, b) in corners if ok] + if not goodcorners: + raise CalibrationException("No corners found in images!") + return goodcorners + + def cal_fromcorners(self, good): + """ + :param good: Good corner positions and boards + :type good: [(corners, ChessboardInfo)] + """ + + (ipts, ids, boards) = zip(*good) + opts = self.mk_object_points(boards) + # If FIX_ASPECT_RATIO flag set, enforce focal lengths have 1/1 ratio + intrinsics_in = numpy.eye(3, dtype=numpy.float64) + + if self.pattern == Patterns.ChArUco: + if self.camera_model == CAMERA_MODEL.FISHEYE: + raise NotImplementedError( + "Can't perform fisheye calibration with ChArUco board") + + reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.aruco.calibrateCameraCharuco( + ipts, ids, boards[0].charuco_board, self.size, intrinsics_in, None) + + elif self.camera_model == CAMERA_MODEL.PINHOLE: + print("mono pinhole calibration...") + reproj_err, self.intrinsics, dist_coeffs, rvecs, tvecs = cv2.calibrateCamera( + opts, ipts, + self.size, + intrinsics_in, + None, + flags=self.calib_flags) + # OpenCV returns more than 8 coefficients (the additional ones all zeros) when CALIB_RATIONAL_MODEL is set. + # The extra ones include e.g. thin prism coefficients, which we are not interested in.
+ if self.calib_flags & cv2.CALIB_RATIONAL_MODEL: + # rational polynomial + self.distortion = dist_coeffs.flat[:8].reshape(-1, 1) + else: + # plumb bob + self.distortion = dist_coeffs.flat[:5].reshape(-1, 1) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + print("mono fisheye calibration...") + # WARNING: cv2.fisheye.calibrate wants float64 points + ipts64 = numpy.asarray(ipts, dtype=numpy.float64) + ipts = ipts64 + opts64 = numpy.asarray(opts, dtype=numpy.float64) + opts = opts64 + reproj_err, self.intrinsics, self.distortion, rvecs, tvecs = cv2.fisheye.calibrate( + opts, ipts, self.size, + intrinsics_in, None, flags=self.fisheye_calib_flags) + + # R is identity matrix for monocular calibration + self.R = numpy.eye(3, dtype=numpy.float64) + self.P = numpy.zeros((3, 4), dtype=numpy.float64) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The alpha + value is a zoom, and ranges from 0 (zoomed in, all pixels in + calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). + """ + + if self.camera_model == CAMERA_MODEL.PINHOLE: + # NOTE: Prior to Electric, this code was broken such that we never actually saved the new + # camera matrix. In effect, this enforced P = [K|0] for monocular cameras. + # TODO: Verify that OpenCV #1199 gets applied (improved GetOptimalNewCameraMatrix) + ncm, _ = cv2.getOptimalNewCameraMatrix( + self.intrinsics, self.distortion, self.size, a) + for j in range(3): + for i in range(3): + self.P[j, i] = ncm[j, i] + self.mapx, self.mapy = cv2.initUndistortRectifyMap( + self.intrinsics, self.distortion, self.R, ncm, self.size, cv2.CV_32FC1) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + # NOTE: cv2.fisheye.estimateNewCameraMatrixForUndistortRectify not producing proper results, using a naive approach instead: + self.P[:3, :3] = self.intrinsics[:3, :3] + self.P[0, 0] /= (1. + a) + self.P[1, 1] /= (1. 
+ a) + self.mapx, self.mapy = cv2.fisheye.initUndistortRectifyMap( + self.intrinsics, self.distortion, self.R, self.P, self.size, cv2.CV_32FC1) + + def remap(self, src): + """ + :param src: source image + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source image + """ + return cv2.remap(src, self.mapx, self.mapy, cv2.INTER_LINEAR) + + def undistort_points(self, src): + """ + :param src: N source pixel points (u,v) as an Nx2 matrix + :type src: :class:`cvMat` + + Apply the post-calibration undistortion to the source points + """ + if self.camera_model == CAMERA_MODEL.PINHOLE: + return cv2.undistortPoints(src, self.intrinsics, self.distortion, R=self.R, P=self.P) + elif self.camera_model == CAMERA_MODEL.FISHEYE: + return cv2.fisheye.undistortPoints( + src, self.intrinsics, self.distortion, R=self.R, P=self.P) + + def as_message(self): + """ Return the camera calibration as a CameraInfo message """ + return self.lrmsg( + self.distortion, self.intrinsics, self.R, self.P, self.size, self.camera_model) + + def from_message(self, msg, alpha=0.0): + """ Initialize the camera calibration from a CameraInfo message """ + + self.size = (msg.width, msg.height) + self.intrinsics = numpy.array( + msg.k, dtype=numpy.float64, copy=True).reshape((3, 3)) + self.distortion = numpy.array( + msg.d, dtype=numpy.float64, copy=True).reshape((len(msg.d), 1)) + self.R = numpy.array(msg.r, dtype=numpy.float64, + copy=True).reshape((3, 3)) + self.P = numpy.array(msg.p, dtype=numpy.float64, + copy=True).reshape((3, 4)) + + self.set_alpha(0.0) + + def report(self): + self.lrreport(self.distortion, self.intrinsics, self.R, self.P) + + def ost(self): + return self.lrost(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size) + + def yaml(self): + return self.lryaml(self.name, self.distortion, self.intrinsics, self.R, self.P, self.size, + self.camera_model) + + def linear_error_from_image(self, image): + """ + Detect the checkerboard and compute the linear error. + Mainly for use in tests. + """ + _, corners, _, ids, board, _ = self.downsample_and_detect(image) + if corners is None: + return None + + undistorted = self.undistort_points(corners) + return self.linear_error(undistorted, ids, board) + + @staticmethod + def linear_error(corners, ids, b): + """ + Returns the linear error for a set of corners detected in the unrectified image. 
+ """ + + if corners is None: + return None + + corners = numpy.squeeze(corners) + + def pt2line(x0, y0, x1, y1, x2, y2): + """ point is (x0, y0), line is (x1, y1, x2, y2) """ + return abs((x2 - x1) * (y1 - y0) - (x1 - x0) * (y2 - y1)) / math.sqrt((x2 - x1) ** 2 + + (y2 - y1) ** 2) + + n_cols = b.n_cols + n_rows = b.n_rows + if b.pattern == 'charuco': + n_cols -= 1 + n_rows -= 1 + n_pts = n_cols * n_rows + + if ids is None: + ids = numpy.arange(n_pts).reshape((n_pts, 1)) + + ids_to_idx = dict((ids[i, 0], i) for i in range(len(ids))) + + errors = [] + for row in range(n_rows): + row_min = row * n_cols + row_max = (row+1) * n_cols + pts_in_row = [x for x in ids if row_min <= x < row_max] + + # not enough points to calculate error + if len(pts_in_row) <= 2: + continue + + left_pt = min(pts_in_row)[0] + right_pt = max(pts_in_row)[0] + x_left = corners[ids_to_idx[left_pt], 0] + y_left = corners[ids_to_idx[left_pt], 1] + x_right = corners[ids_to_idx[right_pt], 0] + y_right = corners[ids_to_idx[right_pt], 1] + + for pt in pts_in_row: + if pt[0] in (left_pt, right_pt): + continue + x = corners[ids_to_idx[pt[0]], 0] + y = corners[ids_to_idx[pt[0]], 1] + errors.append(pt2line(x, y, x_left, y_left, x_right, y_right)) + + if errors: + return math.sqrt(sum([e**2 for e in errors]) / len(errors)) + else: + return None + + def handle_msg(self, msg): + """ + Detects the calibration target and, if found and provides enough new information, + adds it to the sample database. + + Returns a MonoDrawable message with the display image and progress info. + """ + gray = self.mkgray(msg) + linear_error = -1 + + # Get display-image-to-be (scrib) and detection of the calibration target + scrib_mono, corners, downsampled_corners, ids, board, ( + x_scale, y_scale) = self.downsample_and_detect(gray) + + if self.calibrated: + # Show rectified image + # TODO Pull out downsampling code into function + gray_remap = self.remap(gray) + gray_rect = gray_remap + if x_scale != 1.0 or y_scale != 1.0: + gray_rect = cv2.resize( + gray_remap, (scrib_mono.shape[1], scrib_mono.shape[0])) + + scrib = cv2.cvtColor(gray_rect, cv2.COLOR_GRAY2BGR) + + if corners is not None: + # Report linear error + undistorted = self.undistort_points(corners) + linear_error = self.linear_error(undistorted, ids, board) + + # Draw rectified corners + scrib_src = undistorted.copy() + scrib_src[:, :, 0] /= x_scale + scrib_src[:, :, 1] /= y_scale + cv2.drawChessboardCorners( + scrib, (board.n_cols, board.n_rows), scrib_src, True) + + else: + scrib = cv2.cvtColor(scrib_mono, cv2.COLOR_GRAY2BGR) + if corners is not None: + # Draw (potentially downsampled) corners onto display image + if board.pattern == "charuco": + cv2.aruco.drawDetectedCornersCharuco( + scrib, downsampled_corners, ids) + else: + cv2.drawChessboardCorners( + scrib, (board.n_cols, board.n_rows), downsampled_corners, True) + + # Add sample to database only if it's sufficiently different from any previous sample. 
+ params = self.get_parameters( + corners, ids, board, (gray.shape[1], gray.shape[0])) + if self.is_good_sample( + params, corners, ids, self.last_frame_corners, self.last_frame_ids): + self.db.append((params, gray)) + if self.pattern == Patterns.ChArUco: + self.good_corners.append((corners, ids, board)) + else: + self.good_corners.append((corners, None, board)) + print(("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" % + tuple([len(self.db)] + params))) + + self.last_frame_corners = corners + self.last_frame_ids = ids + rv = MonoDrawable() + rv.scrib = scrib + rv.params = self.compute_goodenough() + rv.linear_error = linear_error + return rv + + def do_calibration(self, dump=False): + if not self.good_corners: + print("**** Collecting corners for all images! ****") # DEBUG + images = [i for (p, i) in self.db] + self.good_corners = self.collect_corners(images) + # TODO Needs to be set externally + self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0]) + # Dump should only occur if user wants it + if dump: + pickle.dump((self.is_mono, self.size, self.good_corners), + open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "wb")) + self.cal_fromcorners(self.good_corners) + self.calibrated = True + # DEBUG + print((self.report())) + print((self.ost())) + + def do_tarfile_save(self, tf): + """ Write images and calibration solution to a tarfile object """ + + def taradd(name, buf): + if isinstance(buf, str): + s = BytesIO(buf.encode('utf-8')) + else: + s = BytesIO(buf) + ti = tarfile.TarInfo(name) + ti.size = len(s.getvalue()) + ti.uname = 'calibrator' + ti.mtime = int(time.time()) + tf.addfile(tarinfo=ti, fileobj=s) + + ims = [("left-%04d.png" % i, im) for i, (_, im) in enumerate(self.db)] + for (name, im) in ims: + taradd(name, cv2.imencode(".png", im)[1].tobytes()) + taradd('ost.yaml', self.yaml()) + taradd('ost.txt', self.ost()) + + def do_tarfile_calibration(self, filename): + archive = tarfile.open(filename, 'r') + + limages = [image_from_archive(archive, f) for f in archive.getnames() if ( + f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')))] + + self.cal(limages) diff --git a/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py b/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py index df08e2706..612cd104b 100755 --- a/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py +++ b/camera_calibration/src/camera_calibration/nodes/cameracalibrator.py @@ -40,17 +40,20 @@ from camera_calibration.calibrator import ChessboardInfo, Patterns from message_filters import ApproximateTimeSynchronizer + def optionsValidCharuco(options, parser): """ Validates the provided options when the pattern type is 'charuco' """ - if options.pattern != 'charuco': return False + if options.pattern != 'charuco': + return False n_boards = len(options.size) if (n_boards != len(options.square) or n_boards != len(options.charuco_marker_size) or n_boards != len(options.aruco_dict)): - parser.error("When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict " + - "must be specified for each board") + parser.error( + "When using ChArUco boards, --size, --square, --charuco_marker_size, and --aruco_dict " + + "must be specified for each board") return False # TODO: check for fisheye and stereo (not implemented with ChArUco) @@ -62,18 +65,19 @@ def main(): parser = OptionParser("%prog --size SIZE1 --square SQUARE1 [ --size SIZE2 --square SQUARE2 ]", description=None) parser.add_option("-c",
"--camera_name", - type="string", default='narrow_stereo', - help="name of the camera to appear in the calibration file") - group = OptionGroup(parser, "Chessboard Options", - "You must specify one or more chessboards as pairs of --size and --square options.") + type="string", default='narrow_stereo', + help="name of the camera to appear in the calibration file") + group = OptionGroup( + parser, "Chessboard Options", + "You must specify one or more chessboards as pairs of --size and --square options.") group.add_option("-p", "--pattern", type="string", default="chessboard", help="calibration pattern to detect - 'chessboard', 'circles', 'acircles', 'charuco'\n" + " if 'charuco' is used, a --charuco_marker_size and --aruco_dict argument must be supplied\n" + " with each --size and --square argument") - group.add_option("-s", "--size", - action="append", default=[], - help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)") + group.add_option( + "-s", "--size", action="append", default=[], + help="chessboard size as NxM, counting interior corners (e.g. a standard chessboard is 7x7)") group.add_option("-q", "--square", action="append", default=[], help="chessboard square size in meters") @@ -86,9 +90,9 @@ def main(): "'5x5_250', '6x6_250', '7x7_250'") parser.add_option_group(group) group = OptionGroup(parser, "ROS Communication Options") - group.add_option("--approximate", - type="float", default=0.0, - help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") + group.add_option( + "--approximate", type="float", default=0.0, + help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") group.add_option("--no-service-check", action="store_false", dest="service_check", default=True, help="disable check for set_camera_info services at startup") @@ -106,22 +110,22 @@ def main(): group.add_option("--zero-tangent-dist", action="store_true", default=False, help="for pinhole, set tangential distortion coefficients (p1, p2) to zero") - group.add_option("-k", "--k-coefficients", - type="int", default=2, metavar="NUM_COEFFS", - help="for pinhole, number of radial distortion coefficients to use (up to 6, default %default)") + group.add_option( + "-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS", + help="for pinhole, number of radial distortion coefficients to use (up to 6, default %default)") - group.add_option("--fisheye-recompute-extrinsicsts", - action="store_true", default=False, - help="for fisheye, extrinsic will be recomputed after each iteration of intrinsic optimization") + group.add_option( + "--fisheye-recompute-extrinsicsts", action="store_true", default=False, + help="for fisheye, extrinsic will be recomputed after each iteration of intrinsic optimization") group.add_option("--fisheye-fix-skew", action="store_true", default=False, help="for fisheye, skew coefficient (alpha) is set to zero and stay zero") group.add_option("--fisheye-fix-principal-point", action="store_true", default=False, help="for fisheye,fix the principal point at the image center") - group.add_option("--fisheye-k-coefficients", - type="int", default=4, metavar="NUM_COEFFS", - help="for fisheye, number of radial distortion coefficients to use fixing to zero the rest (up to 4, default %default)") + group.add_option( + "--fisheye-k-coefficients", type="int", default=4, metavar="NUM_COEFFS", + help="for fisheye, number of radial distortion coefficients to use fixing to zero the rest (up to 4, 
default %default)") group.add_option("--fisheye-check-conditions", action="store_true", default=False, @@ -147,16 +151,19 @@ def main(): if options.pattern == "charuco" and optionsValidCharuco(options, parser): for (sz, sq, ms, ad) in zip(options.size, options.square, options.charuco_marker_size, options.aruco_dict): size = tuple([int(c) for c in sz.split('x')]) - boards.append(ChessboardInfo('charuco', size[0], size[1], float(sq), float(ms), ad)) + boards.append(ChessboardInfo( + 'charuco', size[0], size[1], float(sq), float(ms), ad)) else: for (sz, sq) in zip(options.size, options.square): size = tuple([int(c) for c in sz.split('x')]) - boards.append(ChessboardInfo(options.pattern, size[0], size[1], float(sq))) + boards.append(ChessboardInfo( + options.pattern, size[0], size[1], float(sq))) if options.approximate == 0.0: sync = message_filters.TimeSynchronizer else: - sync = functools.partial(ApproximateTimeSynchronizer, slop=options.approximate) + sync = functools.partial( + ApproximateTimeSynchronizer, slop=options.approximate) # Pinhole opencv calibration options parsing num_ks = options.k_coefficients @@ -211,7 +218,8 @@ def main(): elif options.pattern == 'charuco': pattern = Patterns.ChArUco elif options.pattern != 'chessboard': - print('Unrecognized pattern %s, defaulting to chessboard' % options.pattern) + print('Unrecognized pattern %s, defaulting to chessboard' % + options.pattern) if options.disable_calib_cb_fast_check: checkerboard_flags = 0 @@ -219,13 +227,14 @@ def main(): checkerboard_flags = cv2.CALIB_CB_FAST_CHECK rclpy.init() - node = OpenCVCalibrationNode("cameracalibrator", boards, options.service_check, sync, - calib_flags, fisheye_calib_flags, pattern, options.camera_name, - checkerboard_flags=checkerboard_flags, max_chessboard_speed=options.max_chessboard_speed, - queue_size=options.queue_size) + node = OpenCVCalibrationNode( + "cameracalibrator", boards, options.service_check, sync, calib_flags, fisheye_calib_flags, + pattern, options.camera_name, checkerboard_flags=checkerboard_flags, + max_chessboard_speed=options.max_chessboard_speed, queue_size=options.queue_size) node.spin() rclpy.shutdown() + if __name__ == "__main__": try: main() diff --git a/camera_calibration/src/camera_calibration/nodes/cameracheck.py b/camera_calibration/src/camera_calibration/nodes/cameracheck.py index b2b3906cf..2b4f1b739 100755 --- a/camera_calibration/src/camera_calibration/nodes/cameracheck.py +++ b/camera_calibration/src/camera_calibration/nodes/cameracheck.py @@ -39,11 +39,13 @@ def main(): from optparse import OptionParser parser = OptionParser() - parser.add_option("-s", "--size", default="8x6", help="specify chessboard size as nxm [default: %default]") - parser.add_option("-q", "--square", default=".108", help="specify chessboard square size in meters [default: %default]") - parser.add_option("--approximate", - type="float", default=0.0, - help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") + parser.add_option("-s", "--size", default="8x6", + help="specify chessboard size as nxm [default: %default]") + parser.add_option("-q", "--square", default=".108", + help="specify chessboard square size in meters [default: %default]") + parser.add_option( + "--approximate", type="float", default=0.0, + help="allow specified slop (in seconds) when pairing images from unsynchronized stereo cameras") options, _ = parser.parse_args(rclpy.utilities.remove_ros_args()) rclpy.init() @@ -54,5 +56,6 @@ def main(): node = CameraCheckerNode("cameracheck", 
size, dim, approximate) rclpy.spin(node) + if __name__ == "__main__": main() diff --git a/camera_calibration/src/camera_calibration/nodes/tarfile_calibration.py b/camera_calibration/src/camera_calibration/nodes/tarfile_calibration.py index f5f0db6a2..92f134a9c 100755 --- a/camera_calibration/src/camera_calibration/nodes/tarfile_calibration.py +++ b/camera_calibration/src/camera_calibration/nodes/tarfile_calibration.py @@ -34,26 +34,29 @@ import os import numpy - import cv2 import cv_bridge import tarfile -from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, CalibrationException, ChessboardInfo - import rclpy import sensor_msgs.srv +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo + + def display(win_name, img): cv2.namedWindow(win_name, cv2.WINDOW_NORMAL) - cv2.imshow( win_name, numpy.asarray( img[:,:] )) + cv2.imshow(win_name, numpy.asarray(img[:, :])) k = cv2.waitKey(0) cv2.destroyWindow(win_name) if k in [27, ord('q')]: rclpy.shutdown() -def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags = 0, visualize = False, alpha=1.0): +def cal_from_tarfile( + boards, tarname, mono=False, upload=False, calib_flags=0, visualize=False, alpha=1.0): if mono: calibrator = MonoCalibrator(boards, calib_flags) else: @@ -63,26 +66,31 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags print(calibrator.ost()) - if upload: + if upload: info = calibrator.as_message() if mono: - set_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "camera", sensor_msgs.srv.SetCameraInfo) + set_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "camera", sensor_msgs.srv.SetCameraInfo) response = set_camera_info_service.call(info) if not response.success: - raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + raise RuntimeError( + "connected to set_camera_info service, but failed setting camera_info") else: - set_left_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "left_camera", sensor_msgs.srv.SetCameraInfo) - set_right_camera_info_service = rclpy.ServiceProxy("%s/set_camera_info" % "right_camera", sensor_msgs.srv.SetCameraInfo) + set_left_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "left_camera", sensor_msgs.srv.SetCameraInfo) + set_right_camera_info_service = rclpy.ServiceProxy( + "%s/set_camera_info" % "right_camera", sensor_msgs.srv.SetCameraInfo) response1 = set_left_camera_info_service(info[0]) response2 = set_right_camera_info_service(info[1]) if not (response1.result().success and response2.result().success): - raise RuntimeError("connected to set_camera_info service, but failed setting camera_info") + raise RuntimeError( + "connected to set_camera_info service, but failed setting camera_info") if visualize: - #Show rectified images + # Show rectified images calibrator.set_alpha(alpha) archive = tarfile.open(tarname, 'r') @@ -90,62 +98,70 @@ def cal_from_tarfile(boards, tarname, mono = False, upload = False, calib_flags for f in archive.getnames(): if f.startswith('left') and (f.endswith('.pgm') or f.endswith('png')): filedata = archive.extractfile(f).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) 
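At this point in the visualize path, cal_from_tarfile has decoded a tar member back into a BGR image; the next lines wrap it via cv_bridge and replay it through handle_msg. A minimal sketch of that round-trip in isolation (the file name is hypothetical; assumes OpenCV, numpy and cv_bridge are installed):

```
import cv2
import cv_bridge
import numpy

# Hypothetical archived frame; any 8-bit colour image works.
with open("left-0000.png", "rb") as f:
    file_bytes = numpy.frombuffer(f.read(), dtype=numpy.uint8)

im = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR)       # bytes -> BGR ndarray
msg = cv_bridge.CvBridge().cv2_to_imgmsg(im, "bgr8")  # ndarray -> sensor_msgs/Image
```

Note that the upload branch earlier in cal_from_tarfile still calls rclpy.ServiceProxy, a rospy-era name that rclpy does not provide; in rclpy, service clients are created with node.create_client, as CalibrationNode does above.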
bridge = cv_bridge.CvBridge() try: - msg=bridge.cv2_to_imgmsg(im, "bgr8") + msg = bridge.cv2_to_imgmsg(im, "bgr8") except cv_bridge.CvBridgeError as e: print(e) - #handle msg returns the recitifed image with corner detection once camera is calibrated. - drawable=calibrator.handle_msg(msg) - vis=numpy.asarray( drawable.scrib[:,:]) - #Display. Name of window:f + # handle msg returns the rectified image with corner detection once camera is calibrated. + drawable = calibrator.handle_msg(msg) + vis = numpy.asarray(drawable.scrib[:, :]) + # Display. Name of window: f display(f, vis) else: - limages = [ f for f in archive.getnames() if (f.startswith('left') and (f.endswith('pgm') or f.endswith('png'))) ] + limages = [f for f in archive.getnames() if (f.startswith( + 'left') and (f.endswith('pgm') or f.endswith('png')))] limages.sort() - rimages = [ f for f in archive.getnames() if (f.startswith('right') and (f.endswith('pgm') or f.endswith('png'))) ] + rimages = [f for f in archive.getnames() if (f.startswith( + 'right') and (f.endswith('pgm') or f.endswith('png')))] rimages.sort() if not len(limages) == len(rimages): - raise RuntimeError("Left, right images don't match. %d left images, %d right" % (len(limages), len(rimages))) - + raise RuntimeError("Left, right images don't match. %d left images, %d right" % ( + len(limages), len(rimages))) + for i in range(len(limages)): - l=limages[i] - r=rimages[i] + l = limages[i] + r = rimages[i] - if l.startswith('left') and (l.endswith('.pgm') or l.endswith('png')) and r.startswith('right') and (r.endswith('.pgm') or r.endswith('png')): + if l.startswith('left') and ( + l.endswith('.pgm') or l.endswith('png')) and r.startswith('right') and ( + r.endswith('.pgm') or r.endswith('png')): # LEFT IMAGE filedata = archive.extractfile(l).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im_left=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) - + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im_left = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) + bridge = cv_bridge.CvBridge() try: - msg_left=bridge.cv2_to_imgmsg(im_left, "bgr8") + msg_left = bridge.cv2_to_imgmsg(im_left, "bgr8") except cv_bridge.CvBridgeError as e: print(e) - #RIGHT IMAGE + # RIGHT IMAGE filedata = archive.extractfile(r).read() - file_bytes = numpy.asarray(bytearray(filedata), dtype=numpy.uint8) - im_right=cv2.imdecode(file_bytes,cv2.IMREAD_COLOR) + file_bytes = numpy.asarray( + bytearray(filedata), dtype=numpy.uint8) + im_right = cv2.imdecode(file_bytes, cv2.IMREAD_COLOR) try: - msg_right=bridge.cv2_to_imgmsg(im_right, "bgr8") + msg_right = bridge.cv2_to_imgmsg(im_right, "bgr8") except cv_bridge.CvBridgeError as e: print(e) - drawable=calibrator.handle_msg([ msg_left,msg_right] ) + drawable = calibrator.handle_msg([msg_left, msg_right]) - h, w = numpy.asarray(drawable.lscrib[:,:]).shape[:2] + h, w = numpy.asarray(drawable.lscrib[:, :]).shape[:2] vis = numpy.zeros((h, w*2, 3), numpy.uint8) - vis[:h, :w ,:] = numpy.asarray(drawable.lscrib[:,:]) - vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:,:]) - - display(l+" "+r,vis) + vis[:h, :w, :] = numpy.asarray(drawable.lscrib[:, :]) + vis[:h, w:w*2, :] = numpy.asarray(drawable.rscrib[:, :]) + + display(l+" "+r, vis) def main(): @@ -160,23 +176,25 @@ def main(): parser.add_option("--upload", default=False, action="store_true", dest="upload", help="Upload results to camera(s).") parser.add_option("--fix-principal-point", action="store_true", default=False, - help="fix the principal point at the image
center") + help="fix the principal point at the image center") parser.add_option("--fix-aspect-ratio", action="store_true", default=False, - help="enforce focal lengths (fx, fy) are equal") + help="enforce focal lengths (fx, fy) are equal") parser.add_option("--zero-tangent-dist", action="store_true", default=False, - help="set tangential distortion coefficients (p1, p2) to zero") - parser.add_option("-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS", - help="number of radial distortion coefficients to use (up to 6, default %default)") + help="set tangential distortion coefficients (p1, p2) to zero") + parser.add_option( + "-k", "--k-coefficients", type="int", default=2, metavar="NUM_COEFFS", + help="number of radial distortion coefficients to use (up to 6, default %default)") parser.add_option("--visualize", action="store_true", default=False, - help="visualize rectified images after calibration") - parser.add_option("-a", "--alpha", type="float", default=1.0, metavar="ALPHA", - help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)") + help="visualize rectified images after calibration") + parser.add_option( + "-a", "--alpha", type="float", default=1.0, metavar="ALPHA", + help="zoom for visualization of rectifies images. Ranges from 0 (zoomed in, all pixels in calibrated image are valid) to 1 (zoomed out, all pixels in original image are in calibrated image). default %default)") options, args = parser.parse_args() - + if len(options.size) != len(options.square): parser.error("Number of size and square inputs must be the same!") - + if not options.square: options.square.append("0.108") options.size.append("8x6") @@ -197,7 +215,8 @@ def main(): if not args: parser.error("Must give tarfile name") if not os.path.exists(args[0]): - parser.error("Tarfile %s does not exist. Please select valid tarfile" % args[0]) + parser.error( + "Tarfile %s does not exist. Please select valid tarfile" % args[0]) tarname = args[0] @@ -225,7 +244,8 @@ def main(): if (num_ks < 1): calib_flags |= cv2.CALIB_FIX_K1 - cal_from_tarfile(boards, tarname, options.mono, options.upload, calib_flags, options.visualize, options.alpha) + cal_from_tarfile(boards, tarname, options.mono, options.upload, + calib_flags, options.visualize, options.alpha) if __name__ == '__main__': diff --git a/camera_calibration/src/camera_calibration/stereo_calibrator.py b/camera_calibration/src/camera_calibration/stereo_calibrator.py new file mode 100644 index 000000000..4173531d7 --- /dev/null +++ b/camera_calibration/src/camera_calibration/stereo_calibrator.py @@ -0,0 +1,501 @@ +#!/usr/bin/env python +# +# Software License Agreement (BSD License) +# +# Copyright (c) 2009, Willow Garage, Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions +# are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following +# disclaimer in the documentation and/or other materials provided +# with the distribution. 
+# * Neither the name of the Willow Garage nor the names of its +# contributors may be used to endorse or promote products derived +# from this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS +# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE +# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, +# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, +# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT +# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN +# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + +from io import BytesIO +import cv2 +import image_geometry +import math +import numpy.linalg +import pickle +import random +import sys +import tarfile +import time +from semver import VersionInfo +from camera_calibration.mono_calibrator import MonoCalibrator +from camera_calibration.calibrator import ( + Calibrator, + CalibrationException, + CAMERA_MODEL, + Patterns, + ImageDrawable, + image_from_archive +) + + +class StereoDrawable(ImageDrawable): + def __init__(self): + ImageDrawable.__init__(self) + self.lscrib = None + self.rscrib = None + self.epierror = -1 + self.dim = -1 + +# TODO Replicate MonoCalibrator improvements in stereo + + +class StereoCalibrator(Calibrator): + """ + Calibration class for stereo cameras:: + + limages = [cv2.imread("left%d.png" % i) for i in range(8)] + rimages = [cv2.imread("right%d.png" % i) for i in range(8)] + sc = StereoCalibrator() + sc.cal(limages, rimages) + print(sc.as_message()) + """ + + is_mono = False + + def __init__(self, *args, **kwargs): + if 'name' not in kwargs: + kwargs['name'] = 'narrow_stereo' + super(StereoCalibrator, self).__init__(*args, **kwargs) + self.l = MonoCalibrator(*args, **kwargs) + self.r = MonoCalibrator(*args, **kwargs) + # Collecting from two cameras in a horizontal stereo rig, can't get + # full X range in the left camera. + self.param_ranges[0] = 0.4 + + # override + def set_cammodel(self, modeltype): + super(StereoCalibrator, self).set_cammodel(modeltype) + self.l.set_cammodel(modeltype) + self.r.set_cammodel(modeltype) + + def cal(self, limages, rimages): + """ + :param limages: source left images containing chessboards + :type limages: list of :class:`cvMat` + :param rimages: source right images containing chessboards + :type rimages: list of :class:`cvMat` + + Find chessboards in images, and run the OpenCV calibration solver. + """ + goodcorners = self.collect_corners(limages, rimages) + self.size = (limages[0].shape[1], limages[0].shape[0]) + self.l.size = self.size + self.r.size = self.size + self.cal_fromcorners(goodcorners) + self.calibrated = True + + def collect_corners(self, limages, rimages): + """ + For a sequence of left and right images, find pairs of images where both + left and right have a chessboard, and return their corners as a list of pairs.
+ """ + # Pick out (corners, ids, board) tuples + lcorners = [] + rcorners = [] + for img in limages: + (_, corners, _, ids, board, _) = self.downsample_and_detect(img) + lcorners.append((corners, ids, board)) + for img in rimages: + (_, corners, _, ids, board, _) = self.downsample_and_detect(img) + rcorners.append((corners, ids, board)) + + good = [(lco, rco, lid, rid, b) for ((lco, lid, b), (rco, rid, br)) + in zip(lcorners, rcorners) if (lco is not None and rco is not None)] + + if len(good) == 0: + raise CalibrationException("No corners found in images!") + return good + + def cal_fromcorners(self, good): + # Perform monocular calibrations + lcorners = [(lco, lid, b) for (lco, rco, lid, rid, b) in good] + rcorners = [(rco, rid, b) for (lco, rco, lid, rid, b) in good] + self.l.cal_fromcorners(lcorners) + self.r.cal_fromcorners(rcorners) + + (lipts, ripts, _, _, boards) = zip(*good) + + opts = self.mk_object_points(boards, True) + + flags = cv2.CALIB_FIX_INTRINSIC + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + + if self.camera_model == CAMERA_MODEL.PINHOLE: + print("stereo pinhole calibration...") + if VersionInfo.parse(cv2.__version__).major < 3: + ret_values = cv2.stereoCalibrate(opts, lipts, ripts, self.size, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.R, # R + self.T, # T + criteria=(cv2.TERM_CRITERIA_EPS + \ + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + else: + ret_values = cv2.stereoCalibrate(opts, lipts, ripts, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, # R + self.T, # T + criteria=(cv2.TERM_CRITERIA_EPS + \ + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + print(f"Stereo RMS re-projection error: {ret_values[0]}") + elif self.camera_model == CAMERA_MODEL.FISHEYE: + print("stereo fisheye calibration...") + if VersionInfo.parse(cv2.__version__).major < 3: + print("ERROR: You need OpenCV >3 to use fisheye camera model") + sys.exit() + else: + # WARNING: cv2.fisheye.stereoCalibrate wants float64 points + lipts64 = numpy.asarray(lipts, dtype=numpy.float64) + lipts = lipts64 + ripts64 = numpy.asarray(ripts, dtype=numpy.float64) + ripts = ripts64 + opts64 = numpy.asarray(opts, dtype=numpy.float64) + opts = opts64 + + cv2.fisheye.stereoCalibrate(opts, lipts, ripts, + self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, # R + self.T, # T + # 30, 1e-6 + criteria=( + cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 1, 1e-5), + flags=flags) + + self.set_alpha(0.0) + + def set_alpha(self, a): + """ + Set the alpha value for the calibrated camera solution. The + alpha value is a zoom, and ranges from 0 (zoomed in, all pixels + in calibrated image are valid) to 1 (zoomed out, all pixels in + original image are in calibrated image). 
+ """ + if self.camera_model == CAMERA_MODEL.PINHOLE: + cv2.stereoRectify(self.l.intrinsics, + self.l.distortion, + self.r.intrinsics, + self.r.distortion, + self.size, + self.R, + self.T, + self.l.R, self.r.R, self.l.P, self.r.P, + alpha=a) + + cv2.initUndistortRectifyMap( + self.l.intrinsics, self.l.distortion, self.l.R, self.l.P, self.size, cv2.CV_32FC1, + self.l.mapx, self.l.mapy) + cv2.initUndistortRectifyMap( + self.r.intrinsics, self.r.distortion, self.r.R, self.r.P, self.size, cv2.CV_32FC1, + self.r.mapx, self.r.mapy) + + elif self.camera_model == CAMERA_MODEL.FISHEYE: + self.Q = numpy.zeros((4, 4), dtype=numpy.float64) + + # Operation flags that may be zero or CALIB_ZERO_DISPARITY . + flags = cv2.CALIB_ZERO_DISPARITY + # If the flag is set, the function makes the principal points of each camera have the same pixel coordinates in the rectified views. + # And if the flag is not set, the function may still shift the images in the horizontal or vertical direction + # (depending on the orientation of epipolar lines) to maximize the useful image area. + + cv2.fisheye.stereoRectify(self.l.intrinsics, self.l.distortion, + self.r.intrinsics, self.r.distortion, + self.size, + self.R, self.T, + flags, + self.l.R, self.r.R, + self.l.P, self.r.P, + self.Q, + self.size, + a, + 1.0) + self.l.P[:3, :3] = numpy.dot(self.l.intrinsics, self.l.R) + self.r.P[:3, :3] = numpy.dot(self.r.intrinsics, self.r.R) + cv2.fisheye.initUndistortRectifyMap( + self.l.intrinsics, self.l.distortion, self.l.R, self.l.intrinsics, self.size, cv2. + CV_32FC1, self.l.mapx, self.l.mapy) + cv2.fisheye.initUndistortRectifyMap( + self.r.intrinsics, self.r.distortion, self.r.R, self.r.intrinsics, self.size, cv2. + CV_32FC1, self.r.mapx, self.r.mapy) + + def as_message(self): + """ + Return the camera calibration as a pair of CameraInfo messages, for left + and right cameras respectively. + """ + + return (self.lrmsg(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size, self.l.camera_model), + self.lrmsg(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size, self.r.camera_model)) + + def from_message(self, msgs, alpha=0.0): + """ Initialize the camera calibration from a pair of CameraInfo messages. """ + self.size = (msgs[0].width, msgs[0].height) + + self.T = numpy.zeros((3, 1), dtype=numpy.float64) + self.R = numpy.eye(3, dtype=numpy.float64) + + self.l.from_message(msgs[0]) + self.r.from_message(msgs[1]) + # Need to compute self.T and self.R here, using the monocular parameters above + if False: + self.set_alpha(0.0) + + def report(self): + print("\nLeft:") + self.lrreport(self.l.distortion, self.l.intrinsics, self.l.R, self.l.P) + print("\nRight:") + self.lrreport(self.r.distortion, self.r.intrinsics, self.r.R, self.r.P) + print("self.T =", numpy.ravel(self.T).tolist()) + print("self.R =", numpy.ravel(self.R).tolist()) + + def ost(self): + return (self.lrost(self.name + "/left", self.l.distortion, self.l.intrinsics, self.l.R, self.l.P, self.size) + + self.lrost(self.name + "/right", self.r.distortion, self.r.intrinsics, self.r.R, self.r.P, self.size)) + + def yaml(self, suffix, info): + return self.lryaml(self.name + suffix, info.distortion, info.intrinsics, info.R, info.P, + self.size, self.camera_model) + + # TODO Get rid of "from_images" versions of these, instead have function to get undistorted corners + def epipolar_error_from_images(self, limage, rimage): + """ + Detect the checkerboard in both images and compute the epipolar error. + Mainly for use in tests. 
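+        Returns the RMS vertical disparity in pixels, or None if the board is
+        not detected in both images.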
+        """
+        lcorners = self.downsample_and_detect(limage)[1]
+        rcorners = self.downsample_and_detect(rimage)[1]
+        if lcorners is None or rcorners is None:
+            return None
+
+        lundistorted = self.l.undistort_points(lcorners)
+        rundistorted = self.r.undistort_points(rcorners)
+
+        return self.epipolar_error(lundistorted, rundistorted)
+
+    def epipolar_error(self, lcorners, rcorners):
+        """
+        Compute the epipolar error from two sets of matching undistorted points.
+        """
+        d = lcorners[:, :, 1] - rcorners[:, :, 1]
+        return numpy.sqrt(numpy.square(d).sum() / d.size)
+
+    def chessboard_size_from_images(self, limage, rimage):
+        _, lcorners, _, _, board, _ = self.downsample_and_detect(limage)
+        _, rcorners, _, _, board, _ = self.downsample_and_detect(rimage)
+        if lcorners is None or rcorners is None:
+            return None
+
+        lundistorted = self.l.undistort_points(lcorners)
+        rundistorted = self.r.undistort_points(rcorners)
+
+        return self.chessboard_size(lundistorted, rundistorted, board)
+
+    def chessboard_size(self, lcorners, rcorners, board, msg=None):
+        """
+        Compute the square edge length from two sets of matching undistorted points
+        given the current calibration.
+        :param msg: a tuple of (left_msg, right_msg)
+        """
+        # Project the points to 3d
+        cam = image_geometry.StereoCameraModel()
+        if msg is None:
+            msg = self.as_message()
+        cam.from_camera_info(*msg)
+        disparities = lcorners[:, :, 0] - rcorners[:, :, 0]
+        pt3d = [cam.project_pixel_to_3d(
+            (lcorners[i, 0, 0],
+             lcorners[i, 0, 1]),
+            disparities[i, 0]) for i in range(lcorners.shape[0])]
+
+        def l2(p0, p1):
+            return math.sqrt(sum([(c0 - c1) ** 2 for (c0, c1) in zip(p0, p1)]))
+
+        # Compute the length from each horizontal and vertical line, and return the mean
+        cc = board.n_cols
+        cr = board.n_rows
+        lengths = (
+            [l2(pt3d[cc * r + 0], pt3d[cc * r + (cc - 1)]) / (cc - 1) for r in range(cr)] +
+            [l2(pt3d[c + 0], pt3d[c + (cc * (cr - 1))]) / (cr - 1) for c in range(cc)])
+        return sum(lengths) / len(lengths)
+
+    def update_db(self, lgray, rgray, lcorners, rcorners, lids, rids, lboard):
+        """
+        Update the database with the images and corners when a sufficiently
+        good sample is detected.
+        """
+        params = self.get_parameters(
+            lcorners, lids, lboard, (lgray.shape[1], lgray.shape[0]))
+        if self.is_good_sample(
+                params, lcorners, lids, self.last_frame_corners, self.last_frame_ids):
+            self.db.append((params, lgray, rgray))
+            self.good_corners.append(
+                (lcorners, rcorners, lids, rids, lboard))
+            print("*** Added sample %d, p_x = %.3f, p_y = %.3f, p_size = %.3f, skew = %.3f" %
+                  tuple([len(self.db)] + params))
+
+    def handle_msg(self, msg):
+        # TODO Various asserts that images have same dimension, same board detected...
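+        # Overview: detect the calibration target in both images; if already
+        # calibrated, show rectified images and report the epipolar error;
+        # otherwise draw the raw detections and, when both views yield usable
+        # corners, add the pair to the sample database.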
+        (lmsg, rmsg) = msg
+        lgray = self.mkgray(lmsg)
+        rgray = self.mkgray(rmsg)
+        epierror = -1
+
+        # Get display-images-to-be and detections of the calibration target
+        lscrib_mono, lcorners, ldownsampled_corners, lids, lboard, (
+            x_scale, y_scale) = self.downsample_and_detect(lgray)
+        rscrib_mono, rcorners, rdownsampled_corners, rids, rboard, _ = self.downsample_and_detect(
+            rgray)
+
+        if self.calibrated:
+            # Show rectified images
+            lremap = self.l.remap(lgray)
+            rremap = self.r.remap(rgray)
+            lrect = lremap
+            rrect = rremap
+            if x_scale != 1.0 or y_scale != 1.0:
+                lrect = cv2.resize(
+                    lremap, (lscrib_mono.shape[1], lscrib_mono.shape[0]))
+                rrect = cv2.resize(
+                    rremap, (rscrib_mono.shape[1], rscrib_mono.shape[0]))
+
+            lscrib = cv2.cvtColor(lrect, cv2.COLOR_GRAY2BGR)
+            rscrib = cv2.cvtColor(rrect, cv2.COLOR_GRAY2BGR)
+
+            # Draw rectified corners
+            if lcorners is not None:
+                lundistorted = self.l.undistort_points(lcorners)
+                scrib_src = lundistorted.copy()
+                scrib_src[:, :, 0] /= x_scale
+                scrib_src[:, :, 1] /= y_scale
+                cv2.drawChessboardCorners(
+                    lscrib, (lboard.n_cols, lboard.n_rows), scrib_src, True)
+
+            if rcorners is not None:
+                rundistorted = self.r.undistort_points(rcorners)
+                scrib_src = rundistorted.copy()
+                scrib_src[:, :, 0] /= x_scale
+                scrib_src[:, :, 1] /= y_scale
+                cv2.drawChessboardCorners(
+                    rscrib, (rboard.n_cols, rboard.n_rows), scrib_src, True)
+
+            # Report epipolar error
+            if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
+                epierror = self.epipolar_error(lundistorted, rundistorted)
+
+        else:
+            lscrib = cv2.cvtColor(lscrib_mono, cv2.COLOR_GRAY2BGR)
+            rscrib = cv2.cvtColor(rscrib_mono, cv2.COLOR_GRAY2BGR)
+            # Draw any detected chessboards onto display (downsampled) images
+            if lcorners is not None:
+                cv2.drawChessboardCorners(lscrib, (lboard.n_cols, lboard.n_rows),
+                                          ldownsampled_corners, True)
+            if rcorners is not None:
+                cv2.drawChessboardCorners(rscrib, (rboard.n_cols, rboard.n_rows),
+                                          rdownsampled_corners, True)
+
+        # Add sample to database only if it's sufficiently different from any previous sample
+        if lcorners is not None and rcorners is not None and len(lcorners) == len(rcorners):
+            # Add samples only with entire board in view if charuco
+            if self.pattern == Patterns.ChArUco:
+                if len(lcorners) == lboard.charuco_board.chessboardCorners.shape[0]:
+                    self.update_db(lgray, rgray, lcorners,
+                                   rcorners, lids, rids, lboard)
+            else:
+                self.update_db(lgray, rgray, lcorners,
+                               rcorners, lids, rids, lboard)
+
+        self.last_frame_corners = lcorners
+        self.last_frame_ids = lids
+        rv = StereoDrawable()
+        rv.lscrib = lscrib
+        rv.rscrib = rscrib
+        rv.params = self.compute_goodenough()
+        rv.epierror = epierror
+        return rv
+
+    def do_calibration(self, dump=False):
+        # TODO MonoCalibrator collects corners if needed here
+        # TODO Needs to be set externally
+        self.size = (self.db[0][1].shape[1], self.db[0][1].shape[0])
+        # Dump should only occur if user wants it
+        if dump:
+            # pickle requires a binary-mode file handle
+            pickle.dump((self.is_mono, self.size, self.good_corners),
+                        open("/tmp/camera_calibration_%08x.pickle" % random.getrandbits(32), "wb"))
+        self.l.size = self.size
+        self.r.size = self.size
+        self.cal_fromcorners(self.good_corners)
+        self.calibrated = True
+        # DEBUG
+        self.report()  # report() prints directly and returns None
+        print(self.ost())
+
+    def do_tarfile_save(self, tf):
+        """ Write images and calibration solution to a tarfile object """
+        ims = ([("left-%04d.png" % i, im) for i, (_, im, _) in enumerate(self.db)] +
+               [("right-%04d.png" % i, im) for i, (_, _, im) in enumerate(self.db)])
+
+        def taradd(name, buf):
+            if isinstance(buf, str):
+                s = BytesIO(buf.encode('utf-8'))
+            else:
+                s = BytesIO(buf)
+            ti = tarfile.TarInfo(name)
+            ti.size = len(s.getvalue())
+            ti.uname = 'calibrator'
+            ti.mtime = int(time.time())
+            tf.addfile(tarinfo=ti, fileobj=s)
+
+        for (name, im) in ims:
+            taradd(name, cv2.imencode(".png", im)[1].tobytes())
+        taradd('left.yaml', self.yaml("/left", self.l))
+        taradd('right.yaml', self.yaml("/right", self.r))
+        taradd('ost.txt', self.ost())
+
+    def do_tarfile_calibration(self, filename):
+        archive = tarfile.open(filename, 'r')
+        limages = [image_from_archive(archive, f) for f in archive.getnames() if (
+            f.startswith('left') and (f.endswith('pgm') or f.endswith('png')))]
+        rimages = [image_from_archive(archive, f) for f in archive.getnames() if (
+            f.startswith('right') and (f.endswith('pgm') or f.endswith('png')))]
+
+        if len(limages) != len(rimages):
+            raise CalibrationException(
+                "Left, right images don't match. %d left images, %d right" %
+                (len(limages),
+                 len(rimages)))
+
+        # \todo Check that the filenames match and stuff
+
+        self.cal(limages, rimages)
diff --git a/camera_calibration/test/test_directed.py b/camera_calibration/test/test_directed.py
index 80ec1a8b9..3481b491f 100644
--- a/camera_calibration/test/test_directed.py
+++ b/camera_calibration/test/test_directed.py
@@ -33,7 +33,6 @@
 # POSSIBILITY OF SUCH DAMAGE.
 
 import cv2
-
 import collections
 import copy
 import numpy
@@ -42,14 +41,16 @@
 import tarfile
 import unittest
 
-from camera_calibration.calibrator import MonoCalibrator, StereoCalibrator, \
-    Patterns, CalibrationException, ChessboardInfo, image_from_archive
+from camera_calibration.mono_calibrator import MonoCalibrator
+from camera_calibration.stereo_calibrator import StereoCalibrator
+from camera_calibration.calibrator import Patterns, CalibrationException, ChessboardInfo, image_from_archive
 
 board = ChessboardInfo()
 board.n_cols = 8
 board.n_rows = 6
 board.dim = 0.108
 
+
 class TestDirected(unittest.TestCase):
     def setUp(self):
         if not os.path.isfile('/tmp/camera_calibration.tar.gz'):
@@ -60,20 +61,22 @@ def setUp(self):
         tar_path = '/tmp/camera_calibration.tar.gz'
         self.tar = tarfile.open(tar_path, 'r')
-        self.limages = [image_from_archive(self.tar, "wide/left%04d.pgm" % i) for i in range(3, 15)]
-        self.rimages = [image_from_archive(self.tar, "wide/right%04d.pgm" % i) for i in range(3, 15)]
+        self.limages = [image_from_archive(
+            self.tar, "wide/left%04d.pgm" % i) for i in range(3, 15)]
+        self.rimages = [image_from_archive(
+            self.tar, "wide/right%04d.pgm" % i) for i in range(3, 15)]
         self.l = {}
         self.r = {}
-        self.sizes = [(320,240), (640,480), (800,600), (1024,768)]
+        self.sizes = [(320, 240), (640, 480), (800, 600), (1024, 768)]
         for dim in self.sizes:
             self.l[dim] = []
             self.r[dim] = []
-            for li,ri in zip(self.limages, self.rimages):
+            for li, ri in zip(self.limages, self.rimages):
                 rli = cv2.resize(li, (dim[0], dim[1]))
                 rri = cv2.resize(ri, (dim[0], dim[1]))
                 self.l[dim].append(rli)
                 self.r[dim].append(rri)
-
+
     def assert_good_mono(self, c, dim, max_err):
         self.assertTrue(len(c.ost()) > 0)
         lin_err = 0
@@ -93,7 +96,7 @@ def assert_good_mono(self, c, dim, max_err):
 
     def test_monocular(self):
         # Run the calibrator, produce a calibration, check it
-        mc = MonoCalibrator([ board ], cv2.CALIB_FIX_K3)
+        mc = MonoCalibrator([board], cv2.CALIB_FIX_K3)
         max_errs = [0.1, 0.2, 0.4, 0.7]
         for i, dim in enumerate(self.sizes):
             mc.cal(self.l[dim])
@@ -114,7 +117,7 @@ def test_stereo(self):
             sc.cal(self.l[dim], self.r[dim])
 
             sc.report()
-            #print sc.ost()
+            # print
sc.ost() # NOTE: epipolar error currently increases with resolution. # At highest res expect error ~0.75 @@ -127,11 +130,12 @@ def test_stereo(self): n += 1 epierror /= n self.assertTrue(epierror < epierrors[i], - 'Epipolar error is %f for resolution i = %d' % (epierror, i)) + 'Epipolar error is %f for resolution i = %d' % (epierror, i)) - self.assertAlmostEqual(sc.chessboard_size_from_images(self.l[dim][0], self.r[dim][0]), .108, 2) + self.assertAlmostEqual(sc.chessboard_size_from_images( + self.l[dim][0], self.r[dim][0]), .108, 2) - #print sc.as_message() + # print sc.as_message() img = self.l[dim][0] flat = sc.l.remap(img) @@ -142,7 +146,7 @@ def test_stereo(self): sc2 = StereoCalibrator([board]) sc2.from_message(sc.as_message()) # sc2.set_alpha(1.0) - #sc2.report() + # sc2.report() self.assertTrue(len(sc2.ost()) > 0) def test_nochecker(self): @@ -153,19 +157,21 @@ def test_nochecker(self): new_board.n_rows = 7 sc = StereoCalibrator([new_board]) - self.assertRaises(CalibrationException, lambda: sc.cal(self.limages, self.rimages)) + self.assertRaises(CalibrationException, + lambda: sc.cal(self.limages, self.rimages)) mc = MonoCalibrator([new_board]) self.assertRaises(CalibrationException, lambda: mc.cal(self.limages)) - class TestArtificial(unittest.TestCase): - Setup = collections.namedtuple('Setup', ['pattern', 'cols', 'rows', 'lin_err', 'K_err']) + Setup = collections.namedtuple( + 'Setup', ['pattern', 'cols', 'rows', 'lin_err', 'K_err']) def setUp(self): # Define some image transforms that will simulate a camera position M = [] - self.k = numpy.array([[500, 0, 250], [0, 500, 250], [0, 0, 1]], numpy.float32) + self.k = numpy.array( + [[500, 0, 250], [0, 500, 250], [0, 0, 1]], numpy.float32) self.d = numpy.array([]) # physical size of the board self.board_width_dim = 1 @@ -173,9 +179,12 @@ def setUp(self): # Generate data for different grid types. 
For each grid type, define the different sizes of # grid that are recognized (n row, n col) # Patterns.Circles, Patterns.ACircles - self.setups = [ self.Setup(pattern=Patterns.Chessboard, cols=7, rows=8, lin_err=0.2, K_err=8.2), - self.Setup(pattern=Patterns.Circles, cols=7, rows=8, lin_err=0.1, K_err=4), - self.Setup(pattern=Patterns.ACircles, cols=3, rows=5, lin_err=0.1, K_err=8) ] + self.setups = [ + self.Setup( + pattern=Patterns.Chessboard, cols=7, rows=8, lin_err=0.2, K_err=8.2), + self.Setup(pattern=Patterns.Circles, cols=7, rows=8, lin_err=0.1, K_err=4), + self.Setup( + pattern=Patterns.ACircles, cols=3, rows=5, lin_err=0.1, K_err=8)] self.limages = [] self.rimages = [] for setup in self.setups: @@ -184,50 +193,61 @@ def setUp(self): # Create the pattern if setup.pattern == Patterns.Chessboard: - pattern = numpy.zeros((50*(setup.rows+3), 50*(setup.cols+3), 1), numpy.uint8) + pattern = numpy.zeros( + (50*(setup.rows+3), 50*(setup.cols+3), 1), numpy.uint8) pattern.fill(255) for j in range(1, setup.rows+2): - for i in range(1+(j%2), setup.cols+2, 2): + for i in range(1+(j % 2), setup.cols+2, 2): pattern[50*j:50*(j+1), 50*i:50*(i+1)].fill(0) elif setup.pattern == Patterns.Circles: - pattern = numpy.zeros((50*(setup.rows+2), 50*(setup.cols+2), 1), numpy.uint8) + pattern = numpy.zeros( + (50*(setup.rows+2), 50*(setup.cols+2), 1), numpy.uint8) pattern.fill(255) for j in range(1, setup.rows+1): for i in range(1, setup.cols+1): - cv2.circle(pattern, (int(50*i + 25), int(50*j + 25)), 15, (0,0,0), -1) + cv2.circle(pattern, (int(50*i + 25), + int(50*j + 25)), 15, (0, 0, 0), -1) elif setup.pattern == Patterns.ACircles: x = 60 - pattern = numpy.zeros((x*(setup.rows+2), x*(setup.cols+5), 1), numpy.uint8) + pattern = numpy.zeros( + (x*(setup.rows+2), x*(setup.cols+5), 1), numpy.uint8) pattern.fill(255) for j in range(1, setup.rows+1): for i in range(0, setup.cols): - cv2.circle(pattern, (int(x*(1 + 2*i + (j%2)) + x/2), int(x*j + x/2)), int(x/3), (0,0,0), -1) + cv2.circle(pattern, (int(x*(1 + 2*i + (j % 2)) + x/2), + int(x*j + x/2)), int(x/3), (0, 0, 0), -1) rows, cols, _ = pattern.shape - object_points_2d = numpy.array([[0, 0], [0, cols-1], [rows-1, cols-1], [rows-1, 0]], numpy.float32) - object_points_3d = numpy.array([[0, 0, 0], [0, cols-1, 0], [rows-1, cols-1, 0], [rows-1, 0, 0]], numpy.float32) + object_points_2d = numpy.array( + [[0, 0], [0, cols-1], [rows-1, cols-1], [rows-1, 0]], numpy.float32) + object_points_3d = numpy.array( + [[0, 0, 0], [0, cols-1, 0], [rows-1, cols-1, 0], [rows-1, 0, 0]], numpy.float32) object_points_3d *= self.board_width_dim/float(cols) # create the artificial view points - rvec = [ [0, 0, 0], [0, 0, 0.4], [0, 0.4, 0], [0.4, 0, 0], [0.4, 0.4, 0], [0.4, 0, 0.4], [0, 0.4, 0.4], [0.4, 0.4, 0.4] ] - tvec = [ [-0.5, -0.5, 3], [-0.5, -0.5, 3], [-0.5, -0.1, 3], [-0.1, -0.5, 3], [-0.1, -0.1, 3], [-0.1, -0.5, 3], [-0.5, -0.1, 3], [-0.1, 0.1, 3] ] + rvec = [[0, 0, 0], [0, 0, 0.4], [0, 0.4, 0], [0.4, 0, 0], [ + 0.4, 0.4, 0], [0.4, 0, 0.4], [0, 0.4, 0.4], [0.4, 0.4, 0.4]] + tvec = [[-0.5, -0.5, 3], [-0.5, -0.5, 3], [-0.5, -0.1, 3], [-0.1, -0.5, 3], + [-0.1, -0.1, 3], [-0.1, -0.5, 3], [-0.5, -0.1, 3], [-0.1, 0.1, 3]] dsize = (480, 640) for i in range(len(rvec)): R = numpy.array(rvec[i], numpy.float32) T = numpy.array(tvec[i], numpy.float32) - - image_points, _ = cv2.projectPoints(object_points_3d, R, T, self.k, self.d) + + image_points, _ = cv2.projectPoints( + object_points_3d, R, T, self.k, self.d) # deduce the perspective transform - 
M.append(cv2.getPerspectiveTransform(object_points_2d, image_points)) + M.append(cv2.getPerspectiveTransform( + object_points_2d, image_points)) # project the pattern according to the different cameras pattern_warped = cv2.warpPerspective(pattern, M[i], dsize) self.limages[-1].append(pattern_warped) def assert_good_mono(self, c, images, max_err): - #c.report() + # c.report() self.assertTrue(len(c.ost()) > 0) lin_err = 0 n = 0 @@ -253,7 +273,8 @@ def test_monocular(self): board.n_rows = setup.rows board.dim = self.board_width_dim - mc = MonoCalibrator([ board ], flags=cv2.CALIB_FIX_K3, pattern=setup.pattern) + mc = MonoCalibrator( + [board], flags=cv2.CALIB_FIX_K3, pattern=setup.pattern) if 0: # display the patterns viewed by the camera @@ -265,10 +286,12 @@ def test_monocular(self): self.assert_good_mono(mc, self.limages[i], setup.lin_err) # Make sure the intrinsics are similar - err_intrinsics = numpy.linalg.norm(mc.intrinsics - self.k, ord=numpy.inf) + err_intrinsics = numpy.linalg.norm( + mc.intrinsics - self.k, ord=numpy.inf) self.assertTrue(err_intrinsics < setup.K_err, - 'intrinsics error is %f for resolution i = %d' % (err_intrinsics, i)) - print('intrinsics error is %f' % numpy.linalg.norm(mc.intrinsics - self.k, ord=numpy.inf)) + 'intrinsics error is %f for resolution i = %d' % (err_intrinsics, i)) + print('intrinsics error is %f' % numpy.linalg.norm( + mc.intrinsics - self.k, ord=numpy.inf)) def test_rational_polynomial_model(self): """Test that the distortion coefficients returned for a rational_polynomial model are not empty.""" @@ -278,15 +301,18 @@ def test_rational_polynomial_model(self): board.n_rows = setup.rows board.dim = self.board_width_dim - mc = MonoCalibrator([ board ], flags=cv2.CALIB_RATIONAL_MODEL, pattern=setup.pattern) + mc = MonoCalibrator( + [board], flags=cv2.CALIB_RATIONAL_MODEL, pattern=setup.pattern) mc.cal(self.limages[i]) self.assertEqual(len(mc.distortion.flat), 8, 'length of distortion coefficients is %d' % len(mc.distortion.flat)) self.assertTrue(all(mc.distortion.flat != 0), - 'some distortion coefficients are zero: %s' % str(mc.distortion.flatten())) - self.assertEqual(mc.as_message().distortion_model, 'rational_polynomial') + 'some distortion coefficients are zero: %s' % + str(mc.distortion.flatten())) + self.assertEqual(mc.as_message().distortion_model, + 'rational_polynomial') self.assert_good_mono(mc, self.limages[i], setup.lin_err) - + yaml = mc.yaml() # Issue #278 self.assertIn('cols: 8', yaml) diff --git a/camera_calibration/test/test_multiple_boards.py b/camera_calibration/test/test_multiple_boards.py index 40e1faa50..a1c17a35a 100644 --- a/camera_calibration/test/test_multiple_boards.py +++ b/camera_calibration/test/test_multiple_boards.py @@ -32,14 +32,13 @@ # ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # POSSIBILITY OF SUCH DAMAGE. 
-import rclpy -import ament_index_python import requests import unittest import tarfile import os -from camera_calibration.calibrator import StereoCalibrator, ChessboardInfo, image_from_archive +from camera_calibration.stereo_calibrator import StereoCalibrator +from camera_calibration.calibrator import ChessboardInfo, image_from_archive # Large board used for PR2 calibration board = ChessboardInfo() @@ -47,6 +46,7 @@ board.n_rows = 6 board.dim = 0.108 + class TestMultipleBoards(unittest.TestCase): def test_multiple_boards(self): small_board = ChessboardInfo() @@ -66,21 +66,22 @@ def test_multiple_boards(self): stereo_cal.report() stereo_cal.ost() - + # Check error for big image archive = tarfile.open(my_archive_name) l1_big = image_from_archive(archive, "left-0000.png") r1_big = image_from_archive(archive, "right-0000.png") epi_big = stereo_cal.epipolar_error_from_images(l1_big, r1_big) - self.assertTrue(epi_big < 1.0, "Epipolar error for large checkerboard > 1.0. Error: %.2f" % epi_big) + self.assertTrue( + epi_big < 1.0, "Epipolar error for large checkerboard > 1.0. Error: %.2f" % epi_big) # Small checkerboard has larger error threshold for now l1_sm = image_from_archive(archive, "left-0012-sm.png") r1_sm = image_from_archive(archive, "right-0012-sm.png") epi_sm = stereo_cal.epipolar_error_from_images(l1_sm, r1_sm) - self.assertTrue(epi_sm < 2.0, "Epipolar error for small checkerboard > 2.0. Error: %.2f" % epi_sm) - + self.assertTrue( + epi_sm < 2.0, "Epipolar error for small checkerboard > 2.0. Error: %.2f" % epi_sm) if __name__ == '__main__': - unittest.main(verbosity=2) \ No newline at end of file + unittest.main(verbosity=2) diff --git a/depth_image_proc/CHANGELOG.rst b/depth_image_proc/CHANGELOG.rst index d89656826..44acbcd48 100644 --- a/depth_image_proc/CHANGELOG.rst +++ b/depth_image_proc/CHANGELOG.rst @@ -2,6 +2,65 @@ Changelog for package depth_image_proc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Publish using unique ptr (`#1016 `_) + Prevents doing an extra copy of the data when using intra-process + communication. +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* fix signature issue from `#943 `_ (`#1018 `_) + Without this, we get + ``` + symbol lookup error: /home/ubr/jazzy/install/depth_image_proc/lib/libdepth_image_proc.so: undefined symbol: _ZN16depth_image_proc10convertRgbERKSt10shared_ptrIKN11sensor_msgs3msg6Image_ISaIvEEEES0_INS2_12PointCloud2_IS4_EEEiiii + c++filt _ZN16depth_image_proc10convertRgbERKSt10shared_ptrIKN11sensor_msgs3msg6Image_ISaIvEEEES0_INS2_12PointCloud2_IS4_EEEiiii + depth_image_proc::convertRgb(std::shared_ptr > const> const&, std::shared_ptr > >, int, int, int, int) + ``` +* Contributors: Błażej Sowa, Michael Ferguson + +6.0.2 (2024-07-23) +------------------ +* Removed deprecation warnings (`#1010 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.1 (2024-07-22) +------------------ +* Updated deprecated message filter headers (`#1012 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* Update depth_image_proc::RegisterNode documentation (`#957 `_) + Adding missing parameters from register node of depth_image_proc + package. 
+  Related to Issue `#956 `_
+* add invalid_depth param (`#943 `_)
+  Add option to set all invalid depth pixels to a specified value, typically the maximum range.
+  * Updates convertDepth parameter name and optimizes use of the parameter.
+  * Updates PointCloudXYZ, PointCloudXYZI, and PointCloudXYZRGB with new invalid_depth parameter
+* fix image publisher remapping (`#941 `_)
+  Addresses `#940 `_ - fixes the compressed/etc topic remapping for publishers
+* unified changelog, add missing image, deduplicate tutorials (`#938 `_)
+  Last bit of documentation updates - putting together a single changelog
+  summary for the whole release (rather than scattering among packages).
+  Unified the camera_info tutorial so it isn't duplicated. Added a missing
+  image from image_rotate (was on local disk, but hadn't committed it)
+* migrate image_pipeline docs (`#929 `_)
+  * Migrates image_pipeline overview page
+  * Migrates CameraInfo wiki page
+  * Adds links to the other packages in this stack
+  * Updates depth_image_proc and image_proc to have the overview page properly named and in the TOC
+* migrate depth_image_proc docs (`#926 `_)
+* Fixed image types in depth_image_proc
+* Contributors: Alejandro Hernández Cordero, Alessio Parmeggiani, Michael Ferguson, philipp.polterauer
+
 5.0.0 (2024-01-24)
 ------------------
 * radial nodes: should all sub to raw topics (`#906 `_)
diff --git a/depth_image_proc/doc/tutorials.rst b/depth_image_proc/doc/tutorials.rst
index 74713f5e6..a37dc373b 100644
--- a/depth_image_proc/doc/tutorials.rst
+++ b/depth_image_proc/doc/tutorials.rst
@@ -57,3 +57,7 @@ parameter is used:
 Remapping camera_info Topics
 ----------------------------
 See `tutorial in image_pipeline `_.
+
+Using QoS Overrides
+-------------------
+See `tutorial in image_pipeline `_.
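For readers following the QoS overrides link above: once a publisher is created with `rclcpp::QosOverridingOptions::with_default_policies()` (as the depth_image_proc publishers in this patch now are), ROS 2 exposes parameters of the form `qos_overrides.<topic>.publisher.<policy>` for the history, depth, and reliability policies. A minimal launch sketch follows; the executable name `point_cloud_xyz_node` and the `/points` topic are illustrative assumptions, not something this patch defines:

```python
# Hypothetical launch file: override a depth_image_proc publisher's QoS at
# startup via parameters instead of recompiling. Names below are assumed.
from launch import LaunchDescription
from launch_ros.actions import Node


def generate_launch_description():
    return LaunchDescription([
        Node(
            package='depth_image_proc',
            executable='point_cloud_xyz_node',  # assumed executable name
            parameters=[{
                # Only history, depth, and reliability can be overridden
                # with QosOverridingOptions::with_default_policies()
                'qos_overrides./points.publisher.reliability': 'best_effort',
                'qos_overrides./points.publisher.depth': 5,
            }],
        ),
    ])
```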
diff --git a/depth_image_proc/include/depth_image_proc/conversions.hpp b/depth_image_proc/include/depth_image_proc/conversions.hpp index 8af177bd0..4221eaa79 100644 --- a/depth_image_proc/include/depth_image_proc/conversions.hpp +++ b/depth_image_proc/include/depth_image_proc/conversions.hpp @@ -51,7 +51,7 @@ namespace depth_image_proc template void convertDepth( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, - const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg, + sensor_msgs::msg::PointCloud2 & cloud_msg, const image_geometry::PinholeCameraModel & model, double invalid_depth = 0.0) { @@ -71,14 +71,14 @@ void convertDepth( if (use_invalid_depth) { invalid_depth_cvt = DepthTraits::fromMeters(invalid_depth); } - sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); - sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); - sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + sensor_msgs::PointCloud2Iterator iter_x(cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(cloud_msg, "z"); const T * depth_row = reinterpret_cast(&depth_msg->data[0]); uint32_t row_step = depth_msg->step / sizeof(T); - for (uint32_t v = 0; v < cloud_msg->height; ++v, depth_row += row_step) { - for (uint32_t u = 0; u < cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z) { + for (uint32_t v = 0; v < cloud_msg.height; ++v, depth_row += row_step) { + for (uint32_t u = 0; u < cloud_msg.width; ++u, ++iter_x, ++iter_y, ++iter_z) { T depth = depth_row[u]; // Missing points denoted by NaNs @@ -103,19 +103,19 @@ void convertDepth( template void convertDepthRadial( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, - const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg, + sensor_msgs::msg::PointCloud2 & cloud_msg, const cv::Mat & transform) { // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y) float bad_point = std::numeric_limits::quiet_NaN(); - sensor_msgs::PointCloud2Iterator iter_x(*cloud_msg, "x"); - sensor_msgs::PointCloud2Iterator iter_y(*cloud_msg, "y"); - sensor_msgs::PointCloud2Iterator iter_z(*cloud_msg, "z"); + sensor_msgs::PointCloud2Iterator iter_x(cloud_msg, "x"); + sensor_msgs::PointCloud2Iterator iter_y(cloud_msg, "y"); + sensor_msgs::PointCloud2Iterator iter_z(cloud_msg, "z"); const T * depth_row = reinterpret_cast(&depth_msg->data[0]); int row_step = depth_msg->step / sizeof(T); - for (int v = 0; v < static_cast(cloud_msg->height); ++v, depth_row += row_step) { - for (int u = 0; u < static_cast(cloud_msg->width); ++u, ++iter_x, ++iter_y, ++iter_z) { + for (int v = 0; v < static_cast(cloud_msg.height); ++v, depth_row += row_step) { + for (int u = 0; u < static_cast(cloud_msg.width); ++u, ++iter_x, ++iter_y, ++iter_z) { T depth = depth_row[u]; // Missing points denoted by NaNs @@ -136,14 +136,14 @@ void convertDepthRadial( template void convertIntensity( const sensor_msgs::msg::Image::ConstSharedPtr & intensity_msg, - const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg) + sensor_msgs::msg::PointCloud2 & cloud_msg) { - sensor_msgs::PointCloud2Iterator iter_i(*cloud_msg, "intensity"); + sensor_msgs::PointCloud2Iterator iter_i(cloud_msg, "intensity"); const T * inten_row = reinterpret_cast(&intensity_msg->data[0]); const int i_row_step = intensity_msg->step / sizeof(T); - for (int v = 0; v < static_cast(cloud_msg->height); ++v, inten_row += i_row_step) { - for (int u = 0; u < static_cast(cloud_msg->width); ++u, ++iter_i) { + for (int v = 0; v < 
static_cast(cloud_msg.height); ++v, inten_row += i_row_step) { + for (int u = 0; u < static_cast(cloud_msg.width); ++u, ++iter_i) { *iter_i = inten_row[u]; } } @@ -152,7 +152,7 @@ void convertIntensity( // Handles RGB8, BGR8, and MONO8 void convertRgb( const sensor_msgs::msg::Image::ConstSharedPtr & rgb_msg, - sensor_msgs::msg::PointCloud2::SharedPtr cloud_msg, + sensor_msgs::msg::PointCloud2 & cloud_msg, int red_offset, int green_offset, int blue_offset, int color_step); cv::Mat initMatrix(cv::Mat cameraMatrix, cv::Mat distCoeffs, int width, int height, bool radial); diff --git a/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp b/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp index cb079cc1d..8be05e3ce 100644 --- a/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp +++ b/depth_image_proc/include/depth_image_proc/point_cloud_xyzi.hpp @@ -38,9 +38,9 @@ #include "depth_image_proc/visibility.h" #include "image_geometry/pinhole_camera_model.hpp" -#include "message_filters/subscriber.h" -#include "message_filters/sync_policies/approximate_time.h" -#include "message_filters/synchronizer.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" +#include "message_filters/synchronizer.hpp" #include #include diff --git a/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp b/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp index d4838e120..51d7bc546 100644 --- a/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp +++ b/depth_image_proc/include/depth_image_proc/point_cloud_xyzi_radial.hpp @@ -39,9 +39,9 @@ #include #include "depth_image_proc/visibility.h" -#include "message_filters/subscriber.h" -#include "message_filters/synchronizer.h" -#include "message_filters/sync_policies/exact_time.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/synchronizer.hpp" +#include "message_filters/sync_policies/exact_time.hpp" #include #include diff --git a/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp b/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp index 47c7e0cba..c44c198c8 100644 --- a/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp +++ b/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb.hpp @@ -38,10 +38,10 @@ #include "depth_image_proc/visibility.h" #include "image_geometry/pinhole_camera_model.hpp" -#include "message_filters/subscriber.h" -#include "message_filters/synchronizer.h" -#include "message_filters/sync_policies/exact_time.h" -#include "message_filters/sync_policies/approximate_time.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/synchronizer.hpp" +#include "message_filters/sync_policies/exact_time.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" #include #include diff --git a/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp b/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp index 6db3759f7..8dc3098e4 100644 --- a/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp +++ b/depth_image_proc/include/depth_image_proc/point_cloud_xyzrgb_radial.hpp @@ -40,10 +40,10 @@ #include "depth_image_proc/visibility.h" #include "image_geometry/pinhole_camera_model.hpp" -#include "message_filters/subscriber.h" -#include "message_filters/synchronizer.h" -#include "message_filters/sync_policies/exact_time.h" -#include "message_filters/sync_policies/approximate_time.h" +#include 
"message_filters/subscriber.hpp" +#include "message_filters/synchronizer.hpp" +#include "message_filters/sync_policies/exact_time.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" #include #include diff --git a/depth_image_proc/package.xml b/depth_image_proc/package.xml index d7d4e215f..98c35c9f0 100644 --- a/depth_image_proc/package.xml +++ b/depth_image_proc/package.xml @@ -2,7 +2,7 @@ depth_image_proc - 5.0.0 + 6.0.3 Contains components for processing depth images such as those @@ -32,6 +32,7 @@ image_geometry image_transport libopencv-dev + image_proc message_filters rclcpp rclcpp_components diff --git a/depth_image_proc/src/conversions.cpp b/depth_image_proc/src/conversions.cpp index 697734414..e5c1b2052 100644 --- a/depth_image_proc/src/conversions.cpp +++ b/depth_image_proc/src/conversions.cpp @@ -79,16 +79,16 @@ cv::Mat initMatrix( void convertRgb( const sensor_msgs::msg::Image::ConstSharedPtr & rgb_msg, - const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg, + sensor_msgs::msg::PointCloud2 & cloud_msg, int red_offset, int green_offset, int blue_offset, int color_step) { - sensor_msgs::PointCloud2Iterator iter_r(*cloud_msg, "r"); - sensor_msgs::PointCloud2Iterator iter_g(*cloud_msg, "g"); - sensor_msgs::PointCloud2Iterator iter_b(*cloud_msg, "b"); + sensor_msgs::PointCloud2Iterator iter_r(cloud_msg, "r"); + sensor_msgs::PointCloud2Iterator iter_g(cloud_msg, "g"); + sensor_msgs::PointCloud2Iterator iter_b(cloud_msg, "b"); const uint8_t * rgb = &rgb_msg->data[0]; int rgb_skip = rgb_msg->step - rgb_msg->width * color_step; - for (int v = 0; v < static_cast(cloud_msg->height); ++v, rgb += rgb_skip) { - for (int u = 0; u < static_cast(cloud_msg->width); ++u, + for (int v = 0; v < static_cast(cloud_msg.height); ++v, rgb += rgb_skip) { + for (int u = 0; u < static_cast(cloud_msg.width); ++u, rgb += color_step, ++iter_r, ++iter_g, ++iter_b) { *iter_r = rgb[red_offset]; @@ -101,7 +101,7 @@ void convertRgb( // force template instantiation template void convertDepth( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, - const sensor_msgs::msg::PointCloud2::SharedPtr & cloud_msg, + sensor_msgs::msg::PointCloud2 & cloud_msg, const image_geometry::PinholeCameraModel & model, double invalid_depth); diff --git a/depth_image_proc/src/convert_metric.cpp b/depth_image_proc/src/convert_metric.cpp index 60e18a217..7ef841366 100644 --- a/depth_image_proc/src/convert_metric.cpp +++ b/depth_image_proc/src/convert_metric.cpp @@ -41,6 +41,7 @@ #include #include +#include #include namespace depth_image_proc @@ -68,7 +69,7 @@ ConvertMetricNode::ConvertMetricNode(const rclcpp::NodeOptions & options) // TransportHints does not actually declare the parameter this->declare_parameter("image_transport", "raw"); - // Create publisher with connect callback + // Setup lazy subscriber using publisher connection callback rclcpp::PublisherOptions pub_options; pub_options.event_callbacks.matched_callback = [this](rclcpp::MatchedInfo &) @@ -83,23 +84,29 @@ ConvertMetricNode::ConvertMetricNode(const rclcpp::NodeOptions & options) std::string topic = node_base->resolve_topic_or_service_name("image_raw", false); // Get transport hints image_transport::TransportHints hints(this); + // Create subscriber with QoS matched to subscribed topic publisher + auto qos_profile = image_proc::getTopicQosProfile(this, topic); sub_raw_ = image_transport::create_subscription( this, topic, std::bind(&ConvertMetricNode::depthCb, this, std::placeholders::_1), - hints.getTransport()); + hints.getTransport(), + 
qos_profile); } }; // For compressed topics to remap appropriately, we need to pass a // fully expanded and remapped topic name to image_transport auto node_base = this->get_node_base_interface(); std::string topic = node_base->resolve_topic_or_service_name("image", false); - pub_depth_ = - image_transport::create_publisher(this, topic, rmw_qos_profile_default, pub_options); + + // Create publisher - allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_depth_ = image_transport::create_publisher(this, topic, rmw_qos_profile_default, + pub_options); } void ConvertMetricNode::depthCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_msg) { - auto depth_msg = std::make_shared(); + auto depth_msg = std::make_unique(); depth_msg->header = raw_msg->header; depth_msg->height = raw_msg->height; depth_msg->width = raw_msg->width; @@ -136,7 +143,7 @@ void ConvertMetricNode::depthCb(const sensor_msgs::msg::Image::ConstSharedPtr & return; } - pub_depth_.publish(depth_msg); + pub_depth_.publish(std::move(depth_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/crop_foremost.cpp b/depth_image_proc/src/crop_foremost.cpp index 3807cd939..965018c23 100644 --- a/depth_image_proc/src/crop_foremost.cpp +++ b/depth_image_proc/src/crop_foremost.cpp @@ -38,6 +38,7 @@ #include "depth_image_proc/visibility.h" #include +#include #include #include @@ -74,7 +75,8 @@ CropForemostNode::CropForemostNode(const rclcpp::NodeOptions & options) distance_ = this->declare_parameter("distance", 0.0); - // Create publisher with connect callback + + // Setup lazy subscriber using publisher connection callback rclcpp::PublisherOptions pub_options; pub_options.event_callbacks.matched_callback = [this](rclcpp::MatchedInfo &) @@ -89,18 +91,24 @@ CropForemostNode::CropForemostNode(const rclcpp::NodeOptions & options) std::string topic = node_base->resolve_topic_or_service_name("image_raw", false); // Get transport hints image_transport::TransportHints hints(this); + // Create publisher with same QoS as subscribed topic publisher + auto qos_profile = image_proc::getTopicQosProfile(this, topic); sub_raw_ = image_transport::create_subscription( this, topic, std::bind(&CropForemostNode::depthCb, this, std::placeholders::_1), - hints.getTransport()); + hints.getTransport(), + qos_profile); } }; // For compressed topics to remap appropriately, we need to pass a // fully expanded and remapped topic name to image_transport auto node_base = this->get_node_base_interface(); std::string topic = node_base->resolve_topic_or_service_name("image", false); - pub_depth_ = - image_transport::create_publisher(this, topic, rmw_qos_profile_default, pub_options); + + // Create publisher - allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_depth_ = image_transport::create_publisher(this, topic, rmw_qos_profile_default, + pub_options); } void CropForemostNode::depthCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_msg) @@ -144,7 +152,9 @@ void CropForemostNode::depthCb(const sensor_msgs::msg::Image::ConstSharedPtr & r break; } - pub_depth_.publish(cv_ptr->toImageMsg()); + auto image_msg = std::make_unique(); + cv_ptr->toImageMsg(*image_msg); + pub_depth_.publish(std::move(image_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/disparity.cpp b/depth_image_proc/src/disparity.cpp index 
4cad8eb5d..86db0c73a 100644 --- a/depth_image_proc/src/disparity.cpp +++ b/depth_image_proc/src/disparity.cpp @@ -37,8 +37,8 @@ #include #include "depth_image_proc/visibility.h" -#include "message_filters/subscriber.h" -#include "message_filters/time_synchronizer.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/time_synchronizer.hpp" #include #include @@ -77,7 +77,7 @@ class DisparityNode : public rclcpp::Node template void convert( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, - stereo_msgs::msg::DisparityImage::SharedPtr & disp_msg); + stereo_msgs::msg::DisparityImage & disp_msg); }; DisparityNode::DisparityNode(const rclcpp::NodeOptions & options) @@ -116,18 +116,20 @@ DisparityNode::DisparityNode(const rclcpp::NodeOptions & options) std::string topic = node_base->resolve_topic_or_service_name("left/image_rect", false); image_transport::TransportHints hints(this); sub_depth_image_.subscribe(this, topic, hints.getTransport()); - sub_info_.subscribe(this, "right/camera_info"); + sub_info_.subscribe(this, "right/camera_info", rclcpp::QoS(10)); } }; + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); pub_disparity_ = create_publisher( - "left/disparity", rclcpp::SensorDataQoS(), pub_options); + "left/disparity", rclcpp::SystemDefaultsQoS(), pub_options); } void DisparityNode::depthCb( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg) { - auto disp_msg = std::make_shared(); + auto disp_msg = std::make_unique(); disp_msg->header = depth_msg->header; disp_msg->image.header = disp_msg->header; disp_msg->image.encoding = sensor_msgs::image_encodings::TYPE_32FC1; @@ -144,30 +146,30 @@ void DisparityNode::depthCb( disp_msg->delta_d = delta_d_; if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convert(depth_msg, disp_msg); + convert(depth_msg, *disp_msg); } else if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convert(depth_msg, disp_msg); + convert(depth_msg, *disp_msg); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); return; } - pub_disparity_->publish(*disp_msg); + pub_disparity_->publish(std::move(disp_msg)); } template void DisparityNode::convert( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, - stereo_msgs::msg::DisparityImage::SharedPtr & disp_msg) + stereo_msgs::msg::DisparityImage & disp_msg) { // For each depth Z, disparity d = fT / Z float unit_scaling = DepthTraits::toMeters(T(1) ); - float constant = disp_msg->f * disp_msg->t / unit_scaling; + float constant = disp_msg.f * disp_msg.t / unit_scaling; const T * depth_row = reinterpret_cast(&depth_msg->data[0]); int row_step = depth_msg->step / sizeof(T); - float * disp_data = reinterpret_cast(&disp_msg->image.data[0]); + float * disp_data = reinterpret_cast(&disp_msg.image.data[0]); for (int v = 0; v < static_cast(depth_msg->height); ++v) { for (int u = 0; u < static_cast(depth_msg->width); ++u) { T depth = depth_row[u]; diff --git a/depth_image_proc/src/point_cloud_xyz.cpp b/depth_image_proc/src/point_cloud_xyz.cpp index 227a4100b..6f6074ad0 100644 --- a/depth_image_proc/src/point_cloud_xyz.cpp +++ b/depth_image_proc/src/point_cloud_xyz.cpp @@ -40,6 +40,7 @@ #include #include +#include #include #include #include @@ -75,10 +76,12 @@ PointCloudXyzNode::PointCloudXyzNode(const rclcpp::NodeOptions & options) 
auto node_base = this->get_node_base_interface(); std::string topic = node_base->resolve_topic_or_service_name("image_rect", false); - // Get transport and QoS + // Get transport hints image_transport::TransportHints depth_hints(this, "raw", "depth_image_transport"); - auto custom_qos = rmw_qos_profile_system_default; - custom_qos.depth = queue_size_; + + // Create subscriber with QoS matched to subscribed topic publisher + auto qos_profile = image_proc::getTopicQosProfile(this, topic); + qos_profile.depth = queue_size_; sub_depth_ = image_transport::create_camera_subscription( this, @@ -87,17 +90,21 @@ PointCloudXyzNode::PointCloudXyzNode(const rclcpp::NodeOptions & options) &PointCloudXyzNode::depthCb, this, std::placeholders::_1, std::placeholders::_2), depth_hints.getTransport(), - custom_qos); + qos_profile); } }; - pub_point_cloud_ = create_publisher("points", rclcpp::SensorDataQoS(), pub_options); + + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_point_cloud_ = + create_publisher("points", rclcpp::SystemDefaultsQoS(), pub_options); } void PointCloudXyzNode::depthCb( const Image::ConstSharedPtr & depth_msg, const CameraInfo::ConstSharedPtr & info_msg) { - const PointCloud2::SharedPtr cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -112,16 +119,16 @@ void PointCloudXyzNode::depthCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == enc::TYPE_16UC1 || depth_msg->encoding == enc::MONO16) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else if (depth_msg->encoding == enc::TYPE_32FC1) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/point_cloud_xyz_radial.cpp b/depth_image_proc/src/point_cloud_xyz_radial.cpp index 660ea99df..a7f48be30 100644 --- a/depth_image_proc/src/point_cloud_xyz_radial.cpp +++ b/depth_image_proc/src/point_cloud_xyz_radial.cpp @@ -39,6 +39,7 @@ #include #include +#include #include #include #include @@ -56,7 +57,7 @@ PointCloudXyzRadialNode::PointCloudXyzRadialNode(const rclcpp::NodeOptions & opt // Read parameters queue_size_ = this->declare_parameter("queue_size", 5); - // Create publisher with connect callback + // Setup lazy subscriber using publisher connection callback rclcpp::PublisherOptions pub_options; pub_options.event_callbacks.matched_callback = [this](rclcpp::MatchedInfo & s) @@ -69,10 +70,11 @@ PointCloudXyzRadialNode::PointCloudXyzRadialNode(const rclcpp::NodeOptions & opt // fully expanded and remapped topic name to image_transport auto node_base = this->get_node_base_interface(); std::string topic = node_base->resolve_topic_or_service_name("depth/image_raw", false); - // Get transport and QoS + // Get transport hints image_transport::TransportHints depth_hints(this, "raw", "depth_image_transport"); - auto custom_qos = rmw_qos_profile_system_default; - custom_qos.depth = queue_size_; + // Create subscriber with QoS matched to subscribed topic publisher 
+ auto qos_profile = image_proc::getTopicQosProfile(this, topic); + qos_profile.depth = queue_size_; // Create subscriber sub_depth_ = image_transport::create_camera_subscription( this, @@ -81,18 +83,21 @@ PointCloudXyzRadialNode::PointCloudXyzRadialNode(const rclcpp::NodeOptions & opt &PointCloudXyzRadialNode::depthCb, this, std::placeholders::_1, std::placeholders::_2), depth_hints.getTransport(), - custom_qos); + qos_profile); } }; + + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); pub_point_cloud_ = create_publisher( - "points", rclcpp::SensorDataQoS(), pub_options); + "points", rclcpp::SystemDefaultsQoS(), pub_options); } void PointCloudXyzRadialNode::depthCb( const sensor_msgs::msg::Image::ConstSharedPtr & depth_msg, const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info_msg) { - auto cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -114,16 +119,16 @@ void PointCloudXyzRadialNode::depthCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/point_cloud_xyzi.cpp b/depth_image_proc/src/point_cloud_xyzi.cpp index 0abc0ff8c..2ff520479 100644 --- a/depth_image_proc/src/point_cloud_xyzi.cpp +++ b/depth_image_proc/src/point_cloud_xyzi.cpp @@ -108,10 +108,13 @@ PointCloudXyziNode::PointCloudXyziNode(const rclcpp::NodeOptions & options) // intensity uses normal ros transport hints. 
image_transport::TransportHints hints(this, "raw"); sub_intensity_.subscribe(this, intensity_topic, hints.getTransport()); - sub_info_.subscribe(this, intensity_info_topic); + sub_info_.subscribe(this, intensity_info_topic, rclcpp::QoS(10)); } }; - pub_point_cloud_ = create_publisher("points", rclcpp::SensorDataQoS(), pub_options); + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_point_cloud_ = create_publisher("points", rclcpp::SystemDefaultsQoS(), + pub_options); } void PointCloudXyziNode::imageCb( @@ -187,7 +190,7 @@ void PointCloudXyziNode::imageCb( } } - auto cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; // Use depth image time stamp cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -205,9 +208,9 @@ void PointCloudXyziNode::imageCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == enc::TYPE_16UC1) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else if (depth_msg->encoding == enc::TYPE_32FC1) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); @@ -216,13 +219,13 @@ void PointCloudXyziNode::imageCb( // Convert Intensity Image to Pointcloud if (intensity_msg->encoding == enc::MONO8) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == enc::MONO16) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == enc::TYPE_16UC1) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == enc::TYPE_32FC1) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else { RCLCPP_ERROR( get_logger(), "Intensity image has unsupported encoding [%s]", @@ -230,7 +233,7 @@ void PointCloudXyziNode::imageCb( return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } diff --git a/depth_image_proc/src/point_cloud_xyzi_radial.cpp b/depth_image_proc/src/point_cloud_xyzi_radial.cpp index c592851e0..e89ee5a05 100644 --- a/depth_image_proc/src/point_cloud_xyzi_radial.cpp +++ b/depth_image_proc/src/point_cloud_xyzi_radial.cpp @@ -37,6 +37,7 @@ #include "depth_image_proc/visibility.h" +#include #include #include #include @@ -73,7 +74,7 @@ PointCloudXyziRadialNode::PointCloudXyziRadialNode(const rclcpp::NodeOptions & o std::placeholders::_2, std::placeholders::_3)); - // Create publisher with connect callback + // Setup lazy subscriber using publisher connection callback rclcpp::PublisherOptions pub_options; pub_options.event_callbacks.matched_callback = [this](rclcpp::MatchedInfo & s) @@ -98,16 +99,23 @@ PointCloudXyziRadialNode::PointCloudXyziRadialNode(const rclcpp::NodeOptions & o // depth image can use different transport.(e.g. 
compressedDepth) image_transport::TransportHints depth_hints(this, "raw", "depth_image_transport"); - sub_depth_.subscribe(this, depth_topic, depth_hints.getTransport()); + // Create subscriber with QoS matched to subscribed topic publisher + auto depth_qos_profile = image_proc::getTopicQosProfile(this, depth_topic); + sub_depth_.subscribe(this, depth_topic, depth_hints.getTransport(), depth_qos_profile); // intensity uses normal ros transport hints. image_transport::TransportHints hints(this); - sub_intensity_.subscribe(this, intensity_topic, hints.getTransport()); - sub_info_.subscribe(this, intensity_info_topic); + // Create subscriber with QoS matched to subscribed topic publisher + auto qos_profile = image_proc::getTopicQosProfile(this, intensity_topic); + sub_intensity_.subscribe(this, intensity_topic, hints.getTransport(), qos_profile); + sub_info_.subscribe(this, intensity_info_topic, rclcpp::QoS(10)); } }; + + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); pub_point_cloud_ = create_publisher( - "points", rclcpp::SensorDataQoS(), pub_options); + "points", rclcpp::SystemDefaultsQoS(), pub_options); } void PointCloudXyziRadialNode::imageCb( @@ -115,7 +123,7 @@ void PointCloudXyziRadialNode::imageCb( const Image::ConstSharedPtr & intensity_msg, const CameraInfo::ConstSharedPtr & info_msg) { - auto cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -143,9 +151,9 @@ void PointCloudXyziRadialNode::imageCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); @@ -153,13 +161,13 @@ void PointCloudXyziRadialNode::imageCb( } if (intensity_msg->encoding == sensor_msgs::image_encodings::MONO8) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == sensor_msgs::image_encodings::MONO16) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else if (intensity_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convertIntensity(intensity_msg, cloud_msg); + convertIntensity(intensity_msg, *cloud_msg); } else { RCLCPP_ERROR( get_logger(), "Intensity image has unsupported encoding [%s]", @@ -167,7 +175,7 @@ void PointCloudXyziRadialNode::imageCb( return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/point_cloud_xyzrgb.cpp b/depth_image_proc/src/point_cloud_xyzrgb.cpp index 5ef736d96..0f502be96 100644 --- a/depth_image_proc/src/point_cloud_xyzrgb.cpp +++ b/depth_image_proc/src/point_cloud_xyzrgb.cpp @@ -133,11 +133,15 @@ PointCloudXyzrgbNode::PointCloudXyzrgbNode(const 
rclcpp::NodeOptions & options) image_transport::TransportHints hints(this); sub_rgb_.subscribe( this, rgb_topic, - hints.getTransport(), rmw_qos_profile_default, sub_opts); - sub_info_.subscribe(this, rgb_info_topic); + hints.getTransport(), + rmw_qos_profile_default, sub_opts); + sub_info_.subscribe(this, rgb_info_topic, rclcpp::QoS(10)); } }; - pub_point_cloud_ = create_publisher("points", rclcpp::SensorDataQoS(), pub_options); + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_point_cloud_ = create_publisher("points", rclcpp::SystemDefaultsQoS(), + pub_options); } void PointCloudXyzrgbNode::imageCb( @@ -247,7 +251,7 @@ void PointCloudXyzrgbNode::imageCb( color_step = 3; } - auto cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; // Use depth image time stamp cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -259,9 +263,9 @@ void PointCloudXyzrgbNode::imageCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convertDepth(depth_msg, cloud_msg, model_, invalid_depth_); + convertDepth(depth_msg, *cloud_msg, model_, invalid_depth_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); @@ -270,22 +274,22 @@ void PointCloudXyzrgbNode::imageCb( // Convert RGB if (rgb_msg->encoding == sensor_msgs::image_encodings::RGB8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::BGR8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::BGRA8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::RGBA8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::MONO8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else { RCLCPP_ERROR( get_logger(), "RGB image has unsupported encoding [%s]", rgb_msg->encoding.c_str()); return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/point_cloud_xyzrgb_radial.cpp b/depth_image_proc/src/point_cloud_xyzrgb_radial.cpp index 7d2c92477..0694e3f8a 100644 --- a/depth_image_proc/src/point_cloud_xyzrgb_radial.cpp +++ b/depth_image_proc/src/point_cloud_xyzrgb_radial.cpp @@ -118,10 +118,13 @@ PointCloudXyzrgbRadialNode::PointCloudXyzrgbRadialNode(const rclcpp::NodeOptions // rgb uses normal ros transport hints. 
image_transport::TransportHints hints(this, "raw"); sub_rgb_.subscribe(this, rgb_topic, hints.getTransport()); - sub_info_.subscribe(this, rgb_info_topic); + sub_info_.subscribe(this, rgb_info_topic, rclcpp::QoS(10)); } }; - pub_point_cloud_ = create_publisher("points", rclcpp::SensorDataQoS(), pub_options); + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_point_cloud_ = create_publisher("points", rclcpp::SystemDefaultsQoS(), + pub_options); } void PointCloudXyzrgbRadialNode::imageCb( @@ -239,7 +242,7 @@ void PointCloudXyzrgbRadialNode::imageCb( color_step = 3; } - auto cloud_msg = std::make_shared(); + auto cloud_msg = std::make_unique(); cloud_msg->header = depth_msg->header; // Use depth image time stamp cloud_msg->height = depth_msg->height; cloud_msg->width = depth_msg->width; @@ -251,9 +254,9 @@ void PointCloudXyzrgbRadialNode::imageCb( // Convert Depth Image to Pointcloud if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else if (depth_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convertDepthRadial(depth_msg, cloud_msg, transform_); + convertDepthRadial(depth_msg, *cloud_msg, transform_); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str()); @@ -262,22 +265,22 @@ void PointCloudXyzrgbRadialNode::imageCb( // Convert RGB if (rgb_msg->encoding == sensor_msgs::image_encodings::RGB8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::BGR8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::BGRA8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::RGBA8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else if (rgb_msg->encoding == sensor_msgs::image_encodings::MONO8) { - convertRgb(rgb_msg, cloud_msg, red_offset, green_offset, blue_offset, color_step); + convertRgb(rgb_msg, *cloud_msg, red_offset, green_offset, blue_offset, color_step); } else { RCLCPP_ERROR( get_logger(), "RGB image has unsupported encoding [%s]", rgb_msg->encoding.c_str()); return; } - pub_point_cloud_->publish(*cloud_msg); + pub_point_cloud_->publish(std::move(cloud_msg)); } } // namespace depth_image_proc diff --git a/depth_image_proc/src/register.cpp b/depth_image_proc/src/register.cpp index 6269f4cf4..7db288436 100644 --- a/depth_image_proc/src/register.cpp +++ b/depth_image_proc/src/register.cpp @@ -38,9 +38,9 @@ #include "Eigen/Geometry" #include "depth_image_proc/visibility.h" #include "image_geometry/pinhole_camera_model.hpp" -#include "message_filters/subscriber.h" -#include "message_filters/synchronizer.h" -#include "message_filters/sync_policies/approximate_time.h" +#include "message_filters/subscriber.hpp" +#include 
"message_filters/synchronizer.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" #include "tf2_ros/buffer.h" #include "tf2_ros/transform_listener.h" @@ -92,7 +92,7 @@ class RegisterNode : public rclcpp::Node template void convert( const Image::ConstSharedPtr & depth_msg, - const Image::SharedPtr & registered_msg, + Image & registered_msg, const Eigen::Affine3d & depth_to_rgb); }; @@ -139,8 +139,8 @@ RegisterNode::RegisterNode(const rclcpp::NodeOptions & options) std::string topic = node_base->resolve_topic_or_service_name("depth/image_rect", false); image_transport::TransportHints hints(this, "raw", "depth_image_transport"); sub_depth_image_.subscribe(this, topic, hints.getTransport()); - sub_depth_info_.subscribe(this, "depth/camera_info"); - sub_rgb_info_.subscribe(this, "rgb/camera_info"); + sub_depth_info_.subscribe(this, "depth/camera_info", rclcpp::QoS(10)); + sub_rgb_info_.subscribe(this, "rgb/camera_info", rclcpp::QoS(10)); } }; // For compressed topics to remap appropriately, we need to pass a @@ -149,6 +149,8 @@ RegisterNode::RegisterNode(const rclcpp::NodeOptions & options) std::string topic = node_base->resolve_topic_or_service_name("depth_registered/image_rect", false); + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); pub_registered_ = image_transport::create_camera_publisher( this, topic, @@ -183,7 +185,7 @@ void RegisterNode::imageCb( /// don't call publish() in this cb. What's going on roscpp? } - auto registered_msg = std::make_shared(); + auto registered_msg = std::make_unique(); registered_msg->header.stamp = use_rgb_timestamp_ ? rgb_info_msg->header.stamp : depth_image_msg->header.stamp; registered_msg->header.frame_id = rgb_info_msg->header.frame_id; @@ -195,9 +197,9 @@ void RegisterNode::imageCb( // step and data set in convert(), depend on depth data type if (depth_image_msg->encoding == sensor_msgs::image_encodings::TYPE_16UC1) { - convert(depth_image_msg, registered_msg, depth_to_rgb); + convert(depth_image_msg, *registered_msg, depth_to_rgb); } else if (depth_image_msg->encoding == sensor_msgs::image_encodings::TYPE_32FC1) { - convert(depth_image_msg, registered_msg, depth_to_rgb); + convert(depth_image_msg, *registered_msg, depth_to_rgb); } else { RCLCPP_ERROR( get_logger(), "Depth image has unsupported encoding [%s]", @@ -206,24 +208,24 @@ void RegisterNode::imageCb( } // Registered camera info is the same as the RGB info, but uses the depth timestamp - auto registered_info_msg = std::make_shared(*rgb_info_msg); + auto registered_info_msg = std::make_unique(*rgb_info_msg); registered_info_msg->header.stamp = registered_msg->header.stamp; - pub_registered_.publish(registered_msg, registered_info_msg); + pub_registered_.publish(std::move(registered_msg), std::move(registered_info_msg)); } template void RegisterNode::convert( const Image::ConstSharedPtr & depth_msg, - const Image::SharedPtr & registered_msg, + Image & registered_msg, const Eigen::Affine3d & depth_to_rgb) { // Allocate memory for registered depth image - registered_msg->step = registered_msg->width * sizeof(T); - registered_msg->data.resize(registered_msg->height * registered_msg->step); + registered_msg.step = registered_msg.width * sizeof(T); + registered_msg.data.resize(registered_msg.height * registered_msg.step); // data is already zero-filled in the uint16 case, // but for floats we want to initialize everything to NaN. 
- DepthTraits::initializeBuffer(registered_msg->data); + DepthTraits::initializeBuffer(registered_msg.data); // Extract all the parameters we need double inv_depth_fx = 1.0 / depth_model_.fx(); @@ -239,7 +241,7 @@ void RegisterNode::convert( // registered image const T * depth_row = reinterpret_cast(&depth_msg->data[0]); int row_step = depth_msg->step / sizeof(T); - T * registered_data = reinterpret_cast(®istered_msg->data[0]); + T * registered_data = reinterpret_cast(®istered_msg.data[0]); int raw_index = 0; for (unsigned v = 0; v < depth_msg->height; ++v, depth_row += row_step) { for (unsigned u = 0; u < depth_msg->width; ++u, ++raw_index) { @@ -267,13 +269,13 @@ void RegisterNode::convert( int u_rgb = (rgb_fx * xyz_rgb.x() + rgb_Tx) * inv_Z + rgb_cx + 0.5; int v_rgb = (rgb_fy * xyz_rgb.y() + rgb_Ty) * inv_Z + rgb_cy + 0.5; - if (u_rgb < 0 || u_rgb >= static_cast(registered_msg->width) || - v_rgb < 0 || v_rgb >= static_cast(registered_msg->height)) + if (u_rgb < 0 || u_rgb >= static_cast(registered_msg.width) || + v_rgb < 0 || v_rgb >= static_cast(registered_msg.height)) { continue; } - T & reg_depth = registered_data[v_rgb * registered_msg->width + u_rgb]; + T & reg_depth = registered_data[v_rgb * registered_msg.width + u_rgb]; T new_depth = DepthTraits::fromMeters(xyz_rgb.z()); // Validity and Z-buffer checks if (!DepthTraits::valid(reg_depth) || reg_depth > new_depth) { @@ -303,15 +305,15 @@ void RegisterNode::convert( int u_rgb_2 = (rgb_fx * xyz_rgb_2.x() + rgb_Tx) * inv_Z + rgb_cx + 0.5; int v_rgb_2 = (rgb_fy * xyz_rgb_2.y() + rgb_Ty) * inv_Z + rgb_cy + 0.5; - if (u_rgb_1 < 0 || u_rgb_2 >= static_cast(registered_msg->width) || - v_rgb_1 < 0 || v_rgb_2 >= static_cast(registered_msg->height)) + if (u_rgb_1 < 0 || u_rgb_2 >= static_cast(registered_msg.width) || + v_rgb_1 < 0 || v_rgb_2 >= static_cast(registered_msg.height)) { continue; } for (int nv = v_rgb_1; nv <= v_rgb_2; ++nv) { for (int nu = u_rgb_1; nu <= u_rgb_2; ++nu) { - T & reg_depth = registered_data[nv * registered_msg->width + nu]; + T & reg_depth = registered_data[nv * registered_msg.width + nu]; T new_depth = DepthTraits::fromMeters(0.5 * (xyz_rgb_1.z() + xyz_rgb_2.z())); // Validity and Z-buffer checks if (!DepthTraits::valid(reg_depth) || reg_depth > new_depth) { diff --git a/image_pipeline/CHANGELOG.rst b/image_pipeline/CHANGELOG.rst index 101451fab..59b1e88e5 100644 --- a/image_pipeline/CHANGELOG.rst +++ b/image_pipeline/CHANGELOG.rst @@ -2,6 +2,44 @@ Changelog for package image_pipeline ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* Contributors: Michael Ferguson + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* DisparityNode: replace full_dp parameter with sgbm_mode (`#945 `_) + Previously, only the SGBM and HH modes were allowed +* add missing tutorial page (`#939 `_) + This was supposed to be part of `#938 `_ +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). 
+ Unified the camera_info tutorial so it isn't duplicated. Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* fix documentation links (`#933 `_) +* migrate image_pipeline docs (`#929 `_) + * Migrates image_pipeline overview page + * Migrates CameraInfo wiki page + * Adds links to the other packages in this stack + * Updates depth_image_proc and image_proc to have the overview page properly named and in the TOC +* Contributors: Michael Ferguson, Pablo David Aranda Rodríguez + 5.0.0 (2024-01-24) ------------------ * add myself as a maintainer (`#846 `_) diff --git a/image_pipeline/doc/tutorials.rst b/image_pipeline/doc/tutorials.rst index b5e83a32e..1dffb7a50 100644 --- a/image_pipeline/doc/tutorials.rst +++ b/image_pipeline/doc/tutorials.rst @@ -24,3 +24,83 @@ camera info topic. An example: * The fully resolved name for the camera info is now ``my_camera/camera_info``. * If your camera driver actually publishes ``another_ns/camera_info``, then you would have to remap ``my_camera/camera_info`` to ``another_ns/camera_info``. + +.. _`Using QoS Overrides`: + +Using QoS Overrides +------------------- +Most components in image_pipeline follow the Quality of Service (QoS) recommendations +of `REP-2003 `_ by default. This means that +subscribers are configured with the "Sensor Data" QoS (which uses "best effort"), +and publishers are configured with "System Default" QoS (which uses "reliable" transport). + +These QoS settings work well for many applications, but can be overridden using the +standard features of recent ROS 2 releases. This involves adding additional parameters +to your YAML or launch file. For example, we could update the +`image_publisher_file.launch.py` launch file to change the QoS settings: + +.. code-block:: python + + import os + + from ament_index_python.packages import get_package_share_directory + from launch import LaunchDescription + import launch_ros.actions + + def generate_launch_description(): + filename = os.path.join(get_package_share_directory('image_publisher'), 'launch', + 'splash.png') + return LaunchDescription([ + + launch_ros.actions.Node( + package='image_publisher', executable='image_publisher_node', output='screen', + arguments=[filename], + parameters=[{ + 'qos_overrides': { + '/camera/image_raw': { + 'publisher': { + 'reliability': 'best_effort', + 'history': 'keep_last', + 'depth': 100, + } + } + }, + }], + remappings=[('image_raw', '/camera/image_raw'), + ('camera_info', '/camera/camera_info')]), + ]) + +If we then run the launch file, we can see our settings are applied: + +.. code-block:: bash + + $ ros2 topic info /camera/image_raw -v + Type: sensor_msgs/msg/Image + + Publisher count: 1 + + Node name: ImagePublisher + Node namespace: / + Topic type: sensor_msgs/msg/Image + Topic type hash: RIHS01_d31d41a9a4c4bc8eae9be757b0beed306564f7526c88ea6a4588fb9582527d47 + Endpoint type: PUBLISHER + GID: 01.10.bf.bd.b7.85.a8.33.58.34.5c.ae.00.00.17.03 + QoS profile: + Reliability: BEST_EFFORT + History (Depth): KEEP_LAST (100) + Durability: VOLATILE + Lifespan: Infinite + Deadline: Infinite + Liveliness: AUTOMATIC + Liveliness lease duration: Infinite + + Subscription count: 0 + +A few things to note: + + * The topic name (``/camera/image_raw``) must be the fully resolved topic name, + and therefore we use the remapped topic name rather than the name in the code + for the component. + * Only ``reliability``, ``history``, and ``depth`` can be overwritten. + +For more information on QoS overrides, see the `design doc `_. 
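For component authors, the publisher side of this behavior is a small opt-in made before creating the publisher. The sketch below is illustrative only (the node and topic names are placeholders, not code from this repository); it shows the pattern the diffs in this release apply across the image_pipeline components:

.. code-block:: cpp

    #include "rclcpp/rclcpp.hpp"
    #include "sensor_msgs/msg/point_cloud2.hpp"

    class MinimalCloudPublisher : public rclcpp::Node
    {
    public:
      explicit MinimalCloudPublisher(const rclcpp::NodeOptions & options)
      : rclcpp::Node("minimal_cloud_publisher", options)
      {
        rclcpp::PublisherOptions pub_options;
        // Allow users to override QoS settings (history, depth, reliability)
        // from YAML or launch files
        pub_options.qos_overriding_options =
          rclcpp::QosOverridingOptions::with_default_policies();
        // Publish with "system default" QoS (reliable), per REP-2003
        pub_ = create_publisher<sensor_msgs::msg::PointCloud2>(
          "points", rclcpp::SystemDefaultsQoS(), pub_options);
      }

    private:
      rclcpp::Publisher<sensor_msgs::msg::PointCloud2>::SharedPtr pub_;
    };

With this opt-in in place, the ``qos_overrides`` parameters shown in the launch file above become available for the node's ``points`` topic.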
diff --git a/image_pipeline/package.xml b/image_pipeline/package.xml index 46eae3409..cf9ebef1d 100644 --- a/image_pipeline/package.xml +++ b/image_pipeline/package.xml @@ -2,7 +2,7 @@ image_pipeline - 5.0.0 + 6.0.3 image_pipeline fills the gap between getting raw images from a camera driver and higher-level vision processing. Vincent Rabaud diff --git a/image_proc/CHANGELOG.rst b/image_proc/CHANGELOG.rst index 1160e5388..607808d35 100644 --- a/image_proc/CHANGELOG.rst +++ b/image_proc/CHANGELOG.rst @@ -2,6 +2,69 @@ Changelog for package image_proc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Publish using unique ptr (`#1016 `_) + Prevents doing an extra copy of the data when using intra-process + communication. +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* Updated deprecated rcpputils::path (`#1014 `_) +* Contributors: Alejandro Hernández Cordero, Błażej Sowa, Michael Ferguson + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* Fix parameter names in components.rst (`#959 `_) + In the docs for `image_proc::CropDecimateNode`, change the parameter + names `x_offset` and `y_offset` to `offset_x` and `offset_y`, matching + the actual names of parameters defined in crop_decimate.cpp +* fix image publisher remapping (`#941 `_) + Addresses `#940 `_ - fixes the compressed/etc topic remapping for publishers +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* Add TrackMarkerNode to image_proc (`#930 `_) + Converts sensor_msgs/Image into geometry_msgs/PoseStamped using OpenCV Aruco marker detection.
+* migrate image_pipeline docs (`#929 `_) + * Migrates image_pipeline overview page + * Migrates CameraInfo wiki page + * Adds links to the other packages in this stack + * Updates depth_image_proc and image_proc to have the overview page properly named and in the TOC +* migrate depth_image_proc docs (`#926 `_) +* fixup bash command rendering (`#927 `_) + I didn't actually rebuild with the suggestions in `#925 `_ - but this is + actually proper rendering (even my three-ticks version wasn't quite + pretty) +* migrate and update image_proc docs (`#925 `_) + * move component documentation from ros wiki, update for various changes + * add tutorial on how to run components + * update tutorial on debayer/rectify to use launch file + * remove image_proc node, it has always been completely broken and the + launch file has the same (but working) functionality + * update launch file to support namespace parameter for tutorial +* QoS improvements for image_proc and stereo_image_proc (`#922 `_) + First part of `#847 `_ + * Add QoS overrides for all publishers (in the new, standard way) + * stereo_image_proc: Default subscriber QoS to SensorDataQoS + * Clean up some of the comments around lazy subscribers, make them more + consistent across nodes +* Contributors: Michael Ferguson, Noah Mollerstuen + 5.0.0 (2024-01-24) ------------------ * Port image_proc test to ROS 2 (`#910 `_) diff --git a/image_proc/doc/tutorials.rst b/image_proc/doc/tutorials.rst index b13fbc2a0..f86ef123c 100644 --- a/image_proc/doc/tutorials.rst +++ b/image_proc/doc/tutorials.rst @@ -75,6 +75,10 @@ Remapping camera_info Topics ---------------------------- See `tutorial in image_pipeline `_. +Using QoS Overrides +------------------- +See `tutorial in image_pipeline `_. + .. _Using image_proc Launch File: Using image_proc Launch File diff --git a/image_proc/include/image_proc/utils.hpp b/image_proc/include/image_proc/utils.hpp index 6be4b88f7..ee32ef21d 100644 --- a/image_proc/include/image_proc/utils.hpp +++ b/image_proc/include/image_proc/utils.hpp @@ -40,6 +40,7 @@ namespace image_proc { +inline rmw_qos_profile_t getTopicQosProfile(rclcpp::Node * node, const std::string & topic) { /** diff --git a/image_proc/package.xml b/image_proc/package.xml index 0c06ea17c..9457ed952 100644 --- a/image_proc/package.xml +++ b/image_proc/package.xml @@ -2,7 +2,7 @@ image_proc - 5.0.0 + 6.0.3 Single image rectification and color processing.
Vincent Rabaud diff --git a/image_proc/src/crop_decimate.cpp b/image_proc/src/crop_decimate.cpp index a53ab9ac2..29a18d5e0 100644 --- a/image_proc/src/crop_decimate.cpp +++ b/image_proc/src/crop_decimate.cpp @@ -152,12 +152,10 @@ CropDecimateNode::CropDecimateNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher with QoS matched to subscribed topic publisher - auto qos_profile = getTopicQosProfile(this, image_topic_); - pub_ = image_transport::create_camera_publisher(this, pub_topic, qos_profile, pub_options); + pub_ = image_transport::create_camera_publisher(this, pub_topic, rmw_qos_profile_default, + pub_options); } void CropDecimateNode::imageCb( @@ -327,11 +325,11 @@ void CropDecimateNode::imageCb( // Create output Image message /// @todo Could save copies by allocating this above and having output.image alias it - sensor_msgs::msg::Image::SharedPtr out_image = output.toImageMsg(); + auto out_image = std::make_unique(); + output.toImageMsg(*out_image); // Create updated CameraInfo message - sensor_msgs::msg::CameraInfo::SharedPtr out_info = - std::make_shared(*info_msg); + auto out_info = std::make_unique(*info_msg); int binning_x = std::max(static_cast(info_msg->binning_x), 1); int binning_y = std::max(static_cast(info_msg->binning_y), 1); out_info->binning_x = binning_x * decimation_x_; @@ -353,7 +351,7 @@ void CropDecimateNode::imageCb( out_info->header.frame_id = target_frame_id_; } - pub_.publish(out_image, out_info); + pub_.publish(std::move(out_image), std::move(out_info)); } } // namespace image_proc diff --git a/image_proc/src/crop_non_zero.cpp b/image_proc/src/crop_non_zero.cpp index 100a23ed7..772484334 100644 --- a/image_proc/src/crop_non_zero.cpp +++ b/image_proc/src/crop_non_zero.cpp @@ -80,19 +80,16 @@ CropNonZeroNode::CropNonZeroNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher with QoS matched to subscribed topic publisher - auto qos_profile = getTopicQosProfile(this, image_topic_); - pub_ = image_transport::create_publisher(this, pub_topic, qos_profile, pub_options); + pub_ = image_transport::create_publisher(this, pub_topic, rmw_qos_profile_default, pub_options); } void CropNonZeroNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_msg) { - cv_bridge::CvImagePtr cv_ptr; + cv_bridge::CvImageConstPtr cv_ptr; try { - cv_ptr = cv_bridge::toCvCopy(raw_msg); + cv_ptr = cv_bridge::toCvShare(raw_msg); } catch (cv_bridge::Exception & e) { RCLCPP_ERROR(this->get_logger(), "cv_bridge exception: %s", e.what()); return; @@ -135,7 +132,10 @@ void CropNonZeroNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & ra out_msg.encoding = raw_msg->encoding; out_msg.image = cv_ptr->image(r); - pub_.publish(out_msg.toImageMsg()); + auto out_image = std::make_unique(); + out_msg.toImageMsg(*out_image); + + pub_.publish(std::move(out_image)); } } // namespace image_proc diff --git a/image_proc/src/debayer.cpp b/image_proc/src/debayer.cpp index fe4faf9ec..8eddbd77e 100644 --- a/image_proc/src/debayer.cpp +++ b/image_proc/src/debayer.cpp @@ -84,13 +84,12 @@ 
DebayerNode::DebayerNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher with QoS matched to subscribed topic publisher - auto qos_profile = getTopicQosProfile(this, image_topic_); - pub_mono_ = image_transport::create_publisher(this, mono_topic, qos_profile, pub_options); - pub_color_ = image_transport::create_publisher(this, color_topic, qos_profile, pub_options); + pub_mono_ = image_transport::create_publisher(this, mono_topic, rmw_qos_profile_default, + pub_options); + pub_color_ = image_transport::create_publisher(this, color_topic, rmw_qos_profile_default, + pub_options); } void DebayerNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_msg) @@ -104,7 +103,7 @@ void DebayerNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_ms // First publish to mono if needed if (pub_mono_.getNumSubscribers()) { if (sensor_msgs::image_encodings::isMono(raw_msg->encoding)) { - pub_mono_.publish(raw_msg); + pub_mono_.publish(std::move(raw_msg)); } else { if ((bit_depth != 8) && (bit_depth != 16)) { RCLCPP_WARN( @@ -114,18 +113,21 @@ void DebayerNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_ms } else { // Use cv_bridge to convert to Mono. If a type is not supported, // it will error out there - sensor_msgs::msg::Image::SharedPtr gray_msg; + auto gray_msg = std::make_unique(); + cv_bridge::CvImagePtr cv_image; try { if (bit_depth == 8) { - gray_msg = - cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::MONO8)->toImageMsg(); + cv_image = + cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::MONO8); } else { - gray_msg = - cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::MONO16)->toImageMsg(); + cv_image = + cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::MONO16); } - pub_mono_.publish(gray_msg); + cv_image->toImageMsg(*gray_msg); + + pub_mono_.publish(std::move(gray_msg)); } catch (cv_bridge::Exception & e) { RCLCPP_WARN(this->get_logger(), "cv_bridge conversion error: '%s'", e.what()); } @@ -155,8 +157,7 @@ void DebayerNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_ms raw_msg->height, raw_msg->width, CV_MAKETYPE(type, 1), const_cast(&raw_msg->data[0]), raw_msg->step); - sensor_msgs::msg::Image::SharedPtr color_msg = - std::make_shared(); + auto color_msg = std::make_unique(); color_msg->header = raw_msg->header; color_msg->height = raw_msg->height; color_msg->width = raw_msg->width; @@ -219,16 +220,17 @@ void DebayerNode::imageCb(const sensor_msgs::msg::Image::ConstSharedPtr & raw_ms cv::cvtColor(bayer, color, code); } - pub_color_.publish(color_msg); + pub_color_.publish(std::move(color_msg)); } else if (raw_msg->encoding == sensor_msgs::image_encodings::YUV422 || // NOLINT raw_msg->encoding == sensor_msgs::image_encodings::YUV422_YUY2) { // Use cv_bridge to convert to BGR8 - sensor_msgs::msg::Image::SharedPtr color_msg; + auto color_msg = std::make_unique(); try { - color_msg = cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::BGR8)->toImageMsg(); - pub_color_.publish(color_msg); + auto cv_image = cv_bridge::toCvCopy(raw_msg, sensor_msgs::image_encodings::BGR8); + cv_image->toImageMsg(*color_msg); + pub_color_.publish(std::move(color_msg)); } catch (const cv_bridge::Exception & e) { RCLCPP_WARN(this->get_logger(), "cv_bridge conversion 
error: '%s'", e.what()); } diff --git a/image_proc/src/rectify.cpp b/image_proc/src/rectify.cpp index 24d37768c..d1ada0c97 100644 --- a/image_proc/src/rectify.cpp +++ b/image_proc/src/rectify.cpp @@ -81,12 +81,10 @@ RectifyNode::RectifyNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher with QoS matched to subscribed topic publisher - auto qos_profile = getTopicQosProfile(this, image_topic_); - pub_rect_ = image_transport::create_publisher(this, "image_rect", qos_profile, pub_options); + pub_rect_ = image_transport::create_publisher(this, "image_rect", rmw_qos_profile_default, + pub_options); } void RectifyNode::imageCb( @@ -154,9 +152,9 @@ void RectifyNode::imageCb( model_.rectifyImage(image, rect, interpolation_); // Allocate new rectified image message - sensor_msgs::msg::Image::SharedPtr rect_msg = - cv_bridge::CvImage(image_msg->header, image_msg->encoding, rect).toImageMsg(); - pub_rect_.publish(rect_msg); + auto rect_msg = std::make_unique(); + cv_bridge::CvImage(image_msg->header, image_msg->encoding, rect).toImageMsg(*rect_msg); + pub_rect_.publish(std::move(rect_msg)); TRACEPOINT( image_proc_rectify_fini, diff --git a/image_proc/src/resize.cpp b/image_proc/src/resize.cpp index fdd908df1..bca4a1d33 100644 --- a/image_proc/src/resize.cpp +++ b/image_proc/src/resize.cpp @@ -89,13 +89,10 @@ ResizeNode::ResizeNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher with QoS matched to subscribed topic publisher - auto qos_profile = getTopicQosProfile(this, image_topic_); pub_image_ = - image_transport::create_camera_publisher(this, pub_topic, qos_profile, pub_options); + image_transport::create_camera_publisher(this, pub_topic, rmw_qos_profile_default, pub_options); } void ResizeNode::imageCb( @@ -132,8 +129,7 @@ void ResizeNode::imageCb( cv::resize(cv_ptr->image, scaled_cv_.image, cv::Size(width, height), 0, 0, interpolation_); } - sensor_msgs::msg::CameraInfo::SharedPtr dst_info_msg = - std::make_shared(*info_msg); + auto dst_info_msg = std::make_unique(*info_msg); double scale_y; double scale_x; @@ -166,9 +162,13 @@ void ResizeNode::imageCb( dst_info_msg->roi.width = static_cast(dst_info_msg->roi.width * scale_x); dst_info_msg->roi.height = static_cast(dst_info_msg->roi.height * scale_y); + auto dst_image_msg = std::make_unique(); + scaled_cv_.header = image_msg->header; scaled_cv_.encoding = image_msg->encoding; - pub_image_.publish(*scaled_cv_.toImageMsg(), *dst_info_msg); + scaled_cv_.toImageMsg(*dst_image_msg); + + pub_image_.publish(std::move(dst_image_msg), std::move(dst_info_msg)); TRACEPOINT( image_proc_resize_fini, diff --git a/image_proc/src/track_marker.cpp b/image_proc/src/track_marker.cpp index c0586e475..e103de067 100644 --- a/image_proc/src/track_marker.cpp +++ b/image_proc/src/track_marker.cpp @@ -86,10 +86,8 @@ TrackMarkerNode::TrackMarkerNode(const rclcpp::NodeOptions & options) } }; - // Allow overriding QoS settings (history, depth, reliability) + // Create publisher - allow overriding QoS settings (history, depth, reliability) 
pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); - - // Create publisher pub_ = this->create_publisher( "tracked_pose", 10, pub_options); } diff --git a/image_proc/test/rostest.cpp b/image_proc/test/rostest.cpp index f84594af0..633a63cfa 100644 --- a/image_proc/test/rostest.cpp +++ b/image_proc/test/rostest.cpp @@ -32,6 +32,7 @@ #include +#include #include #include @@ -65,9 +66,9 @@ class ImageProcTest topic_color = camera_ns + "image_color"; topic_rect_color = camera_ns + "image_rect_color"; - const rcpputils::fs::path base{_SRC_RESOURCES_DIR_PATH}; - const rcpputils::fs::path raw_image_file = base / "logo.png"; - const rcpputils::fs::path cam_info_file = base / "calibration_file.ini"; + const std::filesystem::path base{_SRC_RESOURCES_DIR_PATH}; + const std::filesystem::path raw_image_file = base / "logo.png"; + const std::filesystem::path cam_info_file = base / "calibration_file.ini"; /// @todo Test variety of encodings for raw image (bayer, mono, color) cv::Mat img = cv::imread(raw_image_file.string(), 0); diff --git a/image_publisher/CHANGELOG.rst b/image_publisher/CHANGELOG.rst index aac76262e..7c651a1e9 100644 --- a/image_publisher/CHANGELOG.rst +++ b/image_publisher/CHANGELOG.rst @@ -2,6 +2,71 @@ Changelog for package image_publisher ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Publish using unique ptr (`#1016 `_) + Prevents doing an extra copy of the data when using intra-process + communication. +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* Contributors: Błażej Sowa, Michael Ferguson + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ +* [rolling] image_publisher: Fix loading of the camera info parameters on startup (`#983 `_) + As described in + https://github.com/ros-perception/image_pipeline/issues/965 camera info + is not loaded from the file on node initialization, but only when the + parameter is reloaded. + This PR resolves this issue and should be straightforward to port it to + `Humble`, `Iron` and `Jazzy`. +* [rolling] image_publisher: add field of view parameter (`#985 `_) + Currently, the default value for focal length when no camera info is + provided defaults to `1.0` rendering whole approximate intrinsics and + projection matrices useless. Based on [this + article](https://learnopencv.com/approximate-focal-length-for-webcams-and-cell-phone-cameras/), + I propose a better approximation of the focal length based on the field + of view of the camera. + For most of the use cases, users will either know the field of view of + the camera the used, or they already calibrated it ahead of time. + If there is some documentation to fill. please let me know. + This PR should be straightforward to port it to `Humble`, `Iron` and + `Jazzy`. + --------- + Co-authored-by: Alejandro Hernández Cordero +* Contributors: Krzysztof Wojciechowski + +6.0.0 (2024-05-27) +------------------ +* [rolling] image_publisher: Fix image, constantly flipping when static image is published (`#986 `_) + Continuation of + https://github.com/ros-perception/image_pipeline/pull/984. + When publishing video stream from a camera, the image was flipped + correctly. 
Yet for a static image, which was loaded once, the flip + function was applied every time `ImagePublisher::doWork()` was called, + resulting in the published image being flipped back and forth all the + time. + This PR should be straightforward to port it to `Humble`, `Iron` and + `Jazzy`. +* Contributors: Krzysztof Wojciechowski + +5.0.1 (2024-03-26) +------------------ +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* add docs for image_rotate/publisher (`#936 `_) +* Contributors: Michael Ferguson + 5.0.0 (2024-01-24) ------------------ * Removed cfg files related with ROS 1 parameters (`#911 `_) diff --git a/image_publisher/doc/components.rst b/image_publisher/doc/components.rst index afd7fcf5e..e381edfc2 100644 --- a/image_publisher/doc/components.rst +++ b/image_publisher/doc/components.rst @@ -14,6 +14,7 @@ Published Topics Parameters ^^^^^^^^^^ * **filename** (string, default: ""): Name of image file to be published. + * **field_of_view** (double, default: 0): Camera field of view (deg) used to calculate focal length for camera info topic. * **flip_horizontal** (bool, default: false): Flip output image horizontally. * **flip_vertical** (bool, default: false): Flip output image vertically. * **frame_id** (string, default: "camera") Frame id inserted in published diff --git a/image_publisher/include/image_publisher/image_publisher.hpp b/image_publisher/include/image_publisher/image_publisher.hpp index 6c22b0199..1ad1dcdea 100644 --- a/image_publisher/include/image_publisher/image_publisher.hpp +++ b/image_publisher/include/image_publisher/image_publisher.hpp @@ -65,8 +65,10 @@ class ImagePublisher : public rclcpp::Node rclcpp::Node::OnSetParametersCallbackHandle::SharedPtr on_set_parameters_callback_handle_; std::string filename_; + double field_of_view_; bool flip_horizontal_; bool flip_vertical_; + bool image_flipped_; bool retry_; // If enabled will retry loading image from the filename_ int timeout_; // Time after which retrying starts diff --git a/image_publisher/package.xml b/image_publisher/package.xml index 23f15530f..f3bbc02b6 100644 --- a/image_publisher/package.xml +++ b/image_publisher/package.xml @@ -2,7 +2,7 @@ image_publisher - 5.0.0 + 6.0.3 Contains a node publish an image stream from single image file diff --git a/image_publisher/src/image_publisher.cpp b/image_publisher/src/image_publisher.cpp index 1f8068de4..44a4a514b 100644 --- a/image_publisher/src/image_publisher.cpp +++ b/image_publisher/src/image_publisher.cpp @@ -31,7 +31,9 @@ // ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. 
+#include #include +#include #include #include #include @@ -58,8 +60,12 @@ ImagePublisher::ImagePublisher( // fully expanded and remapped topic name to image_transport auto node_base = this->get_node_base_interface(); std::string topic_name = node_base->resolve_topic_or_service_name("image_raw", false); - pub_ = image_transport::create_camera_publisher(this, topic_name); + rclcpp::PublisherOptions pub_options; + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); + pub_ = image_transport::create_camera_publisher(this, topic_name, rmw_qos_profile_default, + pub_options); + field_of_view_ = this->declare_parameter("field_of_view", static_cast<double>(0)); flip_horizontal_ = this->declare_parameter("flip_horizontal", false); flip_vertical_ = this->declare_parameter("flip_vertical", false); frame_id_ = this->declare_parameter("frame_id", std::string("camera")); @@ -71,36 +77,48 @@ ImagePublisher::ImagePublisher( auto param_change_callback = [this](std::vector<rclcpp::Parameter> parameters) -> rcl_interfaces::msg::SetParametersResult { + bool call_init = false; + bool call_reconfigure = false; + auto result = rcl_interfaces::msg::SetParametersResult(); result.successful = true; for (auto parameter : parameters) { if (parameter.get_name() == "filename") { filename_ = parameter.as_string(); RCLCPP_INFO(get_logger(), "Reset filename as '%s'", filename_.c_str()); - ImagePublisher::onInit(); - return result; + call_init = true; + } else if (parameter.get_name() == "field_of_view") { + field_of_view_ = parameter.as_double(); + RCLCPP_INFO(get_logger(), "Reset field_of_view as '%f'", field_of_view_); + call_init = true; } else if (parameter.get_name() == "flip_horizontal") { flip_horizontal_ = parameter.as_bool(); RCLCPP_INFO(get_logger(), "Reset flip_horizontal as '%d'", flip_horizontal_); - ImagePublisher::onInit(); - return result; + call_init = true; } else if (parameter.get_name() == "flip_vertical") { flip_vertical_ = parameter.as_bool(); RCLCPP_INFO(get_logger(), "Reset flip_vertical as '%d'", flip_vertical_); - ImagePublisher::onInit(); - return result; + call_init = true; } else if (parameter.get_name() == "frame_id") { frame_id_ = parameter.as_string(); RCLCPP_INFO(get_logger(), "Reset frame_id as '%s'", frame_id_.c_str()); } else if (parameter.get_name() == "publish_rate") { publish_rate_ = parameter.as_double(); RCLCPP_INFO(get_logger(), "Reset publish_rate as '%lf'", publish_rate_); + call_reconfigure = true; } else if (parameter.get_name() == "camera_info_url") { camera_info_url_ = parameter.as_string(); - RCLCPP_INFO(get_logger(), "Reset camera_info_url as '%s'", camera_info_url_.c_str()); + RCLCPP_INFO(get_logger(), "Reset camera_info_url as '%s'", camera_info_url_.c_str()); + call_reconfigure = true; } } - ImagePublisher::reconfigureCallback(); + // reconfigureCallback() is called within onInit() so there is no need to call it twice + if (call_reconfigure && !call_init) { + ImagePublisher::reconfigureCallback(); + } else if (call_init) { + ImagePublisher::onInit(); + } + return result; }; on_set_parameters_callback_handle_ = this->add_on_set_parameters_callback(param_change_callback); @@ -144,19 +162,23 @@ void ImagePublisher::doWork() if (!cap_.read(image_)) { cap_.set(cv::CAP_PROP_POS_FRAMES, 0); } + image_flipped_ = false; } - if (flip_image_) { + if (flip_image_ && !image_flipped_) { cv::flip(image_, image_, flip_value_); + image_flipped_ = true; } - sensor_msgs::msg::Image::SharedPtr out_img = - cv_bridge::CvImage(std_msgs::msg::Header(), "bgr8",
image_).toImageMsg(); + auto out_img = std::make_unique(); + cv_bridge::CvImage(std_msgs::msg::Header(), "bgr8", image_).toImageMsg(*out_img); out_img->header.frame_id = frame_id_; out_img->header.stamp = this->now(); - camera_info_.header.frame_id = out_img->header.frame_id; - camera_info_.header.stamp = out_img->header.stamp; - pub_.publish(*out_img, camera_info_); + auto cam_info = std::make_unique(camera_info_); + cam_info->header.frame_id = out_img->header.frame_id; + cam_info->header.stamp = out_img->header.stamp; + + pub_.publish(std::move(out_img), std::move(cam_info)); } catch (cv::Exception & e) { RCLCPP_ERROR( this->get_logger(), "Image processing error: %s %s %s %i", @@ -213,20 +235,25 @@ void ImagePublisher::onInit() } else { flip_image_ = false; } + image_flipped_ = false; // Image newly read, needs to be flipped camera_info_.width = image_.cols; camera_info_.height = image_.rows; camera_info_.distortion_model = "plumb_bob"; camera_info_.d = {0, 0, 0, 0, 0}; - camera_info_.k = {1, 0, static_cast(camera_info_.width / 2), 0, 1, + + double f_approx = 1.0; // FOV equal to 0 disables the approximation + if (std::abs(field_of_view_) > std::numeric_limits::epsilon()) { + // Based on https://learnopencv.com/approximate-focal-length-for-webcams-and-cell-phone-cameras/ + f_approx = (camera_info_.width / 2) / std::tan((field_of_view_ * M_PI / 180) / 2); + } + camera_info_.k = {f_approx, 0, static_cast(camera_info_.width / 2), 0, f_approx, static_cast(camera_info_.height / 2), 0, 0, 1}; camera_info_.r = {1, 0, 0, 0, 1, 0, 0, 0, 1}; - camera_info_.p = {1, 0, static_cast(camera_info_.width / 2), 0, 0, 1, + camera_info_.p = {f_approx, 0, static_cast(camera_info_.width / 2), 0, 0, f_approx, static_cast(camera_info_.height / 2), 0, 0, 0, 1, 0}; - timer_ = this->create_wall_timer( - std::chrono::milliseconds(static_cast(1000 / publish_rate_)), - std::bind(&ImagePublisher::doWork, this)); + ImagePublisher::reconfigureCallback(); } } // namespace image_publisher diff --git a/image_rotate/CHANGELOG.rst b/image_rotate/CHANGELOG.rst index d5c5eda59..d6eae8fed 100644 --- a/image_rotate/CHANGELOG.rst +++ b/image_rotate/CHANGELOG.rst @@ -2,6 +2,40 @@ Changelog for package image_rotate ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Publish using unique ptr (`#1016 `_) + Prevents doing an extra copy of the data when using intra-process + communication. +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* Contributors: Błażej Sowa, Michael Ferguson + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* fix image publisher remapping (`#941 `_) + Addresses `#940 `_ - fixes the compressed/etc topic remapping for publishers +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. 
Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* add docs for image_rotate/publisher (`#936 `_) +* Contributors: Michael Ferguson + 5.0.0 (2024-01-24) ------------------ * Removed cfg files related with ROS 1 parameters (`#911 `_) diff --git a/image_rotate/package.xml b/image_rotate/package.xml index 01a21a290..6af9f5e9e 100644 --- a/image_rotate/package.xml +++ b/image_rotate/package.xml @@ -2,7 +2,7 @@ image_rotate - 5.0.0 + 6.0.3

Contains a node that rotates an image stream in a way that minimizes diff --git a/image_rotate/src/image_rotate_node.cpp b/image_rotate/src/image_rotate_node.cpp index 280954489..6b8097cc2 100644 --- a/image_rotate/src/image_rotate_node.cpp +++ b/image_rotate/src/image_rotate_node.cpp @@ -262,10 +262,10 @@ void ImageRotateNode::do_work( cv::warpAffine(in_image, out_image, rot_matrix, cv::Size(out_size, out_size)); // Publish the image. - sensor_msgs::msg::Image::SharedPtr out_img = - cv_bridge::CvImage(msg->header, msg->encoding, out_image).toImageMsg(); + auto out_img = std::make_unique(); + cv_bridge::CvImage(msg->header, msg->encoding, out_image).toImageMsg(*out_img); out_img->header.frame_id = transform.child_frame_id; - img_pub_.publish(out_img); + img_pub_.publish(std::move(out_img)); } catch (const cv::Exception & e) { RCLCPP_ERROR( get_logger(), @@ -344,6 +344,8 @@ void ImageRotateNode::onInit() auto node_base = this->get_node_base_interface(); std::string topic = node_base->resolve_topic_or_service_name("rotated/image", false); + // Allow overriding QoS settings (history, depth, reliability) + pub_options.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); img_pub_ = image_transport::create_publisher(this, topic, rmw_qos_profile_default, pub_options); } } // namespace image_rotate diff --git a/image_view/CHANGELOG.rst b/image_view/CHANGELOG.rst index 905a8fe5e..7ae219791 100644 --- a/image_view/CHANGELOG.rst +++ b/image_view/CHANGELOG.rst @@ -2,6 +2,66 @@ Changelog for package image_view ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Finish QoS updates (`#1019 `_) + This implements the remainder of `#847 `_: + - Make sure publishers default to system defaults (reliable) + - Add QoS overriding where possible (some of the image_transport / + message_filters stuff doesn't really support that) + - Use the matching heuristic for subscribers consistently +* Contributors: Michael Ferguson + +6.0.2 (2024-07-23) +------------------ +* Removed deprecation warnings (`#1010 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.1 (2024-07-22) +------------------ +* Updated deprecated message filter headers (`#1012 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* add docs for image_rotate/publisher (`#936 `_) +* migrate image_view docs (`#934 `_) + * migrate docs from ROS wiki + * FIX: video recorder start/end services should only be advertised when + feature is enabled + * FIX: image remapping didn't work as expected/documented in + stereo_image_proc +* default to encoding in the image message (`#921 `_) + My camera is publishing rgb8 encoding - the existing code throws an + error that 8UC3 is not a valid encoding, but if we pass rgb8 from the + message then things work fine. The encoding in the image should always + be more descriptive than just the bit and channel size. 
+ If encoding is not filled in, the existing behavior is used as a + fallback +* Update extract_images_sync to add sec_per_frame parameter (`#920 `_) + Added sec_per_frame parameter to allow decimation of frames being + synchronized and captured. + fixes `#726 `_ + --------- + Co-authored-by: Michael Ferguson +* Port extract_images_sync script to ROS 2 (`#919 `_) + Change Description: The extract_images_sync python script was upgraded + to ROS 2. + Testing: Upgraded node tested against Iron on Ubuntu 22.04.3 LTS + Issue: fixes `#860 `_ + --------- + Co-authored-by: Alejandro Hernández Cordero + Co-authored-by: Michael Ferguson +* Contributors: Michael Ferguson, Siddharth Vaghela + 5.0.0 (2024-01-24) ------------------ * remove the last bit of boost (`#912 `_) diff --git a/image_view/include/image_view/stereo_view_node.hpp b/image_view/include/image_view/stereo_view_node.hpp index f2c40c625..463ed1116 100644 --- a/image_view/include/image_view/stereo_view_node.hpp +++ b/image_view/include/image_view/stereo_view_node.hpp @@ -53,9 +53,9 @@ #include #include -#include "message_filters/subscriber.h" -#include "message_filters/sync_policies/approximate_time.h" -#include "message_filters/sync_policies/exact_time.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" +#include "message_filters/sync_policies/exact_time.hpp" #include diff --git a/image_view/package.xml b/image_view/package.xml index 1ec083f5f..189637914 100644 --- a/image_view/package.xml +++ b/image_view/package.xml @@ -2,7 +2,7 @@ image_view - 5.0.0 + 6.0.3 A simple viewer for ROS image topics. Includes a specialized viewer for stereo + disparity images. diff --git a/image_view/src/extract_images_node.cpp b/image_view/src/extract_images_node.cpp index 271ced3a9..e073616ca 100644 --- a/image_view/src/extract_images_node.cpp +++ b/image_view/src/extract_images_node.cpp @@ -82,7 +82,7 @@ ExtractImagesNode::ExtractImagesNode(const rclcpp::NodeOptions & options) sub_ = image_transport::create_subscription( this, topic, std::bind( &ExtractImagesNode::image_cb, this, std::placeholders::_1), - hints.getTransport()); + hints.getTransport(), rmw_qos_profile_sensor_data); auto topics = this->get_topic_names_and_types(); diff --git a/image_view/src/image_saver_node.cpp b/image_view/src/image_saver_node.cpp index 075d84853..abd8d2576 100644 --- a/image_view/src/image_saver_node.cpp +++ b/image_view/src/image_saver_node.cpp @@ -85,7 +85,7 @@ ImageSaverNode::ImageSaverNode(const rclcpp::NodeOptions & options) cam_sub_ = image_transport::create_camera_subscription( this, topic, std::bind( &ImageSaverNode::callbackWithCameraInfo, this, std::placeholders::_1, std::placeholders::_2), - hints.getTransport()); + hints.getTransport(), rmw_qos_profile_sensor_data); // Useful when CameraInfo is not being published image_sub_ = image_transport::create_subscription( diff --git a/image_view/src/image_view_node.cpp b/image_view/src/image_view_node.cpp index fe607d9d2..951eedd9c 100644 --- a/image_view/src/image_view_node.cpp +++ b/image_view/src/image_view_node.cpp @@ -117,8 +117,8 @@ ImageViewNode::ImageViewNode(const rclcpp::NodeOptions & options) pub_ = this->create_publisher("output", 1); sub_ = image_transport::create_subscription( - this, topic, std::bind( - &ImageViewNode::imageCb, this, std::placeholders::_1), hints.getTransport()); + this, topic, std::bind(&ImageViewNode::imageCb, this, std::placeholders::_1), + hints.getTransport(), rmw_qos_profile_sensor_data); auto topics = 
this->get_topic_names_and_types(); diff --git a/image_view/src/stereo_view_node.cpp b/image_view/src/stereo_view_node.cpp index 5113da7f2..ec5fd39ab 100644 --- a/image_view/src/stereo_view_node.cpp +++ b/image_view/src/stereo_view_node.cpp @@ -53,10 +53,10 @@ #include #include "cv_bridge/cv_bridge.hpp" -#include "message_filters/subscriber.h" -#include "message_filters/sync_policies/approximate_time.h" -#include "message_filters/sync_policies/exact_time.h" -#include "message_filters/synchronizer.h" +#include "message_filters/subscriber.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" +#include "message_filters/sync_policies/exact_time.hpp" +#include "message_filters/synchronizer.hpp" #include "image_view/stereo_view_node.hpp" @@ -130,7 +130,7 @@ StereoViewNode::StereoViewNode(const rclcpp::NodeOptions & options) // Subscribe to three input topics. left_sub_.subscribe(this, left_topic, hints.getTransport()); right_sub_.subscribe(this, right_topic, hints.getTransport()); - disparity_sub_.subscribe(this, disparity_topic); + disparity_sub_.subscribe(this, disparity_topic, rclcpp::QoS(10)); RCLCPP_INFO( this->get_logger(), diff --git a/stereo_image_proc/CHANGELOG.rst b/stereo_image_proc/CHANGELOG.rst index ce6800f57..5366a5819 100644 --- a/stereo_image_proc/CHANGELOG.rst +++ b/stereo_image_proc/CHANGELOG.rst @@ -2,6 +2,44 @@ Changelog for package stereo_image_proc ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ +* Publish using unique ptr (`#1016 `_) + Prevents doing an extra copy of the data when using intra-process + communication. +* Contributors: Błażej Sowa + +6.0.2 (2024-07-23) +------------------ +* Removed deprecation warnings (`#1010 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.1 (2024-07-22) +------------------ +* Updated deprecated message filter headers (`#1012 `_) +* Contributors: Alejandro Hernández Cordero + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ +* DisparityNode: replace full_dp parameter with sgbm_mode (`#945 `_) + Previously, only the SGBM and HH modes were allowed +* unified changelog, add missing image, deduplicate tutorials (`#938 `_) + Last bit of documentation updates - putting together a single changelog + summary for the whole release (rather than scattering among packages). + Unified the camera_info tutorial so it isn't duplicated. Added a missing + image from image_rotate (was on local disk, but hadn't committed it) +* migrate stereo_image_proc docs (`#928 `_) +* QoS improvements for image_proc and stereo_image_proc (`#922 `_) + First part of `#847 `_ + * Add QoS overrides for all publishers (in the new, standard way) + * stereo_image_proc: Default subscriber QoS to SensorDataQoS + * Clean up some of the comments around lazy subscribers, make them more + consistent across nodes +* Contributors: Michael Ferguson, Pablo David Aranda Rodríguez + 5.0.0 (2024-01-24) ------------------ * stereo_image_proc: cleanup cmake (`#904 `_) diff --git a/stereo_image_proc/package.xml b/stereo_image_proc/package.xml index d10f646d9..92ecb461a 100644 --- a/stereo_image_proc/package.xml +++ b/stereo_image_proc/package.xml @@ -2,7 +2,7 @@ stereo_image_proc - 5.0.0 + 6.0.3 Stereo and single image rectification and disparity processing. 
diff --git a/stereo_image_proc/src/stereo_image_proc/disparity_node.cpp b/stereo_image_proc/src/stereo_image_proc/disparity_node.cpp
index a5f9c2d63..8ccf60bca 100644
--- a/stereo_image_proc/src/stereo_image_proc/disparity_node.cpp
+++ b/stereo_image_proc/src/stereo_image_proc/disparity_node.cpp
@@ -40,11 +40,11 @@
 #include "cv_bridge/cv_bridge.hpp"
 #include "image_geometry/stereo_camera_model.hpp"
-#include "message_filters/subscriber.h"
-#include "message_filters/synchronizer.h"
-#include "message_filters/sync_policies/approximate_time.h"
-#include "message_filters/sync_policies/approximate_epsilon_time.h"
-#include "message_filters/sync_policies/exact_time.h"
+#include "message_filters/subscriber.hpp"
+#include "message_filters/synchronizer.hpp"
+#include "message_filters/sync_policies/approximate_time.hpp"
+#include "message_filters/sync_policies/approximate_epsilon_time.hpp"
+#include "message_filters/sync_policies/exact_time.hpp"

 #include
@@ -315,7 +315,7 @@ DisparityNode::DisparityNode(const rclcpp::NodeOptions & options)
     image_transport::getCameraInfoTopic(right_topic), false);

   // REP-2003 specifies that subscriber should be SensorDataQoS
-  const auto sensor_data_qos = rclcpp::SensorDataQoS().get_rmw_qos_profile();
+  const auto sensor_data_qos = rclcpp::SensorDataQoS();

   // Support image transport for compression
   image_transport::TransportHints hints(this);
@@ -325,11 +325,13 @@ DisparityNode::DisparityNode(const rclcpp::NodeOptions & options)
     sub_opts.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies();

     sub_l_image_.subscribe(
-      this, left_topic, hints.getTransport(), sensor_data_qos, sub_opts);
-    sub_l_info_.subscribe(this, left_info_topic, sensor_data_qos, sub_opts);
+      this, left_topic, hints.getTransport(), sensor_data_qos.get_rmw_qos_profile(), sub_opts);
+    sub_l_info_.subscribe(this, left_info_topic,
+      sensor_data_qos, sub_opts);
     sub_r_image_.subscribe(
-      this, right_topic, hints.getTransport(), sensor_data_qos, sub_opts);
-    sub_r_info_.subscribe(this, right_info_topic, sensor_data_qos, sub_opts);
+      this, right_topic, hints.getTransport(), sensor_data_qos.get_rmw_qos_profile(), sub_opts);
+    sub_r_info_.subscribe(this, right_info_topic,
+      sensor_data_qos, sub_opts);
     }
   };
@@ -351,7 +353,7 @@ void DisparityNode::imageCb(
   model_.fromCameraInfo(l_info_msg, r_info_msg);

   // Allocate new disparity image message
-  auto disp_msg = std::make_shared<stereo_msgs::msg::DisparityImage>();
+  auto disp_msg = std::make_unique<stereo_msgs::msg::DisparityImage>();
   disp_msg->header = l_info_msg->header;
   disp_msg->image.header = l_info_msg->header;
@@ -382,7 +384,7 @@ void DisparityNode::imageCb(
   // Perform block matching to find the disparities
   block_matcher_.processDisparity(l_image, r_image, model_, *disp_msg);

-  pub_disparity_->publish(*disp_msg);
+  pub_disparity_->publish(std::move(disp_msg));
 }

 rcl_interfaces::msg::SetParametersResult DisparityNode::parameterSetCb(
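Reviewer note: the disparity_node.cpp hunks above do two things at once — they switch the message_filters includes to the new `.hpp` names, and they keep `sensor_data_qos` as an `rclcpp::QoS`, converting to the rmw profile only where the image_transport `SubscriberFilter` overload still requires it. A cut-down sketch of the same synchronizer wiring, reduced to one image plus one camera_info input; names are illustrative, and the plain `rclcpp::QoS` overload of `subscribe()` is assumed to be available, as this diff implies:

```cpp
#include <functional>

#include "message_filters/subscriber.hpp"
#include "message_filters/sync_policies/approximate_time.hpp"
#include "message_filters/synchronizer.hpp"
#include "rclcpp/rclcpp.hpp"
#include "sensor_msgs/msg/camera_info.hpp"
#include "sensor_msgs/msg/image.hpp"

// Minimal sketch of the synchronizer pattern used by DisparityNode,
// with a single image + camera_info pair instead of four inputs.
using SyncPolicy = message_filters::sync_policies::ApproximateTime<
  sensor_msgs::msg::Image, sensor_msgs::msg::CameraInfo>;

class SyncedNode : public rclcpp::Node
{
public:
  SyncedNode()
  : rclcpp::Node("synced_node"), sync_(SyncPolicy(10))
  {
    // SensorDataQoS per REP-2003; newer message_filters accepts
    // rclcpp::QoS directly (assumed here), as in the hunk above.
    const rclcpp::QoS qos = rclcpp::SensorDataQoS();
    image_sub_.subscribe(this, "image", qos);
    info_sub_.subscribe(this, "camera_info", qos);
    sync_.connectInput(image_sub_, info_sub_);
    sync_.registerCallback(
      std::bind(&SyncedNode::imageCb, this,
        std::placeholders::_1, std::placeholders::_2));
  }

private:
  void imageCb(
    const sensor_msgs::msg::Image::ConstSharedPtr & image,
    const sensor_msgs::msg::CameraInfo::ConstSharedPtr & info)
  {
    (void)info;
    RCLCPP_INFO(
      get_logger(), "synced pair at %d.%09u",
      image->header.stamp.sec, image->header.stamp.nanosec);
  }

  message_filters::Subscriber<sensor_msgs::msg::Image> image_sub_;
  message_filters::Subscriber<sensor_msgs::msg::CameraInfo> info_sub_;
  message_filters::Synchronizer<SyncPolicy> sync_;
};
```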
"message_filters/subscriber.hpp" +#include "message_filters/synchronizer.hpp" +#include "message_filters/sync_policies/approximate_time.hpp" +#include "message_filters/sync_policies/approximate_epsilon_time.hpp" +#include "message_filters/sync_policies/exact_time.hpp" #include "rcutils/logging_macros.h" #include @@ -180,7 +180,7 @@ PointCloudNode::PointCloudNode(const rclcpp::NodeOptions & options) image_transport::getCameraInfoTopic(left_topic), false); // REP-2003 specifies that subscriber should be SensorDataQoS - const auto sensor_data_qos = rclcpp::SensorDataQoS().get_rmw_qos_profile(); + const auto sensor_data_qos = rclcpp::SensorDataQoS(); // Support image transport for compression image_transport::TransportHints hints(this); @@ -190,10 +190,13 @@ PointCloudNode::PointCloudNode(const rclcpp::NodeOptions & options) sub_opts.qos_overriding_options = rclcpp::QosOverridingOptions::with_default_policies(); sub_l_image_.subscribe( - this, left_topic, hints.getTransport(), sensor_data_qos, sub_opts); - sub_l_info_.subscribe(this, left_info_topic, sensor_data_qos, sub_opts); - sub_r_info_.subscribe(this, right_topic, sensor_data_qos, sub_opts); - sub_disparity_.subscribe(this, disparity_topic, sensor_data_qos, sub_opts); + this, left_topic, hints.getTransport(), sensor_data_qos.get_rmw_qos_profile(), sub_opts); + sub_l_info_.subscribe(this, left_info_topic, + sensor_data_qos, sub_opts); + sub_r_info_.subscribe(this, right_topic, + sensor_data_qos, sub_opts); + sub_disparity_.subscribe(this, disparity_topic, + sensor_data_qos, sub_opts); } }; pub_points2_ = create_publisher("points2", 1, pub_opts); @@ -232,7 +235,7 @@ void PointCloudNode::imageCb( cv::Mat_ mat = points_mat_; // Fill in new PointCloud2 message (2D image-like layout) - auto points_msg = std::make_shared(); + auto points_msg = std::make_unique(); points_msg->header = disp_msg->header; points_msg->height = mat.rows; points_msg->width = mat.cols; @@ -348,7 +351,7 @@ void PointCloudNode::imageCb( } } - pub_points2_->publish(*points_msg); + pub_points2_->publish(std::move(points_msg)); } } // namespace stereo_image_proc diff --git a/tracetools_image_pipeline/CHANGELOG.rst b/tracetools_image_pipeline/CHANGELOG.rst index 5af0be14c..e65f91040 100644 --- a/tracetools_image_pipeline/CHANGELOG.rst +++ b/tracetools_image_pipeline/CHANGELOG.rst @@ -2,6 +2,21 @@ Changelog for package tracetools_image_pipeline ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +6.0.3 (2024-08-20) +------------------ + +6.0.2 (2024-07-23) +------------------ + +6.0.1 (2024-07-22) +------------------ + +6.0.0 (2024-05-27) +------------------ + +5.0.1 (2024-03-26) +------------------ + 5.0.0 (2024-01-24) ------------------ * ROS 2: Fixed CMake (`#899 `_) diff --git a/tracetools_image_pipeline/package.xml b/tracetools_image_pipeline/package.xml index d166e1ecc..c9b2c20fb 100644 --- a/tracetools_image_pipeline/package.xml +++ b/tracetools_image_pipeline/package.xml @@ -2,7 +2,7 @@ tracetools_image_pipeline - 5.0.0 + 6.0.3 LTTng tracing provider wrapper for image_pipeline ROS 2 meta-package.