Skip to content

Commit

Permalink
Interim point-cloud calibration session
Browse files Browse the repository at this point in the history
  • Loading branch information
treideme committed Mar 2, 2024
1 parent f04a803 commit ff9fdaa
Show file tree
Hide file tree
Showing 3 changed files with 99 additions and 11 deletions.
8 changes: 6 additions & 2 deletions keypoints/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -145,8 +145,9 @@ def handle_buffer(pvbuffer, device):
cvimage0 = cv2.cvtColor(image_data0, cv2.COLOR_YUV2BGR_YUY2)
cvimage1 = cv2.cvtColor(image_data1, cv2.COLOR_YUV2BGR_YUY2)

cvimage0 = chk.draw_keypoints(cvimage0, keypoints[0])
cvimage1 = chk.draw_keypoints(cvimage1, keypoints[1])
if len(keypoints):
cvimage0 = chk.draw_keypoints(cvimage0, keypoints[0])
cvimage1 = chk.draw_keypoints(cvimage1, keypoints[1])

display_image = np.hstack((cvimage0, cvimage1))

Expand Down Expand Up @@ -236,6 +237,9 @@ def run_demo(device, stream):
:param device: The device to stream from
:param stream: The stream to use for streaming
"""
# Create a resizable keypoints window
cv2.namedWindow('Keypoints', cv2.WINDOW_NORMAL)

# Get device parameters need to control streaming
device_params = device.GetParameters()

Expand Down
12 changes: 11 additions & 1 deletion sparse3d/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -3,14 +3,24 @@
***This demo requires Bottlenose Stereo, it will not work with Bottlenose Mono.***

This example assumes that:
- your camera is properly calibrated and the calibration parameters uploaded the camera. Checkout the [calibration example](../calibration/README.md) to see how to upload your parameters.
- your camera is properly calibrated and the calibration parameters are uploaded to the camera. Check out the [calibration example](../calibration/README.md) to see how to upload your parameters.
- all the image quality related settings such as `exposure`, `gain`, and `CCM` are set. Please use `Stereo Viewer` or `eBusPlayer` to configure the image quality to your liking.

The Python script shows how to programmatically
- set keypoint parameters, only [FAST](https://en.wikipedia.org/wiki/Features_from_accelerated_segment_test) is shown, but can be adapted for [GFTT](https://ieeexplore.ieee.org/document/323794).
- set keypoint matching parameters
- enable chunk data transmission for sparse point cloud

## Output

The script will display the feature points that are detected in the left image that also
have valid 3D correspondences. Each valid frame will yield a `ply` and `png` file that can be viewed with a 3D
viewer such as [MeshLab](https://www.meshlab.net/). For example,

```
python demo.py --mac <mac> --offsety1 <offset_from_calibration>
```

## Setup

Set the following arguments to the ```demo.py``` file to change demo behavior.
Expand Down
90 changes: 82 additions & 8 deletions sparse3d/demo.py
Original file line number Diff line number Diff line change
Expand Up @@ -23,6 +23,8 @@
import warnings
import argparse
import eBUS as eb
import cv2
import math


# reference common utility files
Expand All @@ -37,7 +39,7 @@ def parse_args():
"""
parser = argparse.ArgumentParser()
parser.add_argument("-m", "--mac", default=None, help="MAC address of the Bottlenose camera")
parser.add_argument("-k", "--max_keypoints", type=int, default=100, choices=range(1, 65535),
parser.add_argument("-k", "--max_keypoints", type=int, default=1000, choices=range(1, 65535),
help="Maximum number of keypoints to detect")
parser.add_argument("-t", "--fast_threshold", type=int, default=20, choices=range(0, 255),
help="Keypoint threshold for the Fast9 algorithm")
Expand Down Expand Up @@ -143,6 +145,25 @@ def configure_matcher(device: eb.PvDeviceGEV, offsetx: int, offsety: int):
y_offset.SetValue(offsety)


def enable_feature_points(device: "eb.PvDeviceGEV"):
    """
    Enable transmission of the 'FeaturePoints' chunk data on the device.

    Mirrors enable_sparse_pointcloud(): activates chunk mode, selects the
    feature-point chunk, then enables it.

    :param device: The device to enable feature point streaming on
    """
    # Get device parameters
    device_params = device.GetParameters()

    # Chunk mode is switched on first; individual chunks are then
    # selected and enabled through the selector/enable pair below.
    chunk_mode = device_params.Get("ChunkModeActive")
    chunk_mode.SetValue(True)

    chunk_selector = device_params.Get("ChunkSelector")
    chunk_selector.SetValue("FeaturePoints")

    chunk_enable = device_params.Get("ChunkEnable")
    chunk_enable.SetValue(True)


def enable_sparse_pointcloud(device: eb.PvDeviceGEV):
"""
Enable sparse point cloud chunk data
Expand All @@ -164,6 +185,52 @@ def enable_sparse_pointcloud(device: eb.PvDeviceGEV):
chunk_enable.SetValue(True)


def process_points(keypoints, pc, image, timestamp, min_depth=0.0, max_depth=2.5):
    """
    Filter matched feature points by depth, preview them, and save a PLY point cloud.

    :param keypoints: Decoded 'FeaturePoints' chunk; keypoints[0]["data"] holds the
                      left-image feature points (objects with .x/.y pixel coordinates)
    :param pc: Decoded 'SparsePointCloud' chunk, a list of Point3D(x, y, z) parallel
               to the keypoint list; unmatched points carry NaN coordinates
    :param image: Left image in OpenCV BGR order, used for colour lookup and overlay
    :param timestamp: Buffer timestamp, used to derive the output file names
    :param min_depth: Minimum accepted z value; nearer points are discarded
    :param max_depth: Maximum accepted z value; farther points are discarded
    :return: False when the user pressed a key in the preview window, True otherwise
    """
    ply_filename = f'{timestamp}_output_point_cloud.ply'
    image_filename = f'{timestamp}_matched_points.png'
    colored_points = []
    valid_points = []
    # zip() pairs each keypoint with its 3D match and tolerates a length mismatch
    for kp, point in zip(keypoints[0]["data"], pc):
        # Reject unmatched (NaN) and out-of-depth-window points before touching
        # the image, so rejected coordinates are never used to index it.
        if math.isnan(point.x) or math.isnan(point.y) or math.isnan(point.z):
            continue
        if point.z < min_depth or point.z > max_depth:
            continue
        x, y = kp.x, kp.y
        # OpenCV images are BGR; reorder to R, G, B for the PLY colour properties
        # (the previous code wrote the blue channel into the 'red' property).
        bgr_value = image[y, x]
        colored_points.append((point.x, point.y, point.z,
                               bgr_value[2], bgr_value[1], bgr_value[0]))
        valid_points.append(cv2.KeyPoint(x=x, y=y, size=15))

    # Draw the accepted keypoints on the image and give the user a chance to quit
    if len(valid_points) > 0:
        image = cv2.drawKeypoints(image, valid_points, None,
                                  flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS)
        cv2.imshow('Matched Points', image)
        # waitKey returns -1 (0xFF after masking) when no key was pressed
        if cv2.waitKey(1) & 0xFF != 0xFF:
            return False
        cv2.imwrite(image_filename, image)

    with open(ply_filename, 'w') as ply_file:
        # ASCII PLY header; the declared vertex count must match the body lines
        ply_file.write("ply\n")
        ply_file.write("format ascii 1.0\n")
        ply_file.write(f"element vertex {len(colored_points)}\n")
        ply_file.write("property float x\n")
        ply_file.write("property float y\n")
        ply_file.write("property float z\n")
        ply_file.write("property uchar red\n")
        ply_file.write("property uchar green\n")
        ply_file.write("property uchar blue\n")
        ply_file.write("end_header\n")
        for point in colored_points:
            ply_file.write(f"{point[0]} {point[1]} {point[2]} {point[3]} {point[4]} {point[5]}\n")

    print(f'Point cloud saved to {ply_filename}')
    return True


def handle_buffer(pvbuffer: eb.PvBuffer, device: eb.PvDeviceGEV):
"""
handles incoming buffer and decodes the associated sparse point cloud chunk data
Expand All @@ -173,17 +240,21 @@ def handle_buffer(pvbuffer: eb.PvBuffer, device: eb.PvDeviceGEV):
payload_type = pvbuffer.GetPayloadType()
if payload_type == eb.PvPayloadTypeMultiPart:
# images associated with the buffer
# image0 = pvbuffer.GetMultiPartContainer().GetPart(0).GetImage()
# image1 = pvbuffer.GetMultiPartContainer().GetPart(1).GetImage()
image0 = pvbuffer.GetMultiPartContainer().GetPart(0).GetImage() # left image
image_data = image0.GetDataPointer()
image_data = cv2.cvtColor(image_data, cv2.COLOR_YUV2BGR_YUY2)
#image1 = pvbuffer.GetMultiPartContainer().GetPart(1).GetImage()

# Parses the feature points from the buffer
keypoints = decode_chunk(device=device, buffer=pvbuffer, chunk='FeaturePoints')

# parses sparse point cloud from the buffer
# returns a list of Point3D(x,y,z). NaN values are set for unmatched points.
pc = decode_chunk(device=device, buffer=pvbuffer, chunk='SparsePointCloud')
timestamp = pvbuffer.GetTimestamp()
if len(pc) > 0:
print(f' {timestamp}: {len(pc)} points: P0({pc[0].x}, {pc[0].y}, {pc[0].z})')
else:
print(f' {timestamp}: {len(pc)} points: ')
if pc is not None and len(pc) > 0 and keypoints is not None and len(keypoints) > 0:
return process_points(keypoints, pc, image_data, timestamp)
return True


def acquire_data(device, stream):
Expand All @@ -192,6 +263,7 @@ def acquire_data(device, stream):
:param device: The device to stream from
:param stream: The stream to use for streaming
"""
cv2.namedWindow('Matched Points', cv2.WINDOW_NORMAL)

# Get device parameters need to control streaming
device_params = device.GetParameters()
Expand All @@ -210,7 +282,8 @@ def acquire_data(device, stream):
if result.IsOK():
if operational_result.IsOK():
# We now have a valid buffer.
handle_buffer(pvbuffer, device)
if not handle_buffer(pvbuffer, device):
break
else:
# Non OK operational result
warnings.warn(f"Operational result error. {operational_result.GetCodeString()} "
Expand All @@ -236,6 +309,7 @@ def acquire_data(device, stream):
set_y1_offset(device=bn_device, value=args.offsety1)
configure_fast9(device=bn_device, kp_max=args.max_keypoints, threshold=args.fast_threshold)
configure_matcher(device=bn_device, offsetx=args.match_xoffset, offsety=args.match_yoffset)
enable_feature_points(device=bn_device)
enable_sparse_pointcloud(device=bn_device)
acquire_data(device=bn_device, stream=bn_stream)

Expand Down

0 comments on commit ff9fdaa

Please sign in to comment.