From 2b39788b930b69269cd55d251571962b6adc6b4a Mon Sep 17 00:00:00 2001 From: Fernando Cladera Date: Mon, 31 Jul 2023 22:04:47 -0400 Subject: [PATCH] First release of M3ED build system --- .gitignore | 3 + build_system/README.md | 49 + build_system/bag_processing/hdf52imu_plot.py | 120 ++ build_system/bag_processing/hdf52media.bash | 32 + build_system/bag_processing/hdf52media.py | 233 ++++ build_system/bag_processing/hdf52stripped.py | 60 + build_system/bag_processing/rosbag2hdf5.bash | 57 + build_system/bag_processing/rosbag2hdf5.py | 1058 ++++++++++++++++ .../bag_processing/rosbag2timecorrection.bash | 21 + .../bag_processing/rosbag2timecorrection.py | 429 +++++++ .../bag_processing/rosbag2verify.bash | 23 + build_system/bag_processing/rosbag2verify.py | 230 ++++ build_system/calibration/aprilgrid.yaml | 6 + .../event_bag_convert/CMakeLists.txt | 135 ++ .../include/event_bag_convert/prettyprint.hpp | 445 +++++++ .../calibration/event_bag_convert/package.xml | 80 ++ .../src/bag_to_all_frames.cpp | 254 ++++ build_system/calibration/kalibr_all | 27 + build_system/calibration/kalibr_events | 20 + build_system/calibration/kalibr_ovc | 21 + build_system/calibration/only_ovc_calib.py | 37 + .../calibration/rosbag2camcalibration.bash | 29 + .../calibration/rosbag2imucalibration.bash | 32 + build_system/calibration/vn100.yaml | 20 + build_system/dataset_paths.py | 381 ++++++ build_system/docker/Dockerfile | 247 ++++ build_system/docker/docker_checks.sh | 109 ++ build_system/jenkins/Jenkinsfile_CPU_stage1 | 78 ++ build_system/jenkins/Jenkinsfile_CPU_stage2 | 45 + build_system/jenkins/Jenkinsfile_CPU_stage3 | 34 + build_system/jenkins/Jenkinsfile_GPU | 35 + build_system/jenkins/Jenkinsfile_dispatcher | 209 ++++ build_system/jenkins/Jenkinsfile_docker_build | 40 + .../CMakeLists.txt | 204 +++ .../concatenate_pcd_uncompressed/package.xml | 66 + .../src/pcl_concatenate_points_pcd.cpp | 152 +++ build_system/lidar_depth/fasterlio_gt.py | 289 +++++ build_system/lidar_depth/fasterlio_plot.py | 66 + build_system/lidar_depth/gt2media.bash | 45 + build_system/lidar_depth/gt2media.py | 115 ++ build_system/lidar_depth/gt2verify.bash | 22 + build_system/lidar_depth/gt2verify.py | 67 + build_system/lidar_depth/lidar_calib.py | 308 +++++ .../lidar_depth/long_range_ouster64.yaml | 47 + .../ouster_bag_convert/CMakeLists.txt | 218 ++++ .../ouster_bag_convert/package.xml | 74 ++ .../ouster_bag_convert/src/bag_rewriter.cpp | 139 +++ build_system/lidar_depth/pcd2gt.bash | 33 + .../lidar_depth/rosbag2oustermetadata.py | 42 + build_system/lidar_depth/rosbag2pcd.bash | 74 ++ .../lidar_depth/short_range_ouster64.yaml | 47 + build_system/lidar_depth/util.py | 352 ++++++ build_system/preamble.bash | 210 ++++ build_system/semantics/hdf52internimage.bash | 86 ++ build_system/semantics/internimage.py | 270 ++++ build_system/semantics/internimage2media.bash | 28 + build_system/semantics/internimage2media.py | 151 +++ .../stats_and_summary/dataset_statistics.py | 144 +++ build_system/stats_and_summary/getStats.bash | 67 + dataset_list.yaml | 1099 +++++++++++++++++ tools/download_m3ed.py | 87 ++ 61 files changed, 9101 insertions(+) create mode 100644 .gitignore create mode 100644 build_system/README.md create mode 100644 build_system/bag_processing/hdf52imu_plot.py create mode 100755 build_system/bag_processing/hdf52media.bash create mode 100644 build_system/bag_processing/hdf52media.py create mode 100644 build_system/bag_processing/hdf52stripped.py create mode 100755 build_system/bag_processing/rosbag2hdf5.bash create mode 100644 
build_system/bag_processing/rosbag2hdf5.py create mode 100755 build_system/bag_processing/rosbag2timecorrection.bash create mode 100644 build_system/bag_processing/rosbag2timecorrection.py create mode 100755 build_system/bag_processing/rosbag2verify.bash create mode 100644 build_system/bag_processing/rosbag2verify.py create mode 100644 build_system/calibration/aprilgrid.yaml create mode 100644 build_system/calibration/event_bag_convert/CMakeLists.txt create mode 100644 build_system/calibration/event_bag_convert/include/event_bag_convert/prettyprint.hpp create mode 100644 build_system/calibration/event_bag_convert/package.xml create mode 100644 build_system/calibration/event_bag_convert/src/bag_to_all_frames.cpp create mode 100755 build_system/calibration/kalibr_all create mode 100755 build_system/calibration/kalibr_events create mode 100755 build_system/calibration/kalibr_ovc create mode 100644 build_system/calibration/only_ovc_calib.py create mode 100755 build_system/calibration/rosbag2camcalibration.bash create mode 100755 build_system/calibration/rosbag2imucalibration.bash create mode 100644 build_system/calibration/vn100.yaml create mode 100755 build_system/dataset_paths.py create mode 100644 build_system/docker/Dockerfile create mode 100755 build_system/docker/docker_checks.sh create mode 100644 build_system/jenkins/Jenkinsfile_CPU_stage1 create mode 100644 build_system/jenkins/Jenkinsfile_CPU_stage2 create mode 100644 build_system/jenkins/Jenkinsfile_CPU_stage3 create mode 100644 build_system/jenkins/Jenkinsfile_GPU create mode 100644 build_system/jenkins/Jenkinsfile_dispatcher create mode 100644 build_system/jenkins/Jenkinsfile_docker_build create mode 100644 build_system/lidar_depth/concatenate_pcd_uncompressed/CMakeLists.txt create mode 100644 build_system/lidar_depth/concatenate_pcd_uncompressed/package.xml create mode 100644 build_system/lidar_depth/concatenate_pcd_uncompressed/src/pcl_concatenate_points_pcd.cpp create mode 100644 build_system/lidar_depth/fasterlio_gt.py create mode 100755 build_system/lidar_depth/fasterlio_plot.py create mode 100755 build_system/lidar_depth/gt2media.bash create mode 100644 build_system/lidar_depth/gt2media.py create mode 100755 build_system/lidar_depth/gt2verify.bash create mode 100644 build_system/lidar_depth/gt2verify.py create mode 100755 build_system/lidar_depth/lidar_calib.py create mode 100644 build_system/lidar_depth/long_range_ouster64.yaml create mode 100644 build_system/lidar_depth/ouster_bag_convert/CMakeLists.txt create mode 100644 build_system/lidar_depth/ouster_bag_convert/package.xml create mode 100644 build_system/lidar_depth/ouster_bag_convert/src/bag_rewriter.cpp create mode 100755 build_system/lidar_depth/pcd2gt.bash create mode 100644 build_system/lidar_depth/rosbag2oustermetadata.py create mode 100755 build_system/lidar_depth/rosbag2pcd.bash create mode 100644 build_system/lidar_depth/short_range_ouster64.yaml create mode 100644 build_system/lidar_depth/util.py create mode 100644 build_system/preamble.bash create mode 100755 build_system/semantics/hdf52internimage.bash create mode 100644 build_system/semantics/internimage.py create mode 100755 build_system/semantics/internimage2media.bash create mode 100644 build_system/semantics/internimage2media.py create mode 100755 build_system/stats_and_summary/dataset_statistics.py create mode 100755 build_system/stats_and_summary/getStats.bash create mode 100644 dataset_list.yaml create mode 100755 tools/download_m3ed.py diff --git a/.gitignore b/.gitignore new file mode 100644 index 
0000000..a662b01 --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +utils/ +__pycache__ +Log/ diff --git a/build_system/README.md b/build_system/README.md new file mode 100644 index 0000000..28153c7 --- /dev/null +++ b/build_system/README.md @@ -0,0 +1,49 @@ +![alt text](https://github.com/daniilidis-group/m3ed/blob/master/M3ED_banner.webp) + +# M3ED Processing Scripts + +This repo contains the scripts used to process the M3ED data. These scripts are +run automatically in Jenkins. + +## Dockerfile + +We provide a `Dockerfile` to run the data conversions. We used this docker +environment to process the bags automatically and generate the bag reports. + +### Jenkins build architecture + +Jenkins was used as our deployment platform. There are three Jenkins pipelines: + + - Dispatcher: this pipeline gathers all the input files and orchestrates the + execution of the work among the different nodes. The `Jenkinsfile_dispatcher` is used for this + pipeline. + + - Docker-build: this pipeline builds the docker image **in each node**. This + ensures that the processing pipelines have the most up-to-date docker image. + The `Jenkinsfile_docker_build` is used for this pipeline. + + - Processing: this pipeline runs the processing scripts. The `Jenkinsfile_CPU_stage*` and + `Jenkinsfile_GPU` files are used for this pipeline. + +### Jenkins Master Setup + +1. Install Jenkins +2. Create three jobs: `M3ED-dispatcher`, `M3ED-docker-build`, `M3ED-processing` +3. Ensure that `M3ED-docker-build` and `M3ED-dispatcher` are executed for all + branches (Branch Specifier). +4. Choose the Branch Specifier that you want for `M3ED-dispatcher`. + +## Scripts + +- `preliminary.sh`: this script is executed in each build node; it verifies that +the docker image is properly built and that we have access to the processing folder and +files. + +- `bag_processing/rosbag2verify.py`: verifies bag timestamps and integrity. + +- `bag_processing/rosbag2hdf5.py`: converts the bag data into HDF5. + +- `bag_processing/hdf52media.py`: generates media output from the HDF5 (videos, plots, images). + +## License +M3ED is released under the [Creative Commons Attribution-ShareAlike 4.0 International License](https://creativecommons.org/licenses/by-sa/4.0/).
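The individual stages can also be invoked by hand. The sketch below is a minimal, hypothetical manual run of the per-bag pipeline (normally driven by Jenkins); the flags match the scripts added in this patch, but every path is a placeholder.

```bash
# 1. Estimate per-sensor clock corrections (skew/offset) from the bag
python3 build_system/bag_processing/rosbag2timecorrection.py \
    --bag data.bag --time_fn data_time_calib.yml
# 2. Convert the bag to HDF5 using those corrections
python3 build_system/bag_processing/rosbag2hdf5.py \
    --bag data.bag --h5fn data.h5 --offset data_time_calib.yml
# 3. Render preview videos from the HDF5
python3 build_system/bag_processing/hdf52media.py \
    --h5fn data.h5 --out_rgb rgb.avi --out_events_gray events_gray.avi
```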
diff --git a/build_system/bag_processing/hdf52imu_plot.py b/build_system/bag_processing/hdf52imu_plot.py new file mode 100644 index 0000000..80b2ae2 --- /dev/null +++ b/build_system/bag_processing/hdf52imu_plot.py @@ -0,0 +1,120 @@ +import os +import sys +import pdb +import argparse + +import numpy as np +import h5py +import matplotlib.pyplot as plt + +from scipy.interpolate import splev, splrep, barycentric_interpolate +from scipy.spatial.transform import Rotation +from scipy import signal + +def resample_imu(imu, imu_ts, sample_times): +    spl = splrep(imu_ts, imu) +    return splev(sample_times, spl) + +def filter_imu_sample(imu, numtaps=7, f=0.10): +    coeffs = signal.firwin(numtaps, f) +    filtered = np.zeros(imu.shape) +    filtered[:,0] = signal.lfilter(coeffs, 1.0, imu[:,0]) +    filtered[:,1] = signal.lfilter(coeffs, 1.0, imu[:,1]) +    filtered[:,2] = signal.lfilter(coeffs, 1.0, imu[:,2]) +    return filtered + +def align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel): +    """ +    Solving ouster_R_ovc * ovc_omega = ouster_omega +    """ +    ovc_measurements = np.concatenate( [ovc_omega] ).T +    ouster_measurements = np.concatenate( [ouster_omega] ).T + +    ouster_R_ovc = (ouster_measurements @ np.linalg.pinv(ovc_measurements)) +    U,S,Vh = np.linalg.svd(ouster_R_ovc) +    ouster_R_ovc = U@Vh +    ouster_R_ovc[:,0] = -ouster_R_ovc[:,0] +    ouster_R_ovc[:,2] = np.cross(ouster_R_ovc[:,0],ouster_R_ovc[:,1]) + +    assert np.all(np.isclose( np.linalg.det(ouster_R_ovc), 1.0 )) + +    return ouster_R_ovc + + +if __name__ == "__main__": +    parser = argparse.ArgumentParser(description="Aligns and plots the OVC and Ouster IMU data from an h5 file.") +    # input h5 file +    parser.add_argument("--h5fn", help="The h5 file to process.", +                        required=True) +    args = parser.parse_args() + +    if not os.path.isfile(args.h5fn): +        sys.exit("The input h5 file %s does not exist."
% args.h5fn) + + h5f = h5py.File(args.h5fn, 'r') + + ovc_mask = np.isfinite(h5f['/ovc/imu/accel'][:,0]) + ovc_ts = h5f['/ovc/imu/ts'][...][ovc_mask] + ovc_accel = h5f['/ovc/imu/accel'][...][ovc_mask,:] + ovc_omega = h5f['/ovc/imu/omega'][...][ovc_mask,:] + + ouster_mask = np.isfinite(h5f['/ouster/imu/accel'][1000:-1000,0]) + ouster_ts = h5f['/ouster/imu/ts'][1000:-1000][ouster_mask] + ouster_accel = h5f['/ouster/imu/accel'][1000:-1000][ouster_mask,:] + ouster_omega = h5f['/ouster/imu/omega'][1000:-1000][ouster_mask,:] + + ovc_resampled_omega = np.stack( [resample_imu(ovc_omega[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 ) + ovc_resampled_accel = np.stack( [resample_imu(ovc_accel[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 ) + + ouster_accel = filter_imu_sample(ouster_accel) + ouster_omega = filter_imu_sample(ouster_omega) + + ovc_omega = filter_imu_sample(ovc_resampled_omega) + ovc_accel = filter_imu_sample(ovc_resampled_accel) + ovc_ts = ouster_ts + + ouster_R_ovc = align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel) + + print(ouster_R_ovc) + print(np.rad2deg(Rotation.from_matrix(ouster_R_ovc).as_euler('xyz'))) + print(np.linalg.det(ouster_R_ovc)) + + transformed_ovc_omega = (ouster_R_ovc @ ovc_omega.T).T + transformed_ovc_accel = (ouster_R_ovc @ ovc_accel.T).T + + fig, axes = plt.subplots(3,2,sharex=True,sharey=True) + + axes[0,0].set_title('OVC gyro') + axes[0,0].plot( ovc_ts, transformed_ovc_omega[:,0] ) + axes[1,0].plot( ovc_ts, transformed_ovc_omega[:,1] ) + axes[2,0].plot( ovc_ts, transformed_ovc_omega[:,2] ) + + axes[0,1].set_title('Ouster gyro') + axes[0,1].plot( ouster_ts[:-1], ouster_omega[:-1,0] ) + axes[1,1].plot( ouster_ts[:-1], ouster_omega[:-1,1] ) + axes[2,1].plot( ouster_ts[:-1], ouster_omega[:-1,2] ) + + fig, axes = plt.subplots(3,2,sharex=True,sharey=True) + + axes[0,0].set_title('OVC accel') + axes[0,0].plot( ovc_ts, transformed_ovc_accel[:,0] ) + axes[1,0].plot( ovc_ts, transformed_ovc_accel[:,1] ) + axes[2,0].plot( ovc_ts, transformed_ovc_accel[:,2] ) + + axes[0,1].set_title('Ouster accel') + axes[0,1].plot( ouster_ts[:-1], ouster_accel[:-1,0] ) + axes[1,1].plot( ouster_ts[:-1], ouster_accel[:-1,1] ) + axes[2,1].plot( ouster_ts[:-1], ouster_accel[:-1,2] ) + + plt.figure() + plt.plot(ovc_ts, transformed_ovc_omega[:,0] ) + plt.plot( ouster_ts, ouster_omega[:,0]) + + plt.figure() + plt.plot(ovc_ts, transformed_ovc_omega[:,1] ) + plt.plot( ouster_ts, ouster_omega[:,1]) + + plt.figure() + plt.plot(ovc_ts, transformed_ovc_omega[:,2] ) + plt.plot( ouster_ts, ouster_omega[:,2]) + + plt.show() diff --git a/build_system/bag_processing/hdf52media.bash b/build_system/bag_processing/hdf52media.bash new file mode 100755 index 0000000..1946b40 --- /dev/null +++ b/build_system/bag_processing/hdf52media.bash @@ -0,0 +1,32 @@ +#!/bin/bash + +source ./build_system/preamble.bash + +output_files=("$EVENTS_VIDEO_RAW" "$RGB_VIDEO_RAW") +check_files output_files +check_free_space fixed + +# create tmp variables for videos +EVENTS_VIDEO_RAW_TMP="${EVENTS_VIDEO_RAW%.*}_tmp.${EVENTS_VIDEO_RAW##*.}" +RGB_VIDEO_RAW_TMP="${RGB_VIDEO_RAW%.*}_tmp.${RGB_VIDEO_RAW##*.}" + +# Run bag processing +echo -e "${BLUE}Generating raw videos${NC}" +python3 build_system/bag_processing/hdf52media.py \ + --h5fn "$H5_PATH" \ + --out_events_gray "$EVENTS_VIDEO_RAW_TMP" \ + --out_rgb "$RGB_VIDEO_RAW_TMP" +if [ $? 
-ne 0 ]; then + echo -e "${RED}Error creating media files for $H5_PATH${NC}" + rm "$EVENTS_VIDEO_RAW_TMP" "$RGB_VIDEO_RAW_TMP" + exit 1 +fi +echo -e "${GREEN}Raw videos finished${NC}" +mv "$EVENTS_VIDEO_RAW_TMP" "$EVENTS_VIDEO_RAW" +mv "$RGB_VIDEO_RAW_TMP" "$RGB_VIDEO_RAW" + +# Compress videos +echo -e "${BLUE}Generating compressed videos${NC}" +compress_with_ffmpeg "$EVENTS_VIDEO_RAW" +compress_with_ffmpeg "$RGB_VIDEO_RAW" +echo -e "${GREEN}Compressed videos finished${NC}" diff --git a/build_system/bag_processing/hdf52media.py b/build_system/bag_processing/hdf52media.py new file mode 100644 index 0000000..1060ab6 --- /dev/null +++ b/build_system/bag_processing/hdf52media.py @@ -0,0 +1,233 @@ +#!/usr/bin/env python3 +import argparse +import os +import h5py +import numpy as np +import cv2 +import sys +import pdb +from colorama import Fore, Style + +# Hardcoded topic names from the h5 +OVC_LEFT_TOPIC = ["ovc", "left", "data"] +OVC_RIGHT_TOPIC = ["ovc", "right", "data"] +OVC_RGB_TOPIC = ["ovc", "rgb", "data"] +OVC_TS_TOPIC = ["ovc", "ts"] +OVC_MAP_LEFT = ["ovc", "ts_map_prophesee_left_t"] +OVC_MAP_RIGHT = ["ovc", "ts_map_prophesee_right_t"] +PROPHESEE_LEFT_TOPIC = ["prophesee", "left"] +PROPHESEE_RIGHT_TOPIC = ["prophesee", "right"] + + +def generate_rgb_gray_vid(h5, output_file, verbose=True): + print("SYNC RGB/GRAY Video Generation") + + # Filename is built based on the list of topics + # TODO: change this for other video topics + filename = "rgb_gray_" + ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]][:] + img_rgb = h5[OVC_RGB_TOPIC[0]][OVC_RGB_TOPIC[1]][OVC_RGB_TOPIC[2]] + img_left = h5[OVC_LEFT_TOPIC[0]][OVC_LEFT_TOPIC[1]][OVC_LEFT_TOPIC[2]] + img_right = h5[OVC_RIGHT_TOPIC[0]][OVC_RIGHT_TOPIC[1]][OVC_RIGHT_TOPIC[2]] + + # Get image properties from data + img_w = img_rgb[0].shape[1] + img_h = img_rgb[0].shape[0] + + # Get output image shape + out_w = img_w * 3 + out_h = img_h + fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) + + # Create VideoWriter and iterate over the frames + fourcc = cv2.VideoWriter_fourcc(*"FFV1") + video = cv2.VideoWriter(output_file, fourcc, + fps, (out_w, out_h), True) + + # Loop over the timestamps + for n, _ in enumerate(ts): + if verbose: + print(f"RGB - Processing frame {n}/{len(ts)}", end="\r") + write_frame = np.zeros((out_h,out_w,3) ,dtype=img_left[n,...].dtype) + write_frame[:,0:img_w,:] = img_left[n, ...] + write_frame[:,img_w:img_w*2,:] = img_rgb[n, ...] + write_frame[:,img_w*2:img_w*3,:] = img_right[n, ...] + + video.write(write_frame) + + video.release() + os.chmod(output_file, 0o666) + + +def generate_rgb_vid(h5, output_file, verbose=True): + print("RGB Video Generation") + + # Filename is built based on the list of topics + # TODO: change this for other video topics + filename = "rgb_" + ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]][:] + img = h5[OVC_RGB_TOPIC[0]][OVC_RGB_TOPIC[1]][OVC_RGB_TOPIC[2]] + + # Get image properties from data + img_w = img[0].shape[1] + img_h = img[0].shape[0] + + # Get output image shape + out_w = img_w + out_h = img_h + fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) + + # Create VideoWriter and iterate over the frames + fourcc = cv2.VideoWriter_fourcc(*"FFV1") + video = cv2.VideoWriter(output_file, fourcc, + fps, (out_w, out_h), True) + + # Loop over the timestamps + for n, _ in enumerate(ts): + if verbose: + print(f"RGB - Processing frame {n}/{len(ts)}", end="\r") + write_frame = img[n, ...] 
+ video.write(write_frame) + video.release() + os.chmod(output_file, 0o666) + + +def generate_synchronized_vid(h5, output_file, verbose, events=True, stereo=True): +    print("Synchronized Video Generation") +    # imgs holds the pointers to the hdf5 datasets +    imgs = [] +    ev_datasets = [] + +    # Filename is built based on the list of topics +    # TODO: change this for other video topics +    filename = "imgs_" + +    # Check that we have all the topics we need for this +    imgs.append(h5[OVC_LEFT_TOPIC[0]][OVC_LEFT_TOPIC[1]][OVC_LEFT_TOPIC[2]]) +    imgs.append(h5[OVC_RIGHT_TOPIC[0]][OVC_RIGHT_TOPIC[1]][OVC_RIGHT_TOPIC[2]]) +    ts = h5[OVC_TS_TOPIC[0]][OVC_TS_TOPIC[1]] +    if events: +        map_left = h5[OVC_MAP_LEFT[0]][OVC_MAP_LEFT[1]] +        map_right = h5[OVC_MAP_RIGHT[0]][OVC_MAP_RIGHT[1]] +        ev_datasets.append(h5[PROPHESEE_LEFT_TOPIC[0]][PROPHESEE_LEFT_TOPIC[1]]) +        ev_datasets.append(h5[PROPHESEE_RIGHT_TOPIC[0]][PROPHESEE_RIGHT_TOPIC[1]]) + +    # Get image properties from data +    img_w = imgs[0].shape[2] +    img_h = imgs[0].shape[1] + +    # Get output image shape +    dvs_width = 1280 +    dvs_height = 720 +    border = 10 +    out_w = img_w * 2 + border +    if events: +        assert img_w == dvs_width +        out_h = img_h + dvs_height + border +    else: +        out_h = img_h +    fps = np.round(1e6/np.mean(np.diff(ts[:]))).astype(int) + +    # Create VideoWriter and iterate over the frames +    fourcc = cv2.VideoWriter_fourcc(*"FFV1") +    video = cv2.VideoWriter(output_file, fourcc, +                            fps, (out_w, out_h), True) + +    # Do not iterate over the last frame to avoid index problems +    for n, _ in enumerate(ts[:-1]): +        if verbose: +            print(f"Synchronized - Processing frame {n}/{len(ts)}", end="\r") +        write_frame = None +        for j in range(len(imgs)): +            # OVC Topics +            # Find the index of the closest timestamp +            out_img = imgs[j][n, ...] + +            # The images are monochrome +            out_img = np.dstack((out_img, out_img, out_img)) + +            if j == 0: +                write_frame = out_img +            else: +                write_frame = np.hstack((write_frame, +                                         np.zeros((img_h, border, 3), +                                                  dtype=np.uint8), +                                         out_img)) + +        # Generate the frame with the event camera +        if events: +            x_l = ev_datasets[0]["x"][map_left[n]:map_left[n+1]] +            y_l = ev_datasets[0]["y"][map_left[n]:map_left[n+1]] +            p_l = ev_datasets[0]["p"][map_left[n]:map_left[n+1]] +            x_r = ev_datasets[1]["x"][map_right[n]:map_right[n+1]] +            y_r = ev_datasets[1]["y"][map_right[n]:map_right[n+1]] +            p_r = ev_datasets[1]["p"][map_right[n]:map_right[n+1]] + +            p_l_pos = (p_l[np.newaxis] > 0).astype(np.uint8)*255 +            p_l_neg = (p_l[np.newaxis] == 0).astype(np.uint8)*255 +            p_r_pos = (p_r[np.newaxis] > 0).astype(np.uint8)*255 +            p_r_neg = (p_r[np.newaxis] == 0).astype(np.uint8)*255 + +            ev_frame_l = np.zeros((dvs_height, dvs_width, 3), dtype=np.uint8) +            ev_frame_l[y_l, x_l, :] = np.vstack((p_l_pos, +                                                 np.zeros_like(p_l_pos), +                                                 p_l_neg)).T +            ev_frame_r = np.zeros((dvs_height, dvs_width, 3), dtype=np.uint8) +            ev_frame_r[y_r, x_r, :] = np.vstack((p_r_pos, +                                                 np.zeros_like(p_r_pos), +                                                 p_r_neg)).T + +            ev_f = np.hstack((ev_frame_l, +                              np.zeros((dvs_height, border, 3), dtype=np.uint8), +                              ev_frame_r)) + +            write_frame = np.vstack((write_frame, +                                     np.zeros((border, dvs_width*2 + border, 3), +                                              dtype=np.uint8), +                                     ev_f)) +        video.write(write_frame) +    video.release() +    os.chmod(output_file, 0o666) + + +if __name__ == "__main__": +    parser = argparse.ArgumentParser(description="Creates vids and imgs from h5.") +    # input h5 file +    parser.add_argument("--h5fn", help="The h5 file to convert.", +                        required=True) +    # output files where we should store the results +    parser.add_argument("--out_events_gray", +                        help="The
output file to store the events/gray video. If not set, no events/gray video will be generated") +    parser.add_argument("--no-events", action="store_false", dest="events", +                        help="Do not add events in out_events_gray.") +    parser.add_argument("--out_rgb", +                        help="The output file to store the rgb video. If not set, no rgb video will be generated") +    parser.add_argument("--out_all_ovc", action="store_true", +                        help="Output the synced video of all OVC sensors. If not set, no video will be generated.") +    parser.add_argument("--verbose", action="store_true", dest="verbose", +                        help="Be verbose.") +    args = parser.parse_args() +    # Check that the file exists +    if not os.path.isfile(args.h5fn): +        sys.exit("The input h5 file %s does not exist." % args.h5fn) + +    print("Converting %s to videos." % args.h5fn) + +    # Get output folder name from input file +    verbose = args.verbose + +    # Open h5 file and run the generations +    with h5py.File(args.h5fn, 'r') as h5: +        # generate_plots_imu(h5, output_folder, [h5, output_folder, "data"]) +        if (not ("calib" in args.h5fn and "lidar" in args.h5fn)): +            if args.out_events_gray is not None: +                # print in blue using colorama +                print(Fore.BLUE + "Generating events/gray video" + Style.RESET_ALL) +                generate_synchronized_vid(h5, args.out_events_gray, +                                          verbose, args.events) +        if args.out_rgb is not None: +            print(Fore.BLUE + "Generating RGB video" + Style.RESET_ALL) +            generate_rgb_vid(h5, args.out_rgb, verbose) +        if args.out_all_ovc: +            print(Fore.BLUE + "Generating all OVC video" + Style.RESET_ALL) +            generate_rgb_gray_vid(h5, args.out_events_gray, verbose) diff --git a/build_system/bag_processing/hdf52stripped.py b/build_system/bag_processing/hdf52stripped.py new file mode 100644 index 0000000..af0e1f6 --- /dev/null +++ b/build_system/bag_processing/hdf52stripped.py @@ -0,0 +1,60 @@ +#!/usr/bin/env python3 + +import h5py +import os +import sys +import pdb +import subprocess +from datetime import datetime +from colorama import Fore, Back, Style + + +def copy_datasets(source_file, destination_file): +    with h5py.File(source_file, 'r') as source_h5, h5py.File(destination_file, 'w') as destination_h5: +        def copy_group(group, parent_group, parent_key): +            for key in group.keys(): +                if key == "ouster": +                    continue +                if parent_key == "ovc" and key == "rgb": +                    continue +                item = group[key] +                if isinstance(item, h5py.Dataset): +                    # Get compression settings from the source dataset +                    compression = item.compression +                    compression_opts = item.compression_opts +                    shuffle = item.shuffle + +                    # Create the destination dataset with the same data and compression settings +                    parent_group.create_dataset(key, data=item[...], +                                                compression=compression, +                                                compression_opts=compression_opts, +                                                shuffle=shuffle) +                elif isinstance(item, h5py.Group): +                    print(Fore.BLUE + f"Copying {parent_key}/{key}..."
+ Style.RESET_ALL) +                    new_group = parent_group.create_group(key) +                    copy_group(item, new_group, key) +                else: +                    sys.exit("Unknown type") + +        copy_group(source_h5, destination_h5, "") +        # Add metadata +        version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() +        destination_h5.attrs["version"] = version +        destination_h5.attrs["creation_date"] = str(datetime.now()) + + +if __name__ == "__main__": +    import argparse +    parser = argparse.ArgumentParser() +    parser.add_argument("--source", help="Source HDF5 file", +                        required=True) +    parser.add_argument("--destination", help="Destination HDF5 file", +                        required=True) +    args = parser.parse_args() + +    # Check that the source file exists +    if not os.path.isfile(args.source): +        sys.exit("Source file does not exist") + +    copy_datasets(args.source, args.destination) + diff --git a/build_system/bag_processing/rosbag2hdf5.bash b/build_system/bag_processing/rosbag2hdf5.bash new file mode 100755 index 0000000..8aa32b6 --- /dev/null +++ b/build_system/bag_processing/rosbag2hdf5.bash @@ -0,0 +1,57 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# H5_PATH is always an output; test runs also produce STRIPPED_H5_PATH +if [ "$IS_TEST" -eq 1 ]; then +  output_files=("$H5_PATH" "$STRIPPED_H5_PATH") +else +  output_files=("$H5_PATH") +fi + +check_files output_files +check_free_space bag_multiple + +H5_PATH_TMP="$H5_PATH.tmp" + +# Run bag processing +echo -e "${BLUE}Converting bag to hdf5${NC}" +python3 build_system/bag_processing/rosbag2hdf5.py --bag "$BAG_PATH" \ +  --h5fn "$H5_PATH_TMP" \ +  --offset "$TIME_CORRECTIONS_PATH" \ +  --camchain "$CALIB_CAMCHAIN_PATH" \ +  --lidar_calib "$CALIB_LIDAR_PATH" \ +  --imu_chain "$CALIB_IMU_PATH" +if [ $? -ne 0 ]; then +  echo -e "${RED}Error converting bag to hdf5${NC}" +  exit 1 +fi +echo -e "${GREEN}Done converting bag to hdf5${NC}" + +# Move the temporary file to the final location +chmod 666 "$H5_PATH_TMP" +mv "$H5_PATH_TMP" "$H5_PATH" + +# Get the HDF5 size in KB +BAG_FILE_SIZE_KB=$(du -k "$BAG_PATH" | cut -f1) +H5_FILE_SIZE=$(du -k "$H5_PATH" | cut -f1) + +echo -e "${YELLOW}Bag file size in KB: $BAG_FILE_SIZE_KB${NC}" +echo -e "${YELLOW}H5 file size in KB: $H5_FILE_SIZE${NC}" + +# Calculate and print the ratio between file sizes +echo -e "${GREEN}Ratio: ${BOLD}$(echo "scale=2; $H5_FILE_SIZE / $BAG_FILE_SIZE_KB" | bc)${NC}" + +# check if IS_TEST is 1, and strip the output hdf5 if so +if [ "$IS_TEST" -eq 1 ]; then +  echo -e "${BLUE}Stripping output hdf5${NC}" +  STRIPPED_H5_PATH_TMP="$STRIPPED_H5_PATH.tmp" +  python3 build_system/bag_processing/hdf52stripped.py \ +    --source "$H5_PATH" --destination "$STRIPPED_H5_PATH_TMP" +  if [ $?
-ne 0 ]; then +    echo -e "${RED}Error stripping hdf5${NC}" +    exit 1 +  fi +  echo -e "${GREEN}Done stripping hdf5${NC}" +  chmod 666 "$STRIPPED_H5_PATH_TMP" +  mv "$STRIPPED_H5_PATH_TMP" "$STRIPPED_H5_PATH" +fi diff --git a/build_system/bag_processing/rosbag2hdf5.py b/build_system/bag_processing/rosbag2hdf5.py new file mode 100644 index 0000000..9bc46d7 --- /dev/null +++ b/build_system/bag_processing/rosbag2hdf5.py @@ -0,0 +1,1058 @@ +import roslib +import rospy +import rosbag +from sensor_msgs.msg import CompressedImage, Image +import subprocess +from datetime import datetime + +import argparse +import os +from colorama import Fore, Style + +import yaml +import h5py +import numpy as np +import math +from cv_bridge import CvBridge, CvBridgeError + +bridge = CvBridge() +import pdb +from collections import defaultdict +from mpl_toolkits.mplot3d import Axes3D +import matplotlib.pyplot as plt +import sys + +from tqdm import tqdm + +from threading import Thread, Condition + +from ouster.client import ( +    LidarPacket, +    ImuPacket, +    SensorInfo, +    Scans, +    Packets, +    ChanField, +    XYZLut, +    _utils, +) +from event_array_py import Decoder + + +class HDFHandler: +    UNITS_TO_SECONDS = 0 # FORCE DIVIDE BY ZERO IF NOT DEFINED + +    TOPIC_TO_GROUP = { +        "/ublox/hp_fix" : "/ublox", +        "/ublox/navclock" : "/ublox", +        "/ovc/gps_trigger" : "/ublox", +        "/ovc/right/image_mono/compressed" : "/ovc/right", +        "/ovc/left/image_mono/compressed" : "/ovc/left", +        "/ovc/rgb/image_color/compressed" : "/ovc/rgb", +        "/ovc/vectornav/imu" : "/ovc/imu", +        "/prophesee/left/events" : "/prophesee/left", +        "/prophesee/right/events" : "/prophesee/right", +        "/os_node/lidar_packets" : "/ouster", +        "/os_node/imu_packets" : "/ouster/imu", +    } + +    SINGLE_TIME_DOMAIN = True + +    def __init__(self, h5file, topics, bag, time_calib): +        if type(topics) == str: +            topics = [topics] +        self.group_names = [self.TOPIC_TO_GROUP[t] for t in topics] +        self.groups = {t: h5file.require_group(g) for t,g in zip(topics, self.group_names)} +        self.group = h5file[self.get_root(self.group_names)] + +        self.topics = topics + +        self.topic_infos = [] +        self.topic_counts = [] + +        for t in topics: +            self.topic_infos.append(bag.get_type_and_topic_info(t)[1][t]) +            self.topic_counts.append(self.topic_infos[-1].message_count) + +        if len(topics) == 1: +            self.topic_count = self.topic_counts[0] + +        topic_list = list(time_calib.keys()) +        topic_matches = [self.get_closest_topic(t, topic_list) for t in topics] + +        if not all([topic_matches[0] == tm for tm in topic_matches]): +            if self.SINGLE_TIME_DOMAIN: +                raise NotImplementedError("Individual handlers must all be in the same time domain - disable SINGLE_TIME_DOMAIN if you are sure") + +        self.global_time_calib = time_calib +        self.time_calib = time_calib[topic_matches[0]] + +        self.skew_us_per_s = self.time_calib['correction']['skew'] +        self.us_offset = self.time_calib['correction']['offset'] + +        self.ovc_pps_offset = int(time_calib['/ovc/pps_cb']['decimator']['round_offsets'][0] * 2500) + +        self.bag_name = os.path.basename(bag.filename) + +    def sensor_to_global_us(self, sensor_time_us): +        # Returns the ideal world time from the given sensor time + +        # First we compute the offset time given the sensor offset and ovc offset +        offset_us = sensor_time_us - self.us_offset - self.ovc_pps_offset + +        # We compute the ideal time, but this has some bad floating point properties +        # Accurate enough to find number of elapsed seconds +        new_time_us = offset_us / (1 + (self.skew_us_per_s/1e6)) + +        # We use the ideal number of seconds to directly compute
the skew +        # that has occurred in us +        skew_offset_us = (new_time_us/1e6) * self.skew_us_per_s + +        # We return the final time +        return offset_us - skew_offset_us + +    def get_root(self, topic_list): +        token_list = [t.split("/") for t in topic_list] +        root = token_list[0] + +        for tk in token_list: +            tmp = [] +            for r,t in zip(root,tk): +                if r == t: +                    tmp.append(r) +            root = tmp + +        return "/" + "/".join(root) + +    def get_closest_topic(self, topic, topic_list): +        # We do not have an exact match across the topics +        t_tokens = topic.split("/") +        max_sim = 0 +        for tl in topic_list: +            tl_tokens = tl.split("/") +            c = 0 +            for a,b in zip(t_tokens, tl_tokens): +                if a==b: +                    c += 1 +            if c > max_sim: +                max_sim = c +                t = tl + +        return t + +    def process(self, msg, topic=None, bag_time=None): +        return + +    def finish(self): +        return + +    def primary_time_ds(self): +        raise NotImplementedError("Sub classes need to return this") + + +class GPS(HDFHandler): +    SINGLE_TIME_DOMAIN = False +    def __init__(self, h5file, bag, time_calib): +        topics = [ +            "/ublox/hp_fix", +            "/ublox/navclock", +            "/ovc/gps_trigger", +        ] + +        super().__init__(h5file, topics, bag, time_calib) + +        num_msgs = self.topic_counts[0] + +        self.ts_ds = self.group.create_dataset("ts", (num_msgs,), dtype='i8', compression='lzf', maxshape=(None,)) +        self.navclock_ds = self.group.create_dataset("navclock", (num_msgs,), dtype='i8', compression='lzf', maxshape=(None,)) +        self.utm_ds = self.group.create_dataset("utm", (num_msgs,3), dtype='f8', compression='lzf', maxshape=(None,3)) +        self.fix_ds = self.group.create_dataset("fix", (num_msgs,3), dtype='f8', compression='lzf', maxshape=(None,3)) + +        self.message_buffers = defaultdict(list) + +    def ovc_sensor_to_global_us(self, sensor_time_us): +        # Refer to sensor_to_global_us for comments +        time_calib = self.global_time_calib['/ovc/pps_cb'] +        skew_us_per_s = time_calib['correction']['skew'] +        us_offset = time_calib['correction']['offset'] + +        offset_us = sensor_time_us - us_offset - self.ovc_pps_offset +        new_time_us = offset_us / (1 + (skew_us_per_s/1e6)) +        skew_offset_us = (new_time_us/1e6) * skew_us_per_s +        return offset_us - skew_offset_us + +    def associate_messages(self): +        return + +    def process(self, msg, topic=None, bag_time=None): +        self.message_buffers[topic].append( (int(bag_time.to_nsec()/1e3), msg) ) + +    def finish(self): +        hpfix_bag_time = np.array([v[0] for v in self.message_buffers['/ublox/hp_fix']]) +        hpfix_ros_time = np.array([int(v[1].header.stamp.to_nsec()/1e3) for v in self.message_buffers['/ublox/hp_fix']]) + +        navclock_bag_time = np.array([v[0] for v in self.message_buffers['/ublox/navclock']]) +        navclock_time = np.array([v[1].iTOW for v in self.message_buffers['/ublox/navclock']]) # This is global time +        navclock_second_mark = (navclock_time == ((navclock_time // 1000) * 1000)) + +        ovc_gps_trigger_bag_time = np.array([v[0] for v in self.message_buffers['/ovc/gps_trigger']]) +        ovc_gps_trigger_time = np.array([int(v[1].stamp.to_nsec()/1e3) for v in self.message_buffers['/ovc/gps_trigger']]) +        ovc_gps_trigger_time = self.ovc_sensor_to_global_us( ovc_gps_trigger_time ) + +        ovc_retime_mapping = np.array(self.ovc.retime_history['/ovc/left/image_mono/compressed']) +        pdb.set_trace() +        return + +    def primary_time_ds(self): +        return self.ts_ds + +    def attach_ovc(self, ovc): +        self.ovc = ovc + +class Ouster(HDFHandler): +    UNITS_TO_SECONDS = 1e9 + +    def __init__(self, h5file, topic, bag, time_calib): +        super().__init__(h5file, topic, bag, time_calib) + +        self.lidar_packets = [] +        self.id_init = None +
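+        # A full sweep covers measurement ids 0..2047 split across 128 packets +        # (16 columns per packet, 12609 payload bytes each). Packets are buffered +        # until a complete sweep is seen, then flushed as one (128, 12609) row; +        # imu_count below assumes roughly 20 IMU packets per lidar sweep.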
ouster_info = self.get_ouster_info(bag) + self.ouster_info = SensorInfo(ouster_info) + + scan_count_est = math.floor(self.topic_counts[0] / 128) + + self.lidar_buf_ds = self.group.create_dataset('data', + (scan_count_est,128,12609), + compression='lzf', dtype='u1', + chunks=(1,128,12609), + maxshape=(None,128,12609)) + self.group.create_dataset("metadata", data=ouster_info) + self.lidar_start_ts_ds = self.group.create_dataset("ts_start", (scan_count_est,), dtype='i8', compression='lzf', maxshape=(None,)) + self.lidar_end_ts_ds = self.group.create_dataset("ts_end", (scan_count_est,), dtype='i8', compression='lzf', maxshape=(None,)) + + self.imu_group = self.groups[ topic[-1] ] + + self.imu_ds = {} + + imu_count = scan_count_est * 20 + + self.imu_ds['av'] = self.imu_group.create_dataset( + "omega", [imu_count, 3], dtype="f8", maxshape=(None,3) + ) + self.imu_ds['la'] = self.imu_group.create_dataset( + "accel", [imu_count, 3], dtype="f8", maxshape=(None,3) + ) + self.imu_ds['ts'] = self.imu_group.create_dataset( + "ts", [imu_count], dtype="i8", compression="lzf", maxshape=(None,) + ) + + self.imu_idx = 0 + + def primary_time_ds(self): + return self.lidar_start_ts_ds + + def get_ouster_info(self, bag, topic="/os_node/metadata"): + for topic, msg, t in bag.read_messages(topics=[topic]): + ouster_metadata = msg.data + return ouster_metadata + + def process(self, msg, topic=None, bag_time=None): + if 'lidar' in topic: + ouster_lp = LidarPacket(msg.buf, self.ouster_info) + self.lidar_packets.append( + (ouster_lp.measurement_id, ouster_lp.frame_id, msg.buf, ouster_lp.timestamp) + ) + + fp = self.lidar_packets[0] # first + cp = self.lidar_packets[-1] # current + + have_full_scan = (0 in fp[0]) and (2047 in cp[0]) + have_half_scan = (not 0 in fp[0]) and (2047 in cp[0]) + + if have_half_scan: + self.lidar_packets = [] + + if have_full_scan: + lidar_sweep_start_ts = self.sensor_to_global_us( self.lidar_packets[0][3][0] / 1e3 ) + lidar_sweep_end_ts = self.sensor_to_global_us( self.lidar_packets[-1][3][-1] / 1e3 ) + + # If the lidar sweep started before the start of all sensors, throw away + if lidar_sweep_start_ts < 0: + self.lidar_packets = [] + return + + # Only initialize the ID once we have started all sensors + if self.id_init is None: + self.id_init = cp[1] + self.cur_frame_id = cp[1] - self.id_init ## INDEX + + frame_buf = np.empty((128, 12609), dtype=np.uint8) + + for idx, lp in enumerate(self.lidar_packets): + frame_buf[idx, :] = np.frombuffer(lp[2], dtype=np.uint8, count=12609) + + self.lidar_buf_ds[self.cur_frame_id, ...] 
= frame_buf + + # The start of the sweep is at the first packet's first timestamp + self.lidar_start_ts_ds[self.cur_frame_id] = lidar_sweep_start_ts + # The end of the sweep is at the last packet's last timestamp + self.lidar_end_ts_ds[self.cur_frame_id] = lidar_sweep_end_ts + + self.lidar_packets = [] ## RESET + elif 'imu' in topic: + ouster_ip = ImuPacket(msg.buf, self.ouster_info) + + self.imu_ds['av'][self.imu_idx] = np.deg2rad(ouster_ip.angular_vel) + self.imu_ds['la'][self.imu_idx] = 9.81 * ouster_ip.accel + self.imu_ds['ts'][self.imu_idx] = self.sensor_to_global_us( ouster_ip.sys_ts / 1e3 ) + + self.imu_idx += 1 + + def finish(self): + if self.id_init is None: + return + + self.lidar_buf_ds.resize( (self.cur_frame_id+1,128,12609) ) + self.lidar_start_ts_ds.resize( (self.cur_frame_id+1,) ) + self.lidar_end_ts_ds.resize( (self.cur_frame_id+1,) ) + + self.imu_ds['av'].resize( (self.imu_idx+1,3) ) + self.imu_ds['la'].resize( (self.imu_idx+1,3) ) + self.imu_ds['ts'].resize( (self.imu_idx+1,) ) + + +class OVC(HDFHandler): + + def __init__(self, h5file, bag, time_calib): + ovc_topics = ["/ovc/right/image_mono/compressed", + "/ovc/left/image_mono/compressed", + "/ovc/rgb/image_color/compressed", + "/ovc/vectornav/imu"] + self.image_topics = ovc_topics[:-1] + self.imu_topic = ovc_topics[-1] + + super().__init__(h5file, ovc_topics, bag, time_calib) + + self.ds = {} + + self.image_sizes = {} + + for topic, topic_count in zip(ovc_topics[:-1], self.topic_counts): + image_size = self.get_image_size(topic, bag) + self.image_sizes[topic] = image_size + + self.ds[topic] = { + "shape": tuple([topic_count] + image_size), + "chunk_shape": tuple([1] + image_size), + "max_shape": tuple([None] + image_size), + } + + self.ds[topic]['ds'] = self.groups[topic].create_dataset( + "data", + self.ds[topic]['shape'], + compression="lzf", + dtype="u1", + chunks=self.ds[topic]['chunk_shape'], + maxshape=self.ds[topic]['max_shape'], + ) + self.ds['ts'] = self.group.create_dataset( + "ts", [topic_count], dtype="i8", compression="lzf", maxshape=(None,) + ) + + self.imu_group = self.groups[ ovc_topics[-1] ] + + self.imu_ds = {} + + self.imu_ds['av'] = self.imu_group.create_dataset( + "omega", [topic_count*16, 3], dtype="f8", maxshape=(None,3) + ) + self.imu_ds['la'] = self.imu_group.create_dataset( + "accel", [topic_count*16, 3], dtype="f8", maxshape=(None,3) + ) + self.imu_ds['ts'] = self.imu_group.create_dataset( + "ts", [topic_count*16], dtype="i8", compression="lzf", maxshape=(None,) + ) + + self.drift = 0.0 + self.retime_history = defaultdict(list) + + self.buffers = defaultdict(list) + + self.imager_idx = 0 + self.imu_idx = 0 + + self.init_id = None + self.rel_id = None + + self.has_started = False + + self.imu_problems = [] + + def get_image_type(self, topic): + return topic.split('/')[2] + + def get_image_size(self, topic, bag): + for topic, msg, t in bag.read_messages(topics=[topic]): + return list(self.get_image_from_msg(msg).shape) + + def sync_buffers(self, get_popped=False): + times = [buf[0][0] for buf in self.buffers.values() if len(buf)>0] + max_time = max(times) + + ret_val = True + popped = defaultdict(list) + + for topic, buf in self.buffers.items(): + while len(buf) > 1 and buf[0][0] < max_time: + popped[topic].append(buf.pop(0)) + if len(buf) == 1: + ret_val = (buf[0][0] == max_time) + + if get_popped: + return ret_val, popped + else: + return ret_val + + def wait_for_start(self): + ret_val = True + for topic, buf in self.buffers.items(): + while len(buf) > 0 and buf[0][0] < -250: + buf.pop(0) + 
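+                # Anything popped here began more than 250 us before the global start; +                # clear ret_val so the caller keeps waiting.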
ret_val = False + if len(buf) == 0: + ret_val = False + return ret_val + + def retime(self, seq_id, topic, original_time=None, use_original_time=False): + rel_id = (seq_id - self.init_id[topic] + self.rel_id[topic]) + + if use_original_time: + return original_time, rel_id + + if "imu" in topic: + new_time = rel_id * 2500 + self.retime_history[topic].append((original_time, new_time)) + return new_time, rel_id + elif "image" in topic: + new_time = rel_id * 40000 + self.retime_history[topic].append((original_time, new_time)) + self.drift = 0.5 * (new_time - original_time) + 0.5 * self.drift + if not ( 40000 * round(original_time / 40000) == new_time ): + assert self.bag_name in ["car_urban_night_penno_small_loop.bag", + "falcon_outdoor_day_penno_parking_3.bag"] + percent_error = ((new_time - original_time) - self.drift) / self.drift + if percent_error > 0.01: + raise NotImplementedError("High change in drift is invalid") + return new_time, rel_id + else: + raise NotImplementedError("Should not be possible to arrive here") + + def get_buf_times(self, idx, asdict=True): + if asdict: + return {k:buf[idx][0] for k,buf in self.buffers.items()} + else: + return [buf[idx][0] for buf in self.buffers.values()] + + + def get_and_sync(self): + # The data should be synced!! + times = [buf[0][0] for buf in self.buffers.values()] + + readings = {k:self.buffers[k].pop(0) for k in self.image_topics} + next_img_time = self.buffers[self.image_topics[0]][0][0] + readings[self.imu_topic] = [] + while self.buffers[self.imu_topic][0][0] < next_img_time: + readings[self.imu_topic].append( self.buffers[self.imu_topic].pop(0) ) + + return readings + + def process_buffer(self): + if len(self.buffers[self.imu_topic]) < 32: + return + + if any([len(b)<=2 for b in self.buffers.values()]): + return + + cur_data = self.get_and_sync() + + for img_topic in self.image_topics: + buf_info = cur_data[img_topic] + retime, rel_id = self.retime( buf_info[1], img_topic, buf_info[0] ) + self.ds[img_topic]['ds'][self.imager_idx] = buf_info[2] + self.ds['ts'][self.imager_idx] = retime + + assert self.imager_idx == (rel_id - self.rel_id[img_topic]) + + self.imager_idx += 1 + + # Pull into local read buffers + imu_local_read_bufs = {k: np.stack([buf[2][k] for buf in cur_data[self.imu_topic]], axis=0) for k in cur_data[self.imu_topic][0][2].keys()} + + # Create local write buffers + imu_local_write_bufs = {k: np.nan*np.zeros((16,v.shape[0])) for k,v in cur_data[self.imu_topic][0][2].items()} + imu_local_write_bufs['ts'] = np.nan*np.zeros((16,)) + + # Get all initial timing information + imu_ts = np.array([cd[0] for cd in cur_data[self.imu_topic]]) + imu_seq_id = np.array([cd[1] for cd in cur_data[self.imu_topic]]) + imu_retimed = np.array( [self.retime(seq_id, self.imu_topic, ts) for seq_id, ts in zip(imu_seq_id, imu_ts)] ) + imu_local_idx = ((imu_ts - imu_ts[0]) / 2500).round().astype(int) + + seq_vals, seq_ind, seq_count = np.unique(imu_local_idx, return_index=True, return_counts=True) + max_repeats = np.max(seq_count) + + # This is the aggregate change in the relative ID + self.rel_id[self.imu_topic] += 16 - imu_ts.shape[0] + + if imu_ts.shape[0] == 16: # 16 values map one way + for k in imu_local_read_bufs.keys(): + imu_local_write_bufs[k] = imu_local_read_bufs[k] + imu_local_write_bufs['ts'] = imu_retimed[:,0] + elif max_repeats == 1: # all unique - mapping can be trusted + idx_diff = np.diff(imu_local_idx) + for idx in np.where(idx_diff == np.max(idx_diff))[0]: + imu_retimed[(idx+1):,:] += (idx_diff[idx]-1) * np.array([2500,1]) + + 
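+            # Scatter the gap-corrected readings into the 16-slot, frame-aligned +            # write buffers; slots without a reading keep their NaN fill.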
for k in imu_local_read_bufs.keys(): +                imu_local_write_bufs[k][imu_local_idx] = imu_local_read_bufs[k] +            imu_local_write_bufs['ts'][imu_local_idx] = imu_retimed[:,0] +        elif max_repeats == 2: # Repeats of 2 are from immediately before or immediately after +            idx_diff = np.diff(imu_local_idx) + +            for ind in np.where(seq_count==2)[0]: +                ind_a = ind +                ind_b = ind + 1 # np.unique returns the first - so the second should be immediately after + +                if imu_local_idx[ind_a] != imu_local_idx[ind_b] and imu_local_idx[ind_b] == imu_local_idx[ind_b+1]: +                    imu_local_idx[ind_b] -= 1 +                elif ind_b+1 >= imu_local_idx.shape[0]: # at the end +                    if imu_local_idx[ind_b] < 15: +                        imu_local_idx[ind_b] += 1 +                elif imu_local_idx[ind_a-1]+1 == imu_local_idx[ind_a] and imu_local_idx[ind_b] != imu_local_idx[ind_b+1] - 1: +                    imu_local_idx[ind_b] += 1 +                elif imu_local_idx[ind_a-1]+1 != imu_local_idx[ind_a] and imu_local_idx[ind_b] == imu_local_idx[ind_b+1] - 1: +                    imu_local_idx[ind_a] -= 1 +                elif imu_local_idx[ind_a-1]+1 != imu_local_idx[ind_a] and imu_local_idx[ind_b] != imu_local_idx[ind_b+1] - 1: +                    imu_local_idx[ind_a] -= 1 +                elif np.any(idx_diff > 1): +                    idx_errors = np.where(idx_diff > 1)[0] +                    idx_problem = np.argmin( np.abs( idx_errors - ind_a ) ) +                    closest_idx = idx_errors[idx_problem] + +                    if closest_idx < ind_a: +                        imu_local_idx[(closest_idx+1):ind_b] -= 1 +                    elif closest_idx > ind_b: +                        imu_local_idx[ind_a:closest_idx] += 1 +                else: +                    print(imu_local_idx) +                    raise NotImplementedError("Imu Local Index Pattern Not Supported") + +            idx_diff = np.diff(imu_local_idx) + +            if np.any(idx_diff < 0): +                print("==============") +                print(imu_local_idx) +                print(imu_ts) +                print(imu_retimed.T) +                print("==============") +                raise NotImplementedError("IDX must be increasing") + +            for idx in np.where(idx_diff == np.max(idx_diff))[0]: +                imu_retimed[(idx+1):,:] += (idx_diff[idx]-1) * np.array([2500,1]) + +            for k in imu_local_read_bufs.keys(): +                imu_local_write_bufs[k][imu_local_idx] = imu_local_read_bufs[k] +            imu_local_write_bufs['ts'][imu_local_idx] = imu_retimed[:,0] +        else: +            print("==============") +            print(imu_local_idx) +            print(imu_ts) +            print(imu_retimed.T) +            print("==============") +            raise NotImplementedError("MISSING TOO MANY IMU READINGS!!!") + +        for k,v in imu_local_write_bufs.items(): +            # Skip q, as it is zero or not reliable +            if k == 'q': +                continue +            self.imu_ds[k][self.imu_idx:self.imu_idx+16] = v + +        self.imu_idx += 16 + +        assert (self.imu_idx == self.imager_idx * 16) +        assert ( self.imu_ds['ts'][self.imu_idx-16] == self.ds['ts'][self.imager_idx-1] ) + +    def process(self, msg, topic=None, bag_time=None): +        time = self.sensor_to_global_us(msg.header.stamp.to_nsec() / 1e3) +        seq_id = msg.header.seq + +        assert len(self.buffers[topic]) == 0 or seq_id-1 == self.buffers[topic][-1][1] + +        if 'image' in topic: +            self.buffers[topic].append( (time, seq_id, self.get_image_from_msg(msg)) ) +        elif 'imu' in topic: +            self.buffers[topic].append( (time, seq_id, self.get_imu_from_msg(msg)) ) + +        if len(self.buffers) == len(self.topics): +            if not self.has_started: +                if self.sync_buffers(): +                    if self.wait_for_start(): +                        self.init_id = {k:buf[0][1] for k,buf in self.buffers.items()} +                        self.rel_id = {} +                        for k,buf in self.buffers.items(): +                            if 'image' in k: +                                self.rel_id[k] = round(buf[0][0] / 40000) +                            elif 'imu' in k: +                                self.rel_id[k] = round(buf[0][0] / 2500) +                            else: +                                raise NotImplementedError("Nothing but images and imus") + +                        self.has_started = True +            else: +                self.process_buffer() + +    def finish(self): +        for k, ds in self.ds.items(): +            if 'ts' in k:
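+                # self.ds['ts'] is a plain dataset rather than a {'ds': ...} dict +                # entry, so resize it directly.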
+ ds.resize((self.imager_idx,)) + continue + + final_ds_shape = tuple([self.imager_idx] + list(ds['shape'])[1:]) + ds['ds'].resize(final_ds_shape) + + self.imu_ds['av'].resize((self.imu_idx,3)) + self.imu_ds['la'].resize((self.imu_idx,3)) + self.imu_ds['ts'].resize((self.imu_idx,)) + + def primary_time_ds(self): + return self.ds['ts'] + + def get_image_from_msg(self, msg): + img = bridge.compressed_imgmsg_to_cv2(msg) + if len(img.shape) == 2: + img = img[:, :, None] + return img + + def get_imu_from_msg(self, msg): + return { + 'q': np.array( + [msg.orientation.x, msg.orientation.y, msg.orientation.z, msg.orientation.w] + ), + 'av': np.array( + [msg.angular_velocity.x, msg.angular_velocity.y, msg.angular_velocity.z] + ), + 'la': np.array( + [msg.linear_acceleration.x,msg.linear_acceleration.y,msg.linear_acceleration.z] + ), + } + + +class Event(HDFHandler): + UNITS_TO_SECONDS = 1e6 + + def __init__(self, h5file, topic, bag, time_calib): + super().__init__(h5file, topic, bag, time_calib) + + event_count = time_calib[topic]['nevents'] # self.get_event_count(bag, topic) + self.chunk_size = 40000 + + self.x_ds = self.group.create_dataset( + "x", + (event_count,), + compression="lzf", + dtype="u2", + chunks=(self.chunk_size,), + maxshape=(None,), + ) + self.y_ds = self.group.create_dataset( + "y", + (event_count,), + compression="lzf", + dtype="u2", + chunks=(self.chunk_size,), + maxshape=(None,), + ) + self.t_ds = self.group.create_dataset( + "t", + (event_count,), + compression="lzf", + dtype="i8", + chunks=(self.chunk_size,), + maxshape=(None,), + ) + self.p_ds = self.group.create_dataset( + "p", + (event_count,), + compression="lzf", + dtype="i1", + chunks=(self.chunk_size,), + maxshape=(None,), + ) + + self.x_buf = [] # + self.y_buf = [] # + self.t_buf = [] # + self.p_buf = [] # + self.count_buf = 0 + + self.start = 0 + self.prev_msg = None + + self.has_index_map = False + + self.decoder = Decoder() + + def get_event_count(self, bag, topic): + ## TODO - get count from verify or timing script + count = 0 + d = Decoder() + for topic, msg, t in bag.read_messages(topics=[topic]): + d.decode_bytes( + msg.encoding, msg.width, msg.height, msg.time_base, msg.events + ) + cd_events = d.get_cd_events() + count += cd_events.shape[0] + return count + + def add_events(self, x, y, t, p): + msg_shape = x.shape[0] + + if msg_shape == 0: + return + + self.x_buf.append(x) + self.y_buf.append(y) + self.t_buf.append(t) + self.p_buf.append(p) + self.count_buf += msg_shape + + def flush_buffer(self, ignore_chunk=False): + x = np.concatenate(self.x_buf) + y = np.concatenate(self.y_buf) + t = np.concatenate(self.t_buf) + p = np.concatenate(self.p_buf) + + if not ignore_chunk: + added_idx = self.chunk_size * (x.shape[0] // self.chunk_size) + else: + added_idx = x.shape[0] + + self.end_idx = self.start + added_idx + + self.x_ds[self.start : self.end_idx] = x[:added_idx] + self.y_ds[self.start : self.end_idx] = y[:added_idx] + self.t_ds[self.start : self.end_idx] = t[:added_idx] + self.p_ds[self.start : self.end_idx] = p[:added_idx] + + self.start = self.start + added_idx + + if not ignore_chunk: + self.x_buf = [x[added_idx:]] + self.y_buf = [y[added_idx:]] + self.t_buf = [t[added_idx:]] + self.p_buf = [p[added_idx:]] + self.count_buf = self.count_buf - added_idx + else: + self.x_buf = [] + self.y_buf = [] + self.t_buf = [] + self.p_buf = [] + self.count_buf = 0 + + def process(self, msg, topic=None, bag_time=None): + self.decoder.decode_bytes( + msg.encoding, msg.width, msg.height, msg.time_base, msg.events + ) + 
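+        # The decoder returns structured arrays: CD events carry x, y, t and p +        # fields; external trigger events are decoded as well but not stored.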
cd_events = self.decoder.get_cd_events() + trig_events = self.decoder.get_ext_trig_events() + + t = self.sensor_to_global_us( cd_events['t'] ).astype(int) + has_started = t >= 0 + + x = cd_events['x'][has_started] + y = cd_events['y'][has_started] + t = t[has_started] + p = cd_events['p'][has_started] + + self.add_events(x, y, t, p) + + if self.count_buf > self.chunk_size: + self.flush_buffer() + + def finish(self): + self.flush_buffer(ignore_chunk=True) + self.x_ds.resize( (self.end_idx,) ) + self.y_ds.resize( (self.end_idx,) ) + self.t_ds.resize( (self.end_idx,) ) + self.p_ds.resize( (self.end_idx,) ) + + self.compute_ms_index_map() + + def primary_time_ds(self): + raise NotImplementedError("This would lead to something very expensive being computed") + + def index_map(self, index_map_ds, time_ds): + index_map_ds_cl = 0 + + remaining_times = time_ds[...].copy() + cur_loc = 0 + chunk_size = 10000000 + num_events = self.t_ds.shape[0] + + while remaining_times.shape[0] > 0 and cur_loc < num_events: + end = min( num_events, cur_loc+chunk_size ) + idx = cur_loc + np.searchsorted(self.t_ds[cur_loc:end], remaining_times) + + idx_remaining = (idx == end) + idx_found = (idx < end) + + index_map_ds[index_map_ds_cl:index_map_ds_cl+idx_found.sum()] = idx[idx_found] + + remaining_times = remaining_times[idx_remaining] + cur_loc = cur_loc + chunk_size + index_map_ds_cl += idx_found.sum() + + def compute_ms_index_map(self): + time_ds = np.arange(0, int(np.floor(self.t_ds[-1] / 1e3)) ) * 1e3 + index_map_ds = self.group.create_dataset("ms_map_idx", time_ds.shape, dtype="u8") + + self.index_map( index_map_ds, time_ds ) + + def compute_index_map(self, sensor_processor): + time_ds = sensor_processor.primary_time_ds() + index_map_name = time_ds.name.split("/")[-1] + "_map" + self.t_ds.name.replace("/", "_") + index_map_ds = sensor_processor.group.create_dataset(index_map_name, time_ds.shape, dtype="u8") + + self.index_map( index_map_ds, time_ds ) + + +def compute_temporal_index_maps(event_processor, processors): + print("Starting index map computation") + processors = [p for p in processors if type(p) not in [Event]] + + for p in processors: + event_processor.compute_index_map( p ) + + +def plot_coordinate_frames(transformation_matrices): + """ plot the coordinate frames for verification of the transformations """ + def set_axes_equal(ax): + extents = np.array([getattr(ax, f"get_{dim}lim")() for dim in "xyz"]) + centers = np.mean(extents, axis=1) + radius = 0.5 * np.max(np.abs(extents[:, 1] - extents[:, 0])) + for center, dim in zip(centers, "xyz"): + getattr(ax, f"set_{dim}lim")(center - radius, center + radius) + + fig = plt.figure() + ax = fig.add_subplot(111, projection="3d") + + for T in transformation_matrices: + R = T[:3, :3] + t = T[:3, 3] + x_axis = R[:, 0] + y_axis = R[:, 1] + z_axis = R[:, 2] + + ax.quiver(t[0], t[1], t[2], x_axis[0], x_axis[1], x_axis[2], + color="r", length=0.1) + ax.quiver(t[0], t[1], t[2], y_axis[0], y_axis[1], y_axis[2], + color="g", length=0.1) + ax.quiver(t[0], t[1], t[2], z_axis[0], z_axis[1], z_axis[2], + color="b", length=0.1) + + ax.set_xlabel("X") + ax.set_ylabel("Y") + ax.set_zlabel("Z") + set_axes_equal(ax) + plt.show() + + +def insert_calibration(h5_file, camchain, lidar_calib, imu_chain): + # Check that camchain is not none and it exists, and read it + if camchain is None: + raise ValueError("camchain is None") + if not os.path.exists(camchain): + raise ValueError("camchain file does not exist") + + # Get the IMU to ovc left transformation + with open(imu_chain, 
"r") as yaml_file: + imuchain_data = yaml.safe_load(yaml_file) + assert "/ovc/left" in imuchain_data["cam0"]["rostopic"] + ol_T_imu = np.array(imuchain_data["cam0"]["T_cam_imu"], dtype=np.float64) + + # Read the YAML file with the camera calibrations + with open(camchain, "r") as yaml_file: + camchain_data = yaml.safe_load(yaml_file) + + # Open the npz file with the LiDAR calibrations + if lidar_calib is None: + raise ValueError("lidar_calib is None") + with np.load(lidar_calib) as lidar_data: + T_c_l = lidar_data["T_c_l"] + + # Hold all the transformation for viz purposes + transformation_matrices = [] + + # Write the data to an HDF5 file + with h5py.File(h5_file, "a") as hdf_file: + + # Write camera calibration data + for cam_key, cam_data in camchain_data.items(): + rostopic = cam_data.pop("rostopic") + + if not 'T_cn_cnm1' in cam_data.keys(): + # This is the reference camera + assert "prophesee" in rostopic and "left" in rostopic + T_to_prophesee_left = np.array([[1, 0, 0, 0],[0, 1, 0, 0], + [0, 0, 1, 0], [0, 0, 0, 1]]) + else: + T_to_prev = np.array(cam_data.pop("T_cn_cnm1")) + T_to_prophesee_left = T_to_prophesee_left @ T_to_prev + + if "prophesee" in rostopic: + camera_side = rostopic.split("/")[-2] + cam_group = hdf_file["prophesee"][camera_side] + elif "ovc" in rostopic: + camera_side = rostopic.split("/")[-3] + cam_group = hdf_file["ovc"][camera_side] + + calib_group = cam_group.create_group("calib") + + for data_key, data_value in cam_data.items(): + if data_key == "cam_overlaps": + continue + if data_key == "T_cn_cnm1": + continue + if isinstance(data_value, list): + data_value = np.array(data_value) + calib_group[data_key] = data_value + calib_group["T_to_prophesee_left"] = np.linalg.inv(T_to_prophesee_left) + transformation_matrices.append(calib_group["T_to_prophesee_left"][:]) + if "ovc" in rostopic and "left" in rostopic: + pl_T_ol = calib_group["T_to_prophesee_left"][:] + + # Compute the transofrmation from IMU to prophesee_left + pl_T_imu = pl_T_ol @ ol_T_imu + transformation_matrices.append(pl_T_imu) + imu_group = hdf_file["/ovc/imu"] + imu_calib_group = imu_group.create_group("calib") + imu_calib_group["T_to_prophesee_left"] = pl_T_imu + + # Compute the transformation from LiDAR to T_to_prophesee_left + pl_T_lid = pl_T_ol @ T_c_l + # print(pl_T_lid @ np.array([[0, 0, 0, 1]]).T*1000) + transformation_matrices.append(pl_T_lid) + lidar_group = hdf_file["ouster"] + lidar_calib_group = lidar_group.create_group("calib") + lidar_calib_group["T_to_prophesee_left"] = pl_T_lid + + # plot_coordinate_frames(transformation_matrices) + + +def process_bag(filename, h5fn=None, offsetfn=None, + camchainfn=None, lidar_calib=None, + imuchainfn=None, + verbose=True, disable_events=False): + # Get time offset info + if offsetfn is None: + offsetfn = os.path.splitext(args.bag)[0] + "_time_calib.yml" + + print(Fore.GREEN, "Time offset: ", offsetfn, Style.RESET_ALL) + with open(offsetfn, 'r') as f: + time_calib = yaml.safe_load(f) + + # print camchain and lidar_calib files + if camchainfn: + print(Fore.GREEN, "Camchain: ", camchainfn, Style.RESET_ALL) + + if imuchainfn: + print(Fore.GREEN, "Imu chain: ", imuchainfn, Style.RESET_ALL) + + if lidar_calib: + print(Fore.GREEN, "Lidar calib: ", lidar_calib, Style.RESET_ALL) + + # Open bag + bag = rosbag.Bag(filename) + if h5fn is None: + h5fn = os.path.splitext(args.bag)[0] + ".h5" + + print(Fore.GREEN, "Output file: ", h5fn, Style.RESET_ALL) + # Print only the bag without the path + bag_name = bag.filename.split('/')[-1] + + h5file = 
h5py.File(h5fn, "w") + + # Add metadata to h5 file + version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() + h5file.attrs["version"] = version + h5file.attrs["raw_bag_name"] = bag_name + h5file.attrs["creation_date"] = str(datetime.now()) + + processors = [ + Ouster(h5file, ["/os_node/lidar_packets","/os_node/imu_packets"], bag, time_calib), + OVC(h5file, bag, time_calib), + # GPS(h5file, bag, time_calib), + ] + + # processors[-1].attach_ovc(processors[-2]) + + if not disable_events and not ("calib" in bag_name and "lidar" in bag_name): + processors.append(Event(h5file, "/prophesee/right/events", bag, time_calib)) + processors.append(Event(h5file, "/prophesee/left/events", bag, time_calib)) + HAS_EVENTS = True + else: + print(Fore.YELLOW, "Events are not written for lidar_calib bags", + Style.RESET_ALL) + HAS_EVENTS = False + + # Get list of topics that will be processed + topic_to_processor = {} + for p in processors: + for t in p.topics: + topic_to_processor[t] = p + topics = list(topic_to_processor.keys()) + + print(Fore.GREEN, "Start processing...", Style.RESET_ALL) + for topic, msg, t in tqdm(bag.read_messages(topics=topics), + disable=not verbose,): + topic_to_processor[topic].process(msg, topic, t) + + for p in processors: + p.finish() + + if HAS_EVENTS: + print(Fore.GREEN, "Computing index maps for lidar and imagers") + compute_temporal_index_maps(processors[-1], processors) + compute_temporal_index_maps(processors[-2], processors) + + # Insert the calibration into the bags + if "calib" not in bag_name and args.camchain is not None: + insert_calibration(h5fn, camchainfn, lidar_calib, imuchainfn) + + # Set permissions + os.chmod(h5fn, 0o666) + print(Fore.GREEN, "Output file: ", h5fn, Style.RESET_ALL) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--bag", help="ROS bag name") + parser.add_argument("--h5fn", default=None, help="HDF5 file name") + parser.add_argument("--offset", default=None, help="Time offset File") + parser.add_argument("--camchain", default=None, help="Camchain file") + parser.add_argument("--lidar_calib", default=None, + help="LiDAR Calibration File") + parser.add_argument("--imu_chain", default=None, + help="IMU chain file") + parser.add_argument("--verbose", action="store_true", + help="Verbose output") + parser.add_argument("--disable_events", action="store_true", + help="Disable events. Useful for debugging.") + args = parser.parse_args() + + print(Fore.GREEN, "Processing bag: ", args.bag, Style.RESET_ALL) + + process_bag(args.bag, args.h5fn, args.offset, + args.camchain, args.lidar_calib, args.imu_chain, + args.verbose, args.disable_events) diff --git a/build_system/bag_processing/rosbag2timecorrection.bash b/build_system/bag_processing/rosbag2timecorrection.bash new file mode 100755 index 0000000..5253406 --- /dev/null +++ b/build_system/bag_processing/rosbag2timecorrection.bash @@ -0,0 +1,21 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# TIME_CORRECTIONS_PATH is the only output file in the array +output_files=("$TIME_CORRECTIONS_PATH") +check_files output_files + + +# Run bag processing +echo -e "${BLUE}Extracting time corrections from $BAG_NAME${NC}" +python3 build_system/bag_processing/rosbag2timecorrection.py --bag "$BAG_PATH" --time_fn "$TIME_CORRECTIONS_PATH.tmp" +if [ $? 
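The single-pass dispatch in `process_bag` above is the core of the converter: each processor owns a topic list, the bag is streamed exactly once, and every message is routed to the processor that claims its topic. A stripped-down sketch of that pattern; the stub class and bag name are illustrative, not the real `Ouster`/`OVC`/`Event` implementations:

```python
import rosbag

class NullProcessor:
    """Illustrative stand-in for the per-sensor processors."""
    def __init__(self, topics):
        self.topics = topics
    def process(self, msg, topic, t):
        pass  # a real processor appends to its HDF5 datasets here
    def finish(self):
        pass  # flush buffers, resize datasets, build index maps

processors = [NullProcessor(["/ovc/pps_cb"]), NullProcessor(["/ublox/fix"])]
topic_to_processor = {t: p for p in processors for t in p.topics}

bag = rosbag.Bag("example.bag")  # hypothetical bag name
for topic, msg, t in bag.read_messages(topics=list(topic_to_processor)):
    topic_to_processor[topic].process(msg, topic, t)
for p in processors:
    p.finish()
bag.close()
```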
-ne 0 ]; then + echo -e "${RED}Error extracting time corrections${NC}" + exit 1 +fi +echo -e "${GREEN}Time corrections extracted${NC}" + +# Move output file to correct location +mv "$TIME_CORRECTIONS_PATH.tmp" "$TIME_CORRECTIONS_PATH" + +echo -e "${YELLOW}Bag file $BAG_NAME time corrections saved to $TIME_CORRECTIONS_PATH${NC}" diff --git a/build_system/bag_processing/rosbag2timecorrection.py b/build_system/bag_processing/rosbag2timecorrection.py new file mode 100644 index 0000000..e8f35d5 --- /dev/null +++ b/build_system/bag_processing/rosbag2timecorrection.py @@ -0,0 +1,429 @@ +import roslib +import rospy +import rosbag +from sensor_msgs.msg import CompressedImage, Image + +import argparse +import os + +from pprint import pprint +import numpy as np +from cv_bridge import CvBridge, CvBridgeError +bridge = CvBridge() +import pdb +from collections import defaultdict + +from tqdm import tqdm + +from scipy.stats import linregress +from scipy.spatial.distance import cdist + +from event_array_py import Decoder + +import pandas as pd + +def event_frame(cd_events, width=1280, height=720): + frame = np.zeros((height, width, 3)) + frame[cd_events['y'].astype(int), cd_events['x'].astype(int)] = 1 + return frame + +def compute_skew_offset(fine_timing, verbose=True): + original_timing = fine_timing.copy() + + # Fix the start of the sequence + start_offset = original_timing.min() + offset_timing = original_timing - start_offset + + # Calculate an average skew + num_samples = offset_timing.shape[0] + # Dividing by a small integer is beneficial here, so we divide by the + # sample index and subtract out a full second to get our potential skews + skew_candidates = offset_timing.divide(offset_timing['idx'], axis='rows').tail(-1) - 1000000 + + skew_proposals = skew_candidates + skew = {} + + for k in skew_candidates.columns: + sensor_skew_proposals = skew_proposals[k] + inliers = (sensor_skew_proposals - sensor_skew_proposals.mean()).abs() <= (sensor_skew_proposals.std()) + 1 + skew[k] = [sensor_skew_proposals[inliers].mean()] + + skew = pd.DataFrame(data=skew) + + additive_skew = skew.loc[np.repeat(skew.index, num_samples)].reset_index(drop=True) + additive_skew = additive_skew.multiply(offset_timing['idx'], axis='rows').astype(int) + + skewed_timing = offset_timing - additive_skew + + fine_offsets = {} + + for k in skew_candidates.columns: + fine_offsets[k] = {"sensor": { + "offset": int(start_offset[k]), + "skew": float(skew[k]), + "info": "skew is us per sec - offset is in us", + }} + del fine_offsets['idx'] + + return fine_offsets + +def apply_static_per_topic(fine_timing): + # Fix the start + static_offset = fine_timing.copy() + return static_offset - static_offset.min() + +def get_sensor_correlated(timing_info, verbose=True): + matched_to = [t['correlation']['matched_to'] for t in timing_info.values()] + if not all([mt == matched_to[0] for mt in matched_to]): + raise ValueError("All must be matched to the same topic") + primary_topic = matched_to[0] + + num_samples = timing_info[primary_topic]['sensor'].shape[0] + num_sensors = len(timing_info)+2 # add one for IDEAL reference and one for IDX + + fine_time_info = defaultdict(list) + all_msgs = defaultdict(list) + + for i in range(num_samples): + # We track idx in case of lost samples + # We track an ideal PPS for a reference + sample_set = {"idx": i, "ideal": i*1000000} + msgs = {} + + for topic, info in timing_info.items(): + # Grab mapping that is indexed by sample and returns a primary index (i.e. 
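To make the skew estimate in `compute_skew_offset` concrete: a sensor clock that gains 5 us every second produces PPS samples (after removing the start offset) at 0, 1000005 us, 2000010 us, and so on, so dividing elapsed time by the sample index and subtracting one ideal second (1e6 us) recovers the per-second skew directly. A toy numeric check with a made-up drift value:

```python
import numpy as np

idx = np.arange(1, 10)
t_us = idx * (1_000_000 + 5)    # start offset already subtracted
skew = t_us / idx - 1_000_000   # -> 5.0 us per second at every sample
print(skew.mean())              # the real code also rejects outliers first
```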
p_idx = matching_inds[s_idx]) + topic_idx = info['correlation']['matching_inds'][i] + # Get the locations of the samples that correlate with the primary + is_inlier = info['correlation']['inliers'][i] + if not is_inlier: + continue + sample_set[topic] = info['sensor'][topic_idx] + if verbose and 'msg' in info: + msgs[topic] = info['msg'][int(topic_idx)] + + + # If we don't have all the sensors, skip + if len(sample_set) == num_sensors: + for k,v in sample_set.items(): + fine_time_info[k].append(v) + for k,v in msgs.items(): + all_msgs[k].append(v) + + fine_timing = pd.DataFrame(data=fine_time_info) + + def plot_idx(idx, surrounding=3): + import matplotlib.pyplot as plt + fig, axes = plt.subplots(2,surrounding*2+1,sharex=True,sharey=True) + + for i in range(-surrounding,surrounding+1): + axes[0,i].imshow( all_msgs['/ovc/pps_cb'][i+idx] ) + axes[1,i].imshow( all_msgs['/prophesee/left/events'][i+idx] ) + + return fine_timing + +def match_rough_timings(time_a, time_b): + timing_distances = cdist( time_a.reshape(-1,1), time_b.reshape(-1,1) ) + matching_inds = np.argmin( timing_distances, axis=0 ).squeeze() + return time_a[matching_inds], time_b, matching_inds + +def get_rough_time_correlations(timing_info, primary_topic, verbose=True): + # Find offset from primary topic in ROS time + time_calibs = {} + + # We define the primary topic as the first topic in the list given + # in this case it will be ovc/pps + topics = list(timing_info.keys()) + for topic in topics: + A, B, matching_inds = match_rough_timings(timing_info[topic]['ros'], timing_info[primary_topic]['ros']) + bag_A, bag_B, matching_inds_bag = match_rough_timings(timing_info[topic]['bag'], timing_info[primary_topic]['bag']) + + if (matching_inds == 0).all(): + print("THIS IS A WARNING THAT WE ARE UTILIZING THE BAG TIMES TO COMPUTE THE CORRELATION") + matching_inds = matching_inds_bag + RELAX = True + else: + RELAX = False + + assert np.all(matching_inds_bag == matching_inds) + + offset = B - A + + inliers = (abs(offset) >= 0) if RELAX else (abs(offset) < 10000) # 10ms -> 10,000 us + + timing_info[topic]["rough_sync"] = { + 'offsets': offset, + 'mean': float(np.mean( offset[inliers] )), + 'min': float(np.min( offset[inliers] )), + 'max': float(np.max( offset[inliers] )), + 'std': float(np.std( offset[inliers] )), + 'inliers': float(np.sum( inliers ) / offset.shape[0]), + "relaxed": RELAX, + } + + timing_info[topic]["internal_clock"] = { + "mean": float(np.mean( np.diff( A ) )), + "min": float(np.min( np.diff( A ) )), + "max": float(np.max( np.diff( A ) )), + "std": float(np.std( np.diff( A ) )), + "relaxed": RELAX, + } + + timing_info[topic]["correlation"] = { + "matched_to": primary_topic, + "matching_inds": matching_inds, + "inliers": inliers, + "relaxed": RELAX, + } + + # import matplotlib.pyplot as plt + # pdb.set_trace() + # fig, axes = plt.subplots(1,2,sharex=True,sharey=True) + # axes[0].imshow( timing_info['/ovc/pps_cb']['msg'][ 10 ] ) + # axes[1].imshow( timing_info['/prophesee/left/events']['msg'][timing_info['/prophesee/left/events']['correlation']['matching_inds'][10]] ) + # pdb.set_trace() + + return timing_info + + +def get_timing_info(bag, topics=[], verbose=True): + timing_info = {} + + for topic in topics: + timing_info[topic] = {} + if 'events' in topic: + timing_info[topic]['decoder'] = Decoder() + timing_info[topic]['nevents'] = 0 + timing_info[topic]['ros'] = [] + timing_info[topic]['bag'] = [] + timing_info[topic]['sensor'] = [] + timing_info[topic]['msg'] = [] + else: + timing_info[topic]['ros'] = [] + 
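The rough matching in `match_rough_timings` pairs every primary-topic timestamp with its nearest neighbor in another topic through a dense distance matrix; this is O(|A| x |B|) in memory, which is fine for roughly one PPS-like sample per second but would not scale to raw event timestamps. A small sketch with made-up times:

```python
import numpy as np
from scipy.spatial.distance import cdist

time_a = np.array([10, 1_000_100, 2_000_050])  # e.g. one sensor's samples, us
time_b = np.array([0, 1_000_000, 2_000_000])   # the primary topic, us
d = cdist(time_a.reshape(-1, 1), time_b.reshape(-1, 1))
matching_inds = np.argmin(d, axis=0)           # nearest time_a per time_b
print(time_b - time_a[matching_inds])          # rough offsets: [-10 -100 -50]
```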
timing_info[topic]['bag'] = [] + timing_info[topic]['sensor'] = [] + timing_info[topic]['seq'] = [] + timing_info[topic]['msg'] = [] + + # Pull relevant timing information from every sensor + for topic, msg, t in tqdm(bag.read_messages(topics=topics), + disable=not verbose): + if 'events' in topic: + d = timing_info[topic]['decoder'] + d.decode_bytes(msg.encoding, msg.width, msg.height, msg.time_base, msg.events) + + cd_events = d.get_cd_events() + timing_info[topic]['nevents'] += cd_events.shape[0] + trig_events = d.get_ext_trig_events() + + # No trigger event. Ignore. + if len(trig_events) == 0: + continue + + for e in trig_events: + if e['p'] == 1: + continue + + # Convert microseconds to us + cd_t = cd_events[0]['t'] + trig_t = e['t'] * 1 + + if trig_t <= cd_t: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(trig_t) + if verbose: + timing_info[topic]['msg'].append(event_frame(cd_events)) + else: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000 + (trig_t - cd_t) ) + timing_info[topic]['sensor'].append(trig_t) + if verbose: + timing_info[topic]['msg'].append(event_frame(cd_events)) + elif 'pps' in topic: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(msg.stamp.to_nsec() / 1000) + timing_info[topic]['seq'].append(msg.seq) + if verbose: + has_img = len(timing_info['/ovc/left/image_mono/compressed']['msg']) > 0 + img = np.zeros((720,1280)) if not has_img else timing_info['/ovc/left/image_mono/compressed']['msg'][-1] + timing_info[topic]['msg'].append(img) + elif 'vectornav' in topic: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['seq'].append(msg.header.seq) + elif 'image' in topic: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(msg.header.stamp.to_nsec() / 1000) + if verbose: + timing_info[topic]['msg'].append(bridge.compressed_imgmsg_to_cv2(msg)) + elif 'fix' in topic: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(msg.header.stamp.to_nsec() / 1000) + elif 'sys_time' in topic: + timing_info[topic]['bag'].append(t.to_nsec() / 1000) + timing_info[topic]['ros'].append(msg.header.stamp.to_nsec() / 1000) + timing_info[topic]['sensor'].append(msg.sensor_time / 1000) + + # Convert all lists to numpy arrays + for topic in timing_info.keys(): + if "events" in topic: + del timing_info[topic]['decoder'] + for k in timing_info[topic].keys(): + if not 'msg' in k: + timing_info[topic][k] = np.array(timing_info[topic][k]).astype(int) + + # Ouster IMU needs to be resampled to 1Hz on the top of each sensor second + for topic in timing_info.keys(): + if 'sys_time' in topic: # For ouster times + # The goal is to generate fake PPS signals to sync other sensors to + # This is done by computing where the pulse would be at the top of each second + sensor_to_ros_lr = linregress( timing_info[topic]['sensor'], timing_info[topic]['ros'] ) + sensor_times_to_query = np.arange( 
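The Ouster publishes no PPS topic, so the block above synthesizes one: fit sensor time against ROS time with a linear regression, then evaluate the fit at the top of every sensor second to produce fake PPS samples the other sensors can be correlated against. A sketch with a toy clock model (the constants are illustrative only):

```python
import numpy as np
from scipy.stats import linregress

sensor = np.arange(0, 10_000_000, 100_000)   # sys_time samples, us
ros = 1_700_000_000_000_000 + sensor + 42    # toy epoch clock model, us
fit = linregress(sensor, ros)

tops = np.arange(0, 10_000_000, 1_000_000)   # top of each sensor second
fake_pps = (fit.intercept + fit.slope * tops).astype(int)
```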
int(np.min(timing_info[topic]['sensor']/1000000))*1000000, + int(np.max(timing_info[topic]['sensor']/1000000))*1000000, 1000000 ).astype(int) + ros_times = (sensor_to_ros_lr.intercept + sensor_to_ros_lr.slope * sensor_times_to_query).astype(int) + + # repeat for bag times + sensor_to_bag_lr = linregress( timing_info[topic]['sensor'], timing_info[topic]['bag'] ) + sensor_times_to_query = np.arange( int(np.min(timing_info[topic]['sensor']/1000000))*1000000, + int(np.max(timing_info[topic]['sensor']/1000000))*1000000, 1000000 ).astype(int) + bag_times = (sensor_to_bag_lr.intercept + sensor_to_bag_lr.slope * sensor_times_to_query).astype(int) + + A = timing_info[topic] + if verbose: + timing_info[topic] = {'bag': bag_times, 'ros': ros_times, 'sensor': sensor_times_to_query, 'original': A} + else: + timing_info[topic] = {'bag': bag_times, 'ros': ros_times, 'sensor': sensor_times_to_query} + + return timing_info + +def correct_ovc(timing_info, ovc_pps="/ovc/pps_cb", ovc_imager="/ovc/left/image_mono/compressed", poll_offset=60, interrupt_offset=13, verbose=True): + # The OVC PPS and imager are out of phase by an integer number of vectornav clock cycles + ovc_pps_info = timing_info[ovc_pps] + ovc_pps_info['sensor'] -= poll_offset + + ovc_imager_info = timing_info[ovc_imager] + ovc_imager_info['sensor'] -= interrupt_offset + + matching_inds = np.searchsorted( ovc_imager_info['ros'], ovc_pps_info['ros'] ) - 1 + ovc_img_A = ovc_imager_info['ros'][matching_inds][2:-2] # Ignore the first and last two seconds + ovc_pps_B = ovc_pps_info['ros'][2:-2] + + assert np.all(np.diff(ovc_pps_info['seq'])==1) + assert np.all(np.diff(ovc_imager_info['seq'])==1) + + del timing_info[ovc_imager] + + ovc_offset = (ovc_img_A - ovc_pps_B) / 2500 + round_offset = ovc_offset.round() % 16 + ovc_info = { + "dense_offsets": ovc_offset, + "round_offsets": round_offset, + "round_offset": int(round_offset[0]), + } + inliers = ovc_info["round_offsets"] == ovc_info['round_offset'] + assert (inliers.sum() / inliers.shape[0]) > 0.9 + + timing_info[ovc_pps]['decimator_info'] = ovc_info + + return ovc_info + +def compute_time_calibs(bag, topics=[], verbose=True, visualize=False): + if visualize: + import matplotlib.pyplot as plt + + timing_info = get_timing_info(bag, topics, verbose) + + ovc_dec_info = correct_ovc(timing_info, verbose=verbose) + + rough_time_info = get_rough_time_correlations(timing_info, topics[0], verbose=verbose) + fine_time_info = get_sensor_correlated(timing_info, verbose=verbose) + + fine_offset = compute_skew_offset(fine_time_info, verbose=verbose) + + if visualize: + plt.show() + + results = {} + + is_relaxed = max([rough_time_info[k]['rough_sync']['relaxed'] for k in rough_time_info.keys()]) + + for k in rough_time_info.keys(): + # NOTE: These bags have bad synchronization for the PTP in ROS time, so we fall back to + # bag time for the rough synchronization + + if is_relaxed: + fn = bag.filename + fn_key = os.path.basename(fn) + assert fn_key in ['falcon_outdoor_day_penno_parking_1.bag', + 'falcon_outdoor_day_penno_parking_2.bag', + 'falcon_outdoor_day_penno_plaza.bag' ] + # NOTE: This bag has a large skew from PTP + elif fine_offset[k]['sensor']['skew'] > 1000: + fn = bag.filename + fn_key = os.path.basename(fn) + assert fn_key in ['car_urban_night_penno_small_loop.bag'] + + results[k] = { + 'correction': fine_offset[k]['sensor'], + 'rough_correction': rough_time_info[k]['rough_sync'], + 'bag_time': rough_time_info[k]['bag'], + 'ros_time': rough_time_info[k]['ros'], + 'sensor_time': rough_time_info[k]['sensor'], + 'ros_correlation': rough_time_info[k]['correlation'], + } + results['/ovc/pps_cb']['decimator'] = ovc_dec_info + results['/prophesee/left/events']['nevents'] = rough_time_info['/prophesee/left/events']['nevents'] + results['/prophesee/right/events']['nevents'] = rough_time_info['/prophesee/right/events']['nevents'] + + return results + 
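A toy check of the decimator-phase logic in `correct_ovc` above: the OVC imager runs at 400/16 = 25 Hz off the 400 Hz vectornav clock (2500 us period), so the imager-to-PPS misalignment should be a constant integer number of clock cycles modulo the 16-cycle decimation. Illustrative numbers only:

```python
import numpy as np

pps_ts = np.arange(5) * 1_000_000        # one PPS per second, us
phase_cycles = 3                         # hypothetical true phase
img_ts = pps_ts + phase_cycles * 2500    # imager fires 3 cycles late
offset = (img_ts - pps_ts) / 2500
print(offset.round() % 16)               # -> [3. 3. 3. 3. 3.]
```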
+def remove_np(container): + if type(container) == list: + return [remove_np(c) for c in container] + elif type(container) == dict: + return {k:remove_np(c) for k,c in container.items()} + elif type(container) == np.ndarray: + return container.tolist() + else: + return container + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--bag', help='ROS bag name') + parser.add_argument('--verbose', action='store_true', help='Set Verbose Mode') + parser.add_argument('--no_file', action='store_true', help='Set to hold back time offset file') + parser.add_argument('--time_fn', default=None, help="Name of the time offset file") + args = parser.parse_args() + + # Open bag + input_bag = rosbag.Bag(args.bag) + + timing_topics = [ + "/ovc/pps_cb", + "/ovc/left/image_mono/compressed", + "/os_node/sys_time", + "/prophesee/right/events", + "/prophesee/left/events", + ] + + time_calibs = compute_time_calibs(input_bag, topics=timing_topics, verbose=args.verbose) + + time_calibs = remove_np(time_calibs) + + from yaml import dump + if not args.no_file: + if args.time_fn is None: + time_calib_file = os.path.splitext(args.bag)[0] + "_time_calib.yml" + else: + time_calib_file = args.time_fn + with open(time_calib_file, 'w') as f: + f.write(dump(time_calibs, default_flow_style=None)) + else: + print(dump(time_calibs)) + + input_bag.close() diff --git a/build_system/bag_processing/rosbag2verify.bash b/build_system/bag_processing/rosbag2verify.bash new file mode 100755 index 0000000..e0df881 --- /dev/null +++ b/build_system/bag_processing/rosbag2verify.bash @@ -0,0 +1,23 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# Set the output files to check +if [[ "$BAG_PATH" == *"camera_calib"* ]]; then + # For camera calibration bags, check for CAMCHAIN_PATH, REPORT_CAM_PATH, and RESULTS_CAM_PATH + output_files=( "$CAMCHAIN_PATH" "$REPORT_CAM_PATH" "$RESULTS_CAM_PATH" ) + check_files output_files +elif [[ "$BAG_PATH" == *"imu_calib"* ]]; then + output_files=( "$IMU_CHAIN_PATH" "$IMU_RESULTS_PATH") + check_files output_files +elif [[ "$BAG_PATH" == *"lidar_calib"* ]]; then + # TODO: skip files for lidar_calib. Empty array + echo -e "${YELLOW}TODO: Implement skip for lidar_calib${NC}" +else + # Data bags + output_files=("$H5_PATH") + check_files output_files +fi + +echo -e "${BLUE}Verifying bag file $BAG_PATH${NC}" +python3 build_system/bag_processing/rosbag2verify.py --bag "$BAG_PATH" +exit $? 
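The core frequency test implemented by rosbag2verify.py below is easy to state: a topic passes if its message count is within a relative tolerance of duration times expected frequency. A minimal sketch of that check:

```python
def topic_count_ok(count, duration_s, expected_hz, tolerance=0.02):
    expected = duration_s * expected_hz
    return abs((count - expected) / expected) <= tolerance

assert topic_count_ok(998, 10.0, 100.0)      # 0.2% low -> pass
assert not topic_count_ok(900, 10.0, 100.0)  # 10% low -> fail
```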
diff --git a/build_system/bag_processing/rosbag2verify.py b/build_system/bag_processing/rosbag2verify.py new file mode 100644 index 0000000..9656b53 --- /dev/null +++ b/build_system/bag_processing/rosbag2verify.py @@ -0,0 +1,230 @@ +import rosbag +import sys +from event_array_py import Decoder +from collections import defaultdict +from tqdm import tqdm +import numpy as np +import os +import pdb +from colorama import Fore, Style +from scipy.spatial.distance import cdist + + +def topic_count_verify(bag_name, bag, topic, expected_freq, tolerance=0.02): + print(Fore.GREEN, bag_name, topic, "[starting]", Style.RESET_ALL) + info = bag.get_type_and_topic_info(topic)[1][topic] + duration = bag.get_end_time() - bag.get_start_time() + count = info.message_count + expected_count = duration * expected_freq + + if abs((count - expected_count)/expected_count) > tolerance: + print(Fore.RED, bag_name, topic, "[failed]", Style.RESET_ALL) + print(abs((count - expected_count)/expected_count)) + + return False, topic, info + + print(Fore.GREEN, bag_name, topic, "[completed]", Style.RESET_ALL) + return True, topic, info + + +def ovc_timing_verify(bag_name, bag, tolerance=0.1, verbose=False): + print(Fore.GREEN, bag_name, "[OVC TIMING CHECK]", "[starting]", Style.RESET_ALL) + ovc_topics = ["/ovc/pps_cb", "/ovc/vectornav/imu", "/ovc/left/image_mono/compressed"] + ros_timing_info = defaultdict(list) + ros_seq_info = defaultdict(list) + for topic, msg, t in tqdm(bag.read_messages(topics=ovc_topics), + disable=not verbose): + if topic == "/ovc/pps_cb": + ros_timing_info[topic].append(msg.stamp.to_nsec()) + ros_seq_info[topic].append(msg.seq) + else: + ros_timing_info[topic].append(msg.header.stamp.to_nsec()) + ros_seq_info[topic].append(msg.header.seq) + + ros_timing_info = {k:np.array(v) for k,v in ros_timing_info.items()} + ros_seq_info = {k:np.array(v) for k,v in ros_seq_info.items()} + + # Fix this!! 
Idea -> searchsorted + matching_inds = np.searchsorted( ros_timing_info['/ovc/left/image_mono/compressed'], ros_timing_info['/ovc/pps_cb'] ) - 1 + + imager_ts = ros_timing_info['/ovc/left/image_mono/compressed'][matching_inds][2:-2] + pps_cb_ts = ros_timing_info['/ovc/pps_cb'][2:-2] + + vnav_offset = (pps_cb_ts - imager_ts) / 2500000 + vnav_int_offset = vnav_offset.round() % 16 + + inliers = vnav_int_offset == vnav_int_offset[0] + + passed = (inliers.sum() / inliers.shape[0]) > 0.9 + + info = { + "imager_ts": imager_ts.tolist(), + "pps_cb_ts": pps_cb_ts.tolist(), + "vnav_offset": vnav_offset.tolist(), + "vnav_int_offset": vnav_int_offset.tolist(), + } + + if not passed: + print(Fore.RED, bag_name, "OVC_TIMING", "[failed]", Style.RESET_ALL) + + return passed, "OVC_TIMING", info + + +def event_trigger_verify(bag_name, bag, topic, verbose=True): + print(Fore.GREEN, bag_name, topic, "[starting]", Style.RESET_ALL) + decoder = Decoder() + + stats = defaultdict(int) + triggers = [] + event_rate = [] + + for topic, msg, t in tqdm(bag.read_messages(topics=topic), + disable=not verbose): + decoder.decode_bytes(msg.encoding, msg.width, msg.height, + msg.time_base, msg.events) + cd_events = decoder.get_cd_events() + stats["cd_event_count"] += len(cd_events) + event_rate.append(stats['cd_event_count'] / (cd_events['t'][-1]-cd_events['t'][0])) + + trig_events = decoder.get_ext_trig_events() + stats["trig_event_count"] += len(trig_events) + for e in trig_events: + triggers.append(e) + + triggers = np.array(triggers) + + pos_dt = np.diff(triggers[triggers['p'] == 0]['t']).astype(float) / 1e6 + neg_dt = np.diff(triggers[triggers['p'] == 1]['t']).astype(float) / 1e6 + + stats['event_rate_mean'] = np.mean(event_rate) + stats['event_rate_std'] = np.std(event_rate) + stats['event_rate_max'] = np.max(event_rate) + stats['event_rate_min'] = np.min(event_rate) + + stats['pos_trigger_dt_mean'] = np.mean(pos_dt) + stats['neg_trigger_dt_mean'] = np.mean(neg_dt) + stats['pos_trigger_dt_std'] = np.std(pos_dt) + stats['neg_trigger_dt_std'] = np.std(neg_dt) + stats['pos_trigger_dt_max'] = np.max(pos_dt) + stats['neg_trigger_dt_max'] = np.max(neg_dt) + + if (np.any(stats['pos_trigger_dt_max'] > 1.5) or + np.any(stats['neg_trigger_dt_max'] > 1.5)): + print(Fore.RED, bag_name, topic, "[failed]", Style.RESET_ALL) + return False, topic, stats + + print(Fore.GREEN, bag_name, topic, "[completed]", Style.RESET_ALL) + + return True, topic, stats + + +def verifications(bag): + # Check if bag exists in the filesystem + if not os.path.exists(bag): + print("Bag not found: {}".format(bag)) + sys.exit(1) + + # Create a rosbag object + bag = rosbag.Bag(bag) + + # Print only the bag without the path + bag_name = bag.filename.split('/')[-1] + + verifications = [] + try: + # calib_lidar bags are generally shorter, and thus the tolerance + # calculation can fail (as we only have a few messages) + # Increasing the tolerance for those bags helps + if "calib" in bag_name and "lidar" in bag_name: + print(Fore.YELLOW, "Increasing tolerance due to lidar calib bag", + Style.RESET_ALL) + tolerance = 0.5 + else: + tolerance = 0.02 + + verifications.append(ovc_timing_verify(bag_name, bag)) + + # Do not run GPS tests on calibration, indoor, or tunnel bags + if ("calib" not in bag_name and + "indoor" not in bag_name and + "schuylkill_tunnel" not in bag_name): + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/gps_trigger", + 1.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ublox/fix", + 5.0, tolerance)) + 
else: + print(Fore.YELLOW, "Gps checks skipped in indoor bags", + "or in Schuylkill tunnel bags", Style.RESET_ALL) + + # Check all the other common topics + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/pps_cb", + 1.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/vectornav/imu", + 400.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/vectornav/mag", + 400.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/left/image_mono/compressed", + 25.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/right/image_mono/compressed", + 25.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/ovc/rgb/image_color/compressed", + 25.0, tolerance)) + + verifications.append(topic_count_verify(bag_name, bag, + "/os_node/imu_packets", + 100.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/os_node/sys_time", + 100.0, tolerance)) + verifications.append(topic_count_verify(bag_name, bag, + "/os_node/lidar_packets", + 1280.0, tolerance)) + + # Do not check events for lidar_calib bags + if not ("calib" in bag_name and "lidar" in bag_name): + verifications.append(event_trigger_verify(bag_name, bag, + "/prophesee/right/events", + args.verbose)) + verifications.append(event_trigger_verify(bag_name, bag, + "/prophesee/left/events", + args.verbose)) + else: + print(Fore.YELLOW, "Event checks skipped in lidar_calib bags", + Style.RESET_ALL) + + except Exception as e: + print(Fore.RED, bag_name, "Exception: ", e, Style.RESET_ALL) + exit_code = 1 + else: + exit_code = 0 + + for v in verifications: + if not v[0] or args.verbose: + print("========================") + print(v[0]) + print(v[1]) + print(v[2]) + print("========================") + if not v[0]: + exit_code = 1 + sys.exit(exit_code) + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--bag', + help='List of bags to process, separated by a space') + # verbose mode + parser.add_argument('--verbose', action='store_true', + help='Set Verbose Mode') + args = parser.parse_args() + verifications(args.bag) diff --git a/build_system/calibration/aprilgrid.yaml b/build_system/calibration/aprilgrid.yaml new file mode 100644 index 0000000..6830c52 --- /dev/null +++ b/build_system/calibration/aprilgrid.yaml @@ -0,0 +1,6 @@ +target_type: 'aprilgrid' #gridtype +tagCols: 7 #number of apriltags +tagRows: 5 #number of apriltags +tagSize: 0.04 #size of apriltag, edge to edge [m] +tagSpacing: 0.25 #ratio of space between tags to tagSize + #example: tagSize=2m, spacing=0.5m --> tagSpacing=0.25[-] diff --git a/build_system/calibration/event_bag_convert/CMakeLists.txt b/build_system/calibration/event_bag_convert/CMakeLists.txt new file mode 100644 index 0000000..0487579 --- /dev/null +++ b/build_system/calibration/event_bag_convert/CMakeLists.txt @@ -0,0 +1,135 @@ +cmake_minimum_required(VERSION 3.0.2) +project(event_bag_convert) +add_compile_options(-Wall -Wextra -Wpedantic -Werror) +add_definitions(-DUSING_ROS_1) +set(CMAKE_CXX_STANDARD 17) + +## Compile as C++11, supported in ROS Kinetic and newer +# add_compile_options(-std=c++11) +find_package(simple_image_recon_lib) + +## Find catkin macros and libraries +## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz) +## is used, also find other catkin packages +find_package(catkin REQUIRED COMPONENTS + event_array_msgs + event_array_codecs + geometry_msgs + rosbag + roscpp + sensor_msgs 
+ simple_image_recon + std_msgs +) + +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES event_bag_convert +# CATKIN_DEPENDS event_array_msgs geometry_msgs rosbag roscpp sensor_msgs simple_image_recon std_msgs +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( + include + ${catkin_INCLUDE_DIRS} + ${simple_image_recon_lib_INCLUDE_DIRS} +) + +add_executable(event_bag_converter src/bag_to_all_frames.cpp) + +target_link_libraries(event_bag_converter + ${catkin_LIBRARIES} + simple_image_recon_lib::simple_image_recon_lib + yaml-cpp +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/event_bag_convert.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +# add_executable(${PROJECT_NAME}_node src/event_bag_convert_node.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +# target_link_libraries(${PROJECT_NAME}_node +# ${catkin_LIBRARIES} +# ) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) for installation +## in contrast to setup.py, you can choose the destination +# catkin_install_python(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) 
+# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_event_bag_convert.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/build_system/calibration/event_bag_convert/include/event_bag_convert/prettyprint.hpp b/build_system/calibration/event_bag_convert/include/event_bag_convert/prettyprint.hpp new file mode 100644 index 0000000..f75683c --- /dev/null +++ b/build_system/calibration/event_bag_convert/include/event_bag_convert/prettyprint.hpp @@ -0,0 +1,445 @@ +// Copyright Louis Delacroix 2010 - 2014. +// Distributed under the Boost Software License, Version 1.0. +// (See accompanying file LICENSE_1_0.txt or copy at +// http://www.boost.org/LICENSE_1_0.txt) +// +// A pretty printing library for C++ +// +// Usage: +// Include this header, and operator<< will "just work". + +#ifndef H_PRETTY_PRINT +#define H_PRETTY_PRINT + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace pretty_print +{ + namespace detail + { + // SFINAE type trait to detect whether T::const_iterator exists. + + struct sfinae_base + { + using yes = char; + using no = yes[2]; + }; + + template + struct has_const_iterator : private sfinae_base + { + private: + template static yes & test(typename C::const_iterator*); + template static no & test(...); + public: + static const bool value = sizeof(test(nullptr)) == sizeof(yes); + using type = T; + }; + + template + struct has_begin_end : private sfinae_base + { + private: + template + static yes & f(typename std::enable_if< + std::is_same(&C::begin)), + typename C::const_iterator(C::*)() const>::value>::type *); + + template static no & f(...); + + template + static yes & g(typename std::enable_if< + std::is_same(&C::end)), + typename C::const_iterator(C::*)() const>::value, void>::type*); + + template static no & g(...); + + public: + static bool const beg_value = sizeof(f(nullptr)) == sizeof(yes); + static bool const end_value = sizeof(g(nullptr)) == sizeof(yes); + }; + + } // namespace detail + + + // Holds the delimiter values for a specific character type + + template + struct delimiters_values + { + using char_type = TChar; + const char_type * prefix; + const char_type * delimiter; + const char_type * postfix; + }; + + + // Defines the delimiter values for a specific container and character type + + template + struct delimiters + { + using type = delimiters_values; + static const type values; + }; + + + // Functor to print containers. You can use this directly if you want + // to specificy a non-default delimiters type. The printing logic can + // be customized by specializing the nested template. 
+ + template , + typename TDelimiters = delimiters> + struct print_container_helper + { + using delimiters_type = TDelimiters; + using ostream_type = std::basic_ostream; + + template + struct printer + { + static void print_body(const U & c, ostream_type & stream) + { + using std::begin; + using std::end; + + auto it = begin(c); + const auto the_end = end(c); + + if (it != the_end) + { + for ( ; ; ) + { + stream << *it; + + if (++it == the_end) break; + + if (delimiters_type::values.delimiter != NULL) + stream << delimiters_type::values.delimiter; + } + } + } + }; + + print_container_helper(const T & container) + : container_(container) + { } + + inline void operator()(ostream_type & stream) const + { + if (delimiters_type::values.prefix != NULL) + stream << delimiters_type::values.prefix; + + printer::print_body(container_, stream); + + if (delimiters_type::values.postfix != NULL) + stream << delimiters_type::values.postfix; + } + + private: + const T & container_; + }; + + // Specialization for pairs + + template + template + struct print_container_helper::printer> + { + using ostream_type = typename print_container_helper::ostream_type; + + static void print_body(const std::pair & c, ostream_type & stream) + { + stream << c.first; + if (print_container_helper::delimiters_type::values.delimiter != NULL) + stream << print_container_helper::delimiters_type::values.delimiter; + stream << c.second; + } + }; + + // Specialization for tuples + + template + template + struct print_container_helper::printer> + { + using ostream_type = typename print_container_helper::ostream_type; + using element_type = std::tuple; + + template struct Int { }; + + static void print_body(const element_type & c, ostream_type & stream) + { + tuple_print(c, stream, Int<0>()); + } + + static void tuple_print(const element_type &, ostream_type &, Int) + { + } + + static void tuple_print(const element_type & c, ostream_type & stream, + typename std::conditional, std::nullptr_t>::type) + { + stream << std::get<0>(c); + tuple_print(c, stream, Int<1>()); + } + + template + static void tuple_print(const element_type & c, ostream_type & stream, Int) + { + if (print_container_helper::delimiters_type::values.delimiter != NULL) + stream << print_container_helper::delimiters_type::values.delimiter; + + stream << std::get(c); + + tuple_print(c, stream, Int()); + } + }; + + // Prints a print_container_helper to the specified stream. 
+ + template + inline std::basic_ostream & operator<<( + std::basic_ostream & stream, + const print_container_helper & helper) + { + helper(stream); + return stream; + } + + + // Basic is_container template; specialize to derive from std::true_type for all desired container types + + template + struct is_container : public std::integral_constant::value && + detail::has_begin_end::beg_value && + detail::has_begin_end::end_value> { }; + + template + struct is_container : std::true_type { }; + + template + struct is_container : std::false_type { }; + + template + struct is_container> : std::true_type { }; + + template + struct is_container> : std::true_type { }; + + template + struct is_container> : std::true_type { }; + + + // Default delimiters + + template struct delimiters { static const delimiters_values values; }; + template const delimiters_values delimiters::values = { "[", ", ", "]" }; + template struct delimiters { static const delimiters_values values; }; + template const delimiters_values delimiters::values = { L"[", L", ", L"]" }; + + + // Delimiters for (multi)set and unordered_(multi)set + + template + struct delimiters< ::std::set, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::set, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::set, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::set, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::multiset, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::multiset, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::multiset, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::multiset, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::unordered_set, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_set, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::unordered_set, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_set, wchar_t>::values = { L"{", L", ", L"}" }; + + template + struct delimiters< ::std::unordered_multiset, char> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_multiset, char>::values = { "{", ", ", "}" }; + + template + struct delimiters< ::std::unordered_multiset, wchar_t> { static const delimiters_values values; }; + + template + const delimiters_values delimiters< ::std::unordered_multiset, wchar_t>::values = { L"{", L", ", L"}" }; + + + // Delimiters for pair and tuple + + template struct delimiters, char> { static const delimiters_values values; }; + template const delimiters_values delimiters, char>::values = { "(", ", ", ")" }; + template struct delimiters< ::std::pair, wchar_t> { static const delimiters_values values; }; + template const delimiters_values delimiters< ::std::pair, wchar_t>::values = { L"(", L", ", L")" }; + + template struct delimiters, char> { static const delimiters_values values; }; + template const delimiters_values delimiters, char>::values = { "(", ", ", ")" }; + template struct delimiters< ::std::tuple, wchar_t> { static const delimiters_values values; }; + template const delimiters_values delimiters< 
::std::tuple, wchar_t>::values = { L"(", L", ", L")" }; + + + // Type-erasing helper class for easy use of custom delimiters. + // Requires TCharTraits = std::char_traits and TChar = char or wchar_t, and MyDelims needs to be defined for TChar. + // Usage: "cout << pretty_print::custom_delims(x)". + + struct custom_delims_base + { + virtual ~custom_delims_base() { } + virtual std::ostream & stream(::std::ostream &) = 0; + virtual std::wostream & stream(::std::wostream &) = 0; + }; + + template + struct custom_delims_wrapper : custom_delims_base + { + custom_delims_wrapper(const T & t_) : t(t_) { } + + std::ostream & stream(std::ostream & s) + { + return s << print_container_helper, Delims>(t); + } + + std::wostream & stream(std::wostream & s) + { + return s << print_container_helper, Delims>(t); + } + + private: + const T & t; + }; + + template + struct custom_delims + { + template + custom_delims(const Container & c) : base(new custom_delims_wrapper(c)) { } + + std::unique_ptr base; + }; + + template + inline std::basic_ostream & operator<<(std::basic_ostream & s, const custom_delims & p) + { + return p.base->stream(s); + } + + + // A wrapper for a C-style array given as pointer-plus-size. + // Usage: std::cout << pretty_print_array(arr, n) << std::endl; + + template + struct array_wrapper_n + { + typedef const T * const_iterator; + typedef T value_type; + + array_wrapper_n(const T * const a, size_t n) : _array(a), _n(n) { } + inline const_iterator begin() const { return _array; } + inline const_iterator end() const { return _array + _n; } + + private: + const T * const _array; + size_t _n; + }; + + + // A wrapper for hash-table based containers that offer local iterators to each bucket. + // Usage: std::cout << bucket_print(m, 4) << std::endl; (Prints bucket 5 of container m.) + + template + struct bucket_print_wrapper + { + typedef typename T::const_local_iterator const_iterator; + typedef typename T::size_type size_type; + + const_iterator begin() const + { + return m_map.cbegin(n); + } + + const_iterator end() const + { + return m_map.cend(n); + } + + bucket_print_wrapper(const T & m, size_type bucket) : m_map(m), n(bucket) { } + + private: + const T & m_map; + const size_type n; + }; + +} // namespace pretty_print + + +// Global accessor functions for the convenience wrappers + +template +inline pretty_print::array_wrapper_n pretty_print_array(const T * const a, size_t n) +{ + return pretty_print::array_wrapper_n(a, n); +} + +template pretty_print::bucket_print_wrapper +bucket_print(const T & m, typename T::size_type n) +{ + return pretty_print::bucket_print_wrapper(m, n); +} + + +// Main magic entry point: An overload snuck into namespace std. +// Can we do better? 
+ +namespace std +{ + // Prints a container to the stream using default delimiters + + template + inline typename enable_if< ::pretty_print::is_container::value, + basic_ostream &>::type + operator<<(basic_ostream & stream, const T & container) + { + return stream << ::pretty_print::print_container_helper(container); + } +} + + + +#endif // H_PRETTY_PRINT diff --git a/build_system/calibration/event_bag_convert/package.xml b/build_system/calibration/event_bag_convert/package.xml new file mode 100644 index 0000000..9b7336a --- /dev/null +++ b/build_system/calibration/event_bag_convert/package.xml @@ -0,0 +1,80 @@ + + + event_bag_convert + 0.0.0 + The event_bag_convert package + + + + + ken + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + catkin + event_array_msgs + geometry_msgs + rosbag + roscpp + sensor_msgs + simple_image_recon + std_msgs + event_array_msgs + geometry_msgs + rosbag + roscpp + sensor_msgs + simple_image_recon + std_msgs + event_array_msgs + geometry_msgs + rosbag + roscpp + sensor_msgs + simple_image_recon + std_msgs + + + + + + + + diff --git a/build_system/calibration/event_bag_convert/src/bag_to_all_frames.cpp b/build_system/calibration/event_bag_convert/src/bag_to_all_frames.cpp new file mode 100644 index 0000000..60f4e1e --- /dev/null +++ b/build_system/calibration/event_bag_convert/src/bag_to_all_frames.cpp @@ -0,0 +1,254 @@ +// -*-c++-*--------------------------------------------------------------------------------------- +// Copyright 2023 Bernd Pfrommer +// +// Edited by Kenneth Chaney for dataset support +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
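The converter below applies the time corrections computed by rosbag2timecorrection.py through its `retime_ovc` helper: subtract the per-sensor offset, then add the accumulated skew in proportion to elapsed seconds. The same model rendered in Python for reference (a sketch; the C++ works in nanoseconds after scaling the YAML's microsecond values by 1000):

```python
def retime(time_ns, offset_ns, skew_ns_per_s):
    # Remove the measured offset, then add skew proportional to elapsed time
    new_time = time_ns - offset_ns
    new_time += int((new_time / 1e9) * skew_ns_per_s)
    return new_time
```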
+ +#include +#include +#include +#include +#include +#include +#include + +#include + +#include +#include + +#include +#include + +using event_array_msgs::EventArray; +using sensor_msgs::Image; +using sensor_msgs::CompressedImage; +using ApproxRecon = simple_image_recon::ApproxReconstructor< + EventArray, EventArray::ConstPtr, Image, Image::ConstPtr>; + +int64_t retime_ovc(int64_t time_nsec, int64_t offset, double skew){ + int64_t new_time = time_nsec-offset; + + int64_t skew_change = (new_time / 1e9) * skew; + new_time += skew_change; + + // new_time /= 2500000; + // new_time *= 2500000; + + return new_time; +} + +void usage() +{ + std::cout << "usage:" << std::endl; + std::cout << "bag_to_frames -i input_bag -o output_bag -t time_sync_file" + "[-c cutoff_period] [-d image_decimator]" + << std::endl; +} + +class OutBagWriter : public simple_image_recon::FrameHandler +{ +public: + explicit OutBagWriter(const std::string & bagName) + { + outBag_.open(bagName, rosbag::bagmode::Write); + } + void frame(const Image::ConstPtr & img, const std::string & topic) override + { + outBag_.write(topic, img->header.stamp, img); + numFrames_++; + if (numFrames_ % 100 == 0) { + std::cout << "wrote " << numFrames_ << " frames " << std::endl; + } + } + + rosbag::Bag outBag_; + size_t numFrames_{0}; +}; + +using simple_image_recon::ApproxReconstructor; + +int main(int argc, char ** argv) +{ + int opt; + std::string inBagName; + std::string outBagName; + std::string timeSyncFileName; + + std::vector eventTopics{"/prophesee/left/events","/prophesee/right/events"}; + std::vector ovcTopics{"/ovc/left/image_mono/compressed","/ovc/right/image_mono/compressed","/ovc/rgb/image_color/compressed"}; + + int cutoff_period(45); + + int image_decimator(1); + + while ((opt = getopt(argc, argv, "i:o:O:t:c:d:h")) != -1) { + switch (opt) { + case 'i': + inBagName = optarg; + break; + case 'o': + outBagName = optarg; + break; + case 't': + timeSyncFileName = optarg; + break; + case 'c': + cutoff_period = atoi(optarg); + break; + case 'd': + image_decimator = atoi(optarg); + break; + case 'h': + usage(); + return (-1); + break; + default: + std::cout << "unknown option: " << opt << std::endl; + usage(); + return (-1); + break; + } + } + + if (inBagName.empty()) { + std::cout << "missing input bag file name!" << std::endl; + usage(); + return (-1); + } + + if (outBagName.empty()) { + std::cout << "missing output bag file name!" << std::endl; + usage(); + return (-1); + } + + if (timeSyncFileName.empty()) { + std::cout << "missing time sync file name!" 
<< std::endl; + usage(); + return (-1); + } + + std::map offsets; + std::map skews; + + std::cout << "Opening " << timeSyncFileName << std::endl; + + // This file is in micro-seconds + YAML::Node time_sync_config = YAML::LoadFile(timeSyncFileName); + + auto ovc_topic_node = time_sync_config["/ovc/pps_cb"]; + int vnav_offset = 2500*1000*ovc_topic_node["decimator"]["round_offset"].as(); + + offsets["ovc"] = 1000*ovc_topic_node["correction"]["offset"].as() + vnav_offset; + skews["ovc"] = 1000*ovc_topic_node["correction"]["skew"].as(); + + auto left_topic_node = time_sync_config[eventTopics[0]]; + offsets[eventTopics[0]] = 1000*left_topic_node["correction"]["offset"].as() + vnav_offset; + skews[eventTopics[0]] = 1000*left_topic_node["correction"]["skew"].as(); + + auto right_topic_node = time_sync_config[eventTopics[1]]; + offsets[eventTopics[1]] = 1000*right_topic_node["correction"]["offset"].as() + vnav_offset; + skews[eventTopics[1]] = 1000*right_topic_node["correction"]["skew"].as(); + + std::cout << "Retrieved time synchronizations" << std::endl; + std::cout << "- vnav phase offset " << vnav_offset << std::endl; + std::cout << "- Offsets : " << offsets << std::endl; + std::cout << "- Skews : " << skews << std::endl; + std::cout << "" << std::endl; + + std::cout << "Settings" << std::endl; + std::cout << "- Image Decimator " << image_decimator << std::endl; + std::cout << "- FilterCutoff " << cutoff_period << std::endl; + + OutBagWriter writer(outBagName); + + // Grab Frame times from original bag and copy images from OVC over + std::vector ovc_times; + + std::cout << "Opening " << inBagName << std::endl; + rosbag::Bag inBag; + inBag.open(inBagName, rosbag::bagmode::Read); + std::cout << "- Opened" << std::endl; + + ros::Time first_image_time; + bool set_first_image = false; + + std::cout << "Get frame times to write to bag" << std::endl; + rosbag::View view_primary(inBag, rosbag::TopicQuery(std::vector{ovcTopics[2]})); + int image_decimator_counter(0); + for (const rosbag::MessageInstance & msg : view_primary) { + auto m = msg.instantiate(); + if(!set_first_image){ + first_image_time = m->header.stamp; + if( static_cast(first_image_time.toNSec()) - static_cast(offsets["ovc"]) < 0 ) { + continue; + }else{ + set_first_image = true; + } + } + if(set_first_image && image_decimator_counter++ % image_decimator == 0){ + ovc_times.emplace_back( offsets["ovc"] + retime_ovc(m->header.stamp.toNSec(), offsets["ovc"], skews["ovc"]) ); + } + } + + std::cout << "- Requesting " << ovc_times.size() << " frames" << std::endl; + std::cout << "- Start : " << ovc_times[0] << std::endl; + std::cout << "- End : " << ovc_times[ovc_times.size()-1] << std::endl; + std::cout << "- Start (rel) : " << ovc_times[0]-offsets["ovc"] << std::endl; + std::cout << "- End (rel) : " << ovc_times[ovc_times.size()-1]-offsets["ovc"] << std::endl; + + std::cout << "Copying OVC frames" << std::endl; + rosbag::View view_ovc(inBag, rosbag::TopicQuery(ovcTopics)); + for (const rosbag::MessageInstance & msg : view_ovc) { + auto m = msg.instantiate(); + auto retimed = offsets["ovc"] + retime_ovc(m->header.stamp.toNSec(), offsets["ovc"], skews["ovc"]); + + auto find_res = std::find(ovc_times.begin(), + ovc_times.end(), + retimed); + if(find_res != ovc_times.end()){ + m->header.stamp.fromNSec( retimed ); + writer.outBag_.write(msg.getTopic(), m->header.stamp, m); + } + } + + std::cout << "Building Reconstructors" << std::endl; + const double fillRatio = 0.6; + const int tileSize = 2; + std::unordered_map recons; + for (size_t i = 0; i < 
eventTopics.size(); i++) { + + std::cout << " Final topic offset " << eventTopics[i] << " : " << offsets[eventTopics[i]] << std::endl; + recons.insert( + {eventTopics[i], ApproxRecon( + &writer, eventTopics[i], cutoff_period, 0.1, fillRatio, + tileSize, offsets["ovc"]-offsets[eventTopics[i]], ovc_times)}); + } + + size_t numMessages(0); + rosbag::View view_events(inBag, rosbag::TopicQuery(eventTopics)); + for (const rosbag::MessageInstance & msg : view_events) { + auto it = recons.find(msg.getTopic()); + if (it != recons.end()) { + auto m = msg.instantiate(); + it->second.processMsg(m); + numMessages++; + } + } + std::cout << "processed " << numMessages << " number of messages" + << std::endl; + + return (0); +} diff --git a/build_system/calibration/kalibr_all b/build_system/calibration/kalibr_all new file mode 100755 index 0000000..c0b5c3e --- /dev/null +++ b/build_system/calibration/kalibr_all @@ -0,0 +1,27 @@ +#!/bin/bash +# +# +# ./kalibr_events +# +# + +IMAGER_BAG=$1 +TARGET=$2 +CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" + +# Check if target is empty, and replace it with aprilgrid.yaml +if [ -z "$TARGET" ]; then + echo "No target specified, using aprilgrid.yaml" + TARGET=aprilgrid.yaml +fi + +if [ -f "$CAMCHAIN" ]; then + echo "$CAMCHAIN exists - please remove to rerun" +else + export KALIBR_MANUAL_FOCAL_LENGTH_INIT=1035 + rosrun kalibr kalibr_calibrate_cameras \ + --bag "$IMAGER_BAG" --target "$TARGET" \ + --models pinhole-radtan pinhole-radtan pinhole-radtan pinhole-radtan pinhole-radtan \ + --topics /prophesee/left/events /prophesee/right/events /ovc/left/image_mono/compressed /ovc/right/image_mono/compressed /ovc/rgb/image_color/compressed \ + --dont-show-report +fi diff --git a/build_system/calibration/kalibr_events b/build_system/calibration/kalibr_events new file mode 100755 index 0000000..022855c --- /dev/null +++ b/build_system/calibration/kalibr_events @@ -0,0 +1,20 @@ +#!/bin/bash +# +# +# ./kalibr_events +# +# + +IMAGER_BAG=$1 +CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" + + +if [ -f $CAMCHAIN ]; then + echo "$CAMCHAIN exists - please remove to rerun" +else + rosrun kalibr kalibr_calibrate_cameras \ + --bag $IMAGER_BAG --target aprilgrid.yaml \ + --models pinhole-radtan pinhole-radtan \ + --topics /prophesee/left/events /prophesee/right/events \ + --dont-show-report +fi diff --git a/build_system/calibration/kalibr_ovc b/build_system/calibration/kalibr_ovc new file mode 100755 index 0000000..cfb491a --- /dev/null +++ b/build_system/calibration/kalibr_ovc @@ -0,0 +1,21 @@ +#!/bin/bash +# +# +# ./kalibr_stereo_mono.bash +# +# + +IMAGER_BAG=$1 +IMU_BAG=$2 +CAMCHAIN="${IMAGER_BAG%.bag}-camchain.yaml" +IMUCALIB="${IMU_BAG%.bag}-imu.yaml" +IMUCAMCHAIN="${IMU_BAG%.bag}-camchain-imucam.yaml" + +if [ -f $IMUCAMCHAIN ]; then + echo "$IMUCAMCHAIN exists - please remove to rerun" +else + rosrun kalibr kalibr_calibrate_imu_camera \ + --bag $IMU_BAG --target aprilgrid.yaml \ + --cam $CAMCHAIN --imu vn100.yaml \ + --dont-show-report +fi diff --git a/build_system/calibration/only_ovc_calib.py b/build_system/calibration/only_ovc_calib.py new file mode 100644 index 0000000..64203d8 --- /dev/null +++ b/build_system/calibration/only_ovc_calib.py @@ -0,0 +1,37 @@ +#!/usr/bin/env python3 +import numpy as np +import yaml +import pdb + + + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--calib_input', type=str, required=True) + parser.add_argument('--calib_output', type=str, required=True) + args = parser.parse_args() + + # Get input yaml 
file into a variable + with open(args.calib_input, 'r') as stream: + input_yaml = yaml.safe_load(stream) + + output_dict = {} + + for cam in input_yaml: + # Skip prophesee cameras + if cam in ["cam0", "cam1"]: + continue + cam_number = int(cam.split("cam")[1]) + new_cam_number = cam_number - 2 + new_cam = f"cam{new_cam_number}" + output_dict[new_cam] = input_yaml[cam] + if new_cam == "cam0": + del output_dict[new_cam]["T_cn_cnm1"] + overlaps = list(range(3)) + overlaps.remove(new_cam_number) + output_dict[new_cam]["cam_overlaps"] = overlaps + + # Save output yaml file + with open(args.calib_output, 'w') as outfile: + outfile.write(yaml.dump(output_dict, default_flow_style=False)) diff --git a/build_system/calibration/rosbag2camcalibration.bash b/build_system/calibration/rosbag2camcalibration.bash new file mode 100755 index 0000000..f4ac902 --- /dev/null +++ b/build_system/calibration/rosbag2camcalibration.bash @@ -0,0 +1,29 @@ +#!/bin/bash +source ./build_system/preamble.bash + +output_files=("$CAMCHAIN_PATH" "$REPORT_CAM_PATH" "$RESULTS_CAM_PATH") +check_files output_files + +# Link the event_bag_convert directory to the current ROS src dir and build +if [ ! -L ~/catkin_ws/src/event_bag_convert ]; then + ln -s "$(pwd)"/build_system/calibration/event_bag_convert ~/catkin_ws/src/event_bag_convert +fi +pushd ~/catkin_ws/src +catkin build --no-status +popd +. ~/catkin_ws/devel/setup.bash + +# Run the conversion script +TMP_BAG="$TMP_PATH/calibration.bag" +rosrun event_bag_convert event_bag_converter -i "$BAG_PATH" -o "$TMP_BAG" -t "$TIME_CORRECTIONS_PATH" -c 30 -d 5 + +# Run the calibration script +./calibration/kalibr_all "$TMP_BAG" "./calibration/aprilgrid.yaml" + +# Move the result to the output folder +CAMCHAIN_TMP="${TMP_BAG%.bag}-camchain.yaml" +REPORTS_TMP="${TMP_BAG%.bag}-report-cam.pdf" +RESULTS_TMP="${TMP_BAG%.bag}-results-cam.txt" +mv "$CAMCHAIN_TMP" "$CAMCHAIN_PATH" +mv "$REPORTS_TMP" "$REPORT_CAM_PATH" +mv "$RESULTS_TMP" "$RESULTS_CAM_PATH" diff --git a/build_system/calibration/rosbag2imucalibration.bash b/build_system/calibration/rosbag2imucalibration.bash new file mode 100755 index 0000000..6ea3f66 --- /dev/null +++ b/build_system/calibration/rosbag2imucalibration.bash @@ -0,0 +1,32 @@ +#!/bin/bash +source ./build_system/preamble.bash + +output_files=("$IMU_CHAIN_PATH" "$IMU_RESULTS_PATH" "$IMU_REPORT_PATH") +check_files output_files + +# Run the conversion script +TMP_CHAIN="$TMP_PATH/strip_camchain.yaml" +TMP_BAG="$TMP_PATH/ovc_only.bag" + +rosbag filter "$BAG_PATH" "$TMP_BAG" "topic.startswith('/ovc')" + +python3 build_system/calibration/only_ovc_calib.py \ + --calib_input "$CALIB_CAMCHAIN_PATH" \ + --calib_output "$TMP_CHAIN" + +sleep 60 + +rosrun kalibr kalibr_calibrate_imu_camera \ + --bag "$TMP_BAG" \ + --cam "$TMP_CHAIN" \ + --imu build_system/calibration/vn100.yaml \ + --target build_system/calibration/aprilgrid.yaml \ + --dont-show-report + +# Move the result to the output folder +CAMCHAIN_TMP="${TMP_BAG%.bag}-camchain-imucam.yaml" +RESULTS_TMP="${TMP_BAG%.bag}-results-imucam.txt" +REPORT_TMP="${TMP_BAG%.bag}-report-imucam.pdf" +mv "$CAMCHAIN_TMP" "$IMU_CHAIN_PATH" +mv "$RESULTS_TMP" "$IMU_RESULTS_PATH" +mv "$REPORT_TMP" "$IMU_REPORT_PATH" diff --git a/build_system/calibration/vn100.yaml b/build_system/calibration/vn100.yaml new file mode 100644 index 0000000..12d00bc --- /dev/null +++ b/build_system/calibration/vn100.yaml @@ -0,0 +1,20 @@ +#Accelerometers +# Accelerometer noise density. [m/s^2*1/sqrt(Hz)] +# Datasheet: 0.14 mg/√Hz = 1.372e-3 m/s^2 * 1/sqrt(Hz) +accelerometer_noise_density: 1.372e-3 +# Accelerometer bias random walk. [m/s^3*1/sqrt(Hz)] +# VNav technical support: Velocity random walk (VRW): 1.4e-3 m/s^2/sqrt(Hz) +# Bias Random Walk: accel : 5e-5 [m/s^3/sqrt(Hz)] +accelerometer_random_walk: 5.0e-5 + +#Gyroscopes +## Gyro noise density. [rad/s*1/sqrt(Hz)] +# Datasheet: 0.0035 °/s /√Hz = 6.10866e-05 +gyroscope_noise_density: 6.10866e-05 #Noise density (continuous-time) +# Gyro bias random walk. [rad/s^2*1/sqrt(Hz)] +# VNav technical support: Angle random walk (ARW): 6.1e-5 rad/s/sqrt(Hz) +# Bias Random Walk: gyro : 4e-6 [rad/s^2/sqrt(Hz)] +gyroscope_random_walk: 4.0e-6 + +rostopic: /ovc/vectornav/imu #the IMU ROS topic +update_rate: 400.0 #Hz (for discretization of the values above)
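The noise densities in vn100.yaml above are datasheet numbers converted to the SI units kalibr expects, and the conversions are quick to verify (standard gravity assumed):

```python
import numpy as np

# Accelerometer: 0.14 mg/sqrt(Hz) -> m/s^2/sqrt(Hz)
print(0.14e-3 * 9.80665)   # -> 1.3729e-03, the 1.372e-3 above

# Gyro: 0.0035 deg/s/sqrt(Hz) -> rad/s/sqrt(Hz)
print(np.deg2rad(0.0035))  # -> 6.1087e-05, the 6.10866e-05 above
```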
+# Datasheet: 0.14 mg/√Hz = 1.372e-3 m/s^2 * 1/sqrt(Hz)
+accelerometer_noise_density: 1.372e-3
+# Accelerometer bias random walk. [m/s^3*1/sqrt(Hz)]
+# VNav technical support: Velocity random walk (VRW): 1.4e-3 m/s^2/sqrt(Hz)
+# Bias Random Walk: accel : 5e-5 [m/s^3/sqrt(Hz)]
+accelerometer_random_walk: 5.0e-5
+
+#Gyroscopes
+## Gyro noise density. [rad/s*1/sqrt(Hz)]
+# Datasheet: 0.0035 °/s /√Hz = 6.10866e-05
+gyroscope_noise_density: 6.10866e-05 #Noise density (continuous-time)
+# Gyro bias random walk. [rad/s^2*1/sqrt(Hz)]
+# VNav technical support: Angle random walk (ARW): 6.1e-5 rad/s/sqrt(Hz)
+# Bias Random Walk: gyro : 4e-6 [rad/s^2/sqrt(Hz)]
+gyroscope_random_walk: 4.0e-6
+
+rostopic: /ovc/vectornav/imu #the IMU ROS topic
+update_rate: 400.0 #Hz (for discretization of the values above)
diff --git a/build_system/dataset_paths.py b/build_system/dataset_paths.py
new file mode 100755
index 0000000..bf3a236
--- /dev/null
+++ b/build_system/dataset_paths.py
@@ -0,0 +1,381 @@
+#!/usr/bin/env python3
+import pdb
+import os
+import sys
+import yaml
+from pathlib import Path
+from colorama import Fore, Style
+import argparse
+
+DATASET_LIST_PATH = "dataset_list.yaml"
+
+# List of all paths used in the pipeline
+INPUT_FOLDER_PATH = Path("/M3ED_Build/input")
+BAG_FOLDER_PATH = INPUT_FOLDER_PATH / "raw_bags"
+TEST_BAG_FOLDER_PATH = INPUT_FOLDER_PATH / "raw_test_bags"
+LIDAR_CALIB_FOLDER_PATH = INPUT_FOLDER_PATH / "lidar_calibrations"
+TMP_FOLDER_PATH = Path("/M3ED_Build/tmp")
+OUTPUT_FOLDER_PATH = Path("/M3ED_Build/output")
+TEST_OUTPUT_FOLDER_PATH = Path("/M3ED_Build/test_output")
+
+# SHORT_LIST keeps a list of sequences that are used to test the pipeline.
+# These sequences are only used during the debug builds
+SHORT_LIST = ['car_forest_camera_calib_5.bag',
+              'tower_lidar_calib_icp_2.bag',
+              'falcon_outdoor_night_penno_parking_1.bag',
+              'tower_imu_calib_1.bag',
+              'spot_outdoor_day_skatepark_3.bag',
+              'falcon_lidar_calib_icp_3.bag',
+              'falcon_outdoor_night_camera_calib_2.bag',
+              'spot_outdoor_day_srt_green_loop.bag',
+              'spot_outdoor_day_camera_calib_3.bag']
+
+
+class DataFile:
+    def __init__(self, name, is_test=False):
+        self.name = name
+        self.is_test = is_test
+
+        # Create a dictionary to store the environments
+        self.env = {}
+
+        # Get bag path from name
+        self.env['bag_name'] = self.name
+
+        # Bag path depends if the file is a test file or not
+        if self.is_test:
+            self.env['bag_path'] = TEST_BAG_FOLDER_PATH / f"{self.name}.bag"
+        else:
+            self.env['bag_path'] = BAG_FOLDER_PATH / f"{self.name}.bag"
+
+        # the tmp path is the tmp folder plus the bag name (no extension)
+        self.env['tmp_path'] = TMP_FOLDER_PATH / name
+
+        # the output path is the output folder plus the bag name (no extension)
+        self.env['output_path'] = OUTPUT_FOLDER_PATH / name
+
+        # if the bag is a test one, there is a test output path where some
+        # results will be saved
+        if self.is_test:
+            self.env['test_output_path'] = TEST_OUTPUT_FOLDER_PATH / name
+
+        # Get time_corrections.yaml file in tmp_path
+        self.env['time_corrections_path'] = self.env['tmp_path'] / (name + "_time_corrections.yaml")
+
+        # H5 output path
+        if self.is_test:
+            self.env['h5_path'] = self.env['test_output_path'] / (name + "_data.h5")
+            self.env['is_test'] = 1
+            self.env['stripped_h5_path'] = self.env['output_path'] / (name + ".h5")
+        else:
+            self.env['h5_path'] = self.env['output_path'] / (name + "_data.h5")
+            self.env['is_test'] = 0
+        self.env['stats_path'] = self.env['output_path'] / (name + "_stats.yaml")
+
+        # video output_path
+
self.env['events_video_raw'] = self.env['output_path'] / (name + "_events_gray.avi") + self.env['rgb_video_raw'] = self.env['output_path'] / (name + "_rgb.avi") + + def fileExists(self): + if not self.env['bag_path'].exists(): + sys.exit(f"Bag file {self.env['bag_path']} does not exist") + + def printEnvs(self): + self.fileExists() + # printEnvs is only called when we get the envs for a particular file. + # We can run functions for the particular file here, so we do not run + # them for all the files when we populate the database + if not self.env['tmp_path'].exists(): + os.mkdir(self.env['tmp_path']) + os.chmod(self.env['tmp_path'], 0o777) + if not self.env['output_path'].exists(): + os.mkdir(self.env['output_path']) + os.chmod(self.env['output_path'], 0o777) + if self.is_test and not self.env['test_output_path'].exists(): + os.mkdir(self.env['test_output_path']) + os.chmod(self.env['test_output_path'], 0o777) + + # Print the environment variables + for key in self.env: + print(f"export {key.upper()}={self.env[key]}") + + +class CamCalibration(DataFile): + def __init__(self, name): + super().__init__(name) + self.env['camchain_path'] = self.env['output_path'] / "camchain.yaml" + self.env['report_cam_path'] = self.env['output_path'] / "report-cam.pdf" + self.env['results_cam_path'] = self.env['output_path'] / "results-cam.txt" + + +class ImuCalibration(DataFile): + def __init__(self, name, cam_calibration): + super().__init__(name) + self.cam_calibration = cam_calibration + self.env['imu_chain_path'] = self.env['output_path'] / 'imu_chain.yaml' + self.env['imu_results_path'] = self.env['output_path'] / 'imu_results.txt' + self.env['imu_report_path'] = self.env['output_path'] / 'imu_report.pdf' + self.env['calib_camchain_path'] = self.cam_calibration.env['camchain_path'] + + def printEnvs(self): + super().printEnvs() + + +class DataRecording(DataFile): + def __init__(self, camera_calib, imu_calib, **kwargs): + is_test = kwargs['is_test_file'] + assert isinstance(is_test, bool) + super().__init__(kwargs['file'], is_test=is_test) + self.cam_calibration = camera_calib + # assert lidar_calib in self.lidar_calib_files, f"Lidar calibration file {lidar_calib} not found" + self.imu_calibration = imu_calib + self.lidar_calibration = kwargs['lidar_calib_reference'] + self.depth_scan = kwargs['depth_scan_aggregation'] + self.internimage = kwargs['internimage_semantics'] + self.check_pose_return = kwargs['check_pose_return'] + assert isinstance(self.internimage, bool) + assert (isinstance(self.check_pose_return, bool) and + not self.check_pose_return) or \ + isinstance(self.check_pose_return, dict) + assert self.depth_scan >= 4 and self.depth_scan <= 400, \ + f"Invalid depth_scan {self.depth_scan}" + + # FasterLIO env variables + self.env['converted_bag_path'] = self.env['tmp_path'] / (self.name + "_converted.bag") + if self.is_test: + out_path = self.env['test_output_path'] + else: + out_path = self.env['output_path'] + + self.env['pcd_global_path'] = out_path / (self.name + "_global.pcd") + self.env['pcd_local_path'] = out_path / "local_scans" + self.env['traj_path'] = self.env['tmp_path'] / (self.name + ".traj") + self.env['gt_pose_path'] = out_path / (self.name + "_pose_gt.h5") + self.env['gt_depth_path'] = out_path / (self.name + "_depth_gt.h5") + self.env['depth_video_raw'] = out_path / (self.name + "_depth_gt.avi") + self.env['depth_events_video_raw'] = out_path / (self.name + "_depth_gt_events.avi") + + self.faster_lio_config = kwargs['faster_lio_config'] + assert self.faster_lio_config in 
["long_range_ouster64", + "short_range_ouster64"], \ + f"Invalid faster lio config {self.faster_lio_config}" + self.env['faster_lio_config'] = self.faster_lio_config + self.env['depth_scan_aggregation'] = self.depth_scan + + # Semantics config + if self.internimage: + self.is_internimage = True + if self.is_test: + self.env['internimage_path'] = self.env['test_output_path'] / (self.name + "_semantics.h5") + self.env['semantics_video_raw'] = self.env['test_output_path'] / (self.name + "_semantics.avi") + else: + self.env['internimage_path'] = self.env['output_path'] / (self.name + "_semantics.h5") + self.env['semantics_video_raw'] = self.env['output_path'] / (self.name + "_semantics.avi") + else: + self.is_internimage = False + + if self.check_pose_return: + self.check_pose_return = True + self.env['check_pose_return'] = 1 + self.env['absolute_position_error'] = kwargs['check_pose_return']['absolute_position_error'] + else: + self.check_pose_return = False + self.env['check_pose_return'] = 0 + + def printEnvs(self): + # Check that the camera camchain exists and add it to the file + self.cam_calibration.fileExists() + self.env['calib_camchain_path'] = self.cam_calibration.env['camchain_path'] + + self.imu_calibration.fileExists() + self.env['calib_imu_path'] = self.imu_calibration.env['imu_chain_path'] + + # TODO Lidar calib is obtained manualy. It would be great to get it + # automatically from the file as we do with IMU and camera + assert isinstance(self.lidar_calibration, str) + self.env['calib_lidar_path'] = LIDAR_CALIB_FOLDER_PATH / self.lidar_calibration + # Check that the Lidar calibration file exists + if not self.env['calib_lidar_path'].exists(): + sys.exit(f"Lidar calibration file {self.env['calib_lidar_path']} does not exist") + + # Print all the environment variables + super().printEnvs() + + def isInternimage(self): + return self.is_internimage + + +class Dataset(): + def __init__(self, debug=False): + # Read yaml and parse it + self.camera_calib_files = {} + self.lidar_calib_files = {} + self.imu_calib_files = {} + self.data_recording_files = {} + + # Read the yaml file and create the objects for the dataset + with open(DATASET_LIST_PATH, 'r') as stream: + yml = yaml.load(stream, Loader=yaml.FullLoader) + + # Need to split them as we need to process calib before data + cam_calib_yml = [i for i in yml + if i['filetype'] == "camera_calib"] + imu_calib_yml = [i for i in yml + if i['filetype'] == "imu_calib"] + lidar_calib_yml = [i for i in yml + if i['filetype'] == "lidar_calib"] + data_yml = [i for i in yml + if i['filetype'] == "data"] + + all_calib_len = len(cam_calib_yml) + len(imu_calib_yml) + len(lidar_calib_yml) + if debug: + print(f"Found {Fore.GREEN}{all_calib_len}{Style.RESET_ALL}", + "calibration files and", + f"{Fore.GREEN}{len(data_yml)}{Style.RESET_ALL} data files") + + assert all_calib_len + len(data_yml) == len(yml) + + for file in cam_calib_yml: + filename = file['file'] + obj = CamCalibration(filename) + self.camera_calib_files[filename] = obj + + for file in imu_calib_yml: + filename = file['file'] + camera_calib = file['camera_calib_reference'] + obj = ImuCalibration(filename, + self.camera_calib_files[camera_calib]) + self.imu_calib_files[filename] = obj + + # We are not using automatic lidar calibration for the time being + + for file in data_yml: + # Check that the calibration files exist + filename = file['file'] + camera_calib = file['camera_calib_reference'] + imu_calib = file['imu_calib_reference'] + assert camera_calib in self.camera_calib_files, \ + 
f"Camera calibration file {camera_calib} not found" + assert imu_calib in self.imu_calib_files, \ + f"IMU calibration file {imu_calib} not found" + file_obj = DataRecording(self.camera_calib_files[camera_calib], + self.imu_calib_files[imu_calib], **file) + self.data_recording_files[filename] = file_obj + + def is_valid(self, name): + return name in self.data_recording_files \ + or name in self.camera_calib_files \ + or name in self.imu_calib_files \ + or name in self.lidar_calib_files + + def get_file(self, name): + if not self.is_valid(name): + sys.exit(f"File {name} not found in dataset") + if name in self.data_recording_files: + return self.data_recording_files[name] + elif name in self.camera_calib_files: + return self.camera_calib_files[name] + elif name in self.imu_calib_files: + return self.imu_calib_files[name] + elif name in self.lidar_calib_files: + return self.lidar_calib_files[name] + + def get_file_list(self, ft): + assert isinstance(ft, str) and ft in ["camera_calib", "imu_calib", + "lidar_calib", "data", + "data_semantics", "all"] + if ft == "camera_calib": + return ".bag,".join(self.camera_calib_files.keys())+".bag" + elif ft == "imu_calib": + return ".bag,".join(self.imu_calib_files.keys())+".bag" + elif ft == "lidar_calib": + sys.exit("Lidar calibration files not supported yet") + elif ft == "data": + return ".bag,".join(self.data_recording_files.keys())+".bag" + elif ft == "data_semantics": + semantic_list = [] + for file in self.data_recording_files: + if self.data_recording_files[file].isInternimage(): + semantic_list.append(file) + return ".bag,".join(semantic_list)+".bag" + elif ft == "all": + return ".bag,".join(self.data_recording_files.keys())+".bag," + \ + ".bag,".join(self.camera_calib_files.keys())+".bag," + \ + ".bag,".join(self.imu_calib_files.keys())+".bag" + else: + sys.exit(f"File type {ft} not supported") + + +if __name__ == '__main__': + # Check that the TMP_PATH exists + if not TMP_FOLDER_PATH.exists(): + sys.exit(f"TMP_PATH {TMP_FOLDER_PATH} does not exist") + + # Check that the output folder exists + if not OUTPUT_FOLDER_PATH.exists(): + sys.exit(f"OUTPUT_PATH {OUTPUT_FOLDER_PATH} does not exist") + + # Argparse + parser = argparse.ArgumentParser(description='Generate dataset exports') + parser.add_argument('--bag_name', metavar='bag_name', type=str, + required=False, + help='Name of the bag to generate exports for') + parser.add_argument('--debug', action='store_true', help='Debug mode') + parser.add_argument('--get_files', action='store', type=str, + help='Get a list of files. Options are: lidar_calib, imu_calib, camera_calib, data, data_semantics') + parser.add_argument('--short_list', action='store_true', + help='Print a short list of files (for debugging)') + args = parser.parse_args() + + if args.debug: + ds = Dataset(debug=True) + else: + ds = Dataset() + + if args.debug: + # Run only on the first 6 test files + for file in SHORT_LIST[:6]: + # FIXME: skip lidar calib files for now + if "lidar_calib" in file: + continue + stem = file.split(".")[0] + print(f"\n{Fore.GREEN}{file}{Style.RESET_ALL}") + if not ds.is_valid(stem): + sys.exit(f"File {file} not found in dataset. 
Strange.") + dataFile = ds.get_file(stem) + dataFile.printEnvs() + elif args.get_files is not None: + if args.get_files == "data_semantics": + file_list = ds.get_file_list("data_semantics") + elif args.get_files == "data": + file_list = ds.get_file_list("data") + elif args.get_files == "lidar_calib": + file_list = ds.get_file_list("lidar_calib") + elif args.get_files == "imu_calib": + file_list = ds.get_file_list("imu_calib") + elif args.get_files == "camera_calib": + file_list = ds.get_file_list("camera_calib") + elif args.get_files == "all": + file_list = ds.get_file_list("all") + else: + sys.exit(f"Unknown argument {args.get_files}") + if args.short_list: + filtered = [] + for item in file_list.split(","): + if item in SHORT_LIST: + filtered.append(item) + print(",".join(filtered)) + else: + print(file_list) + + else: + # The bag name is the first argument + if args.bag_name is None: + sys.exit("Bag name not provided") + bag_name = args.bag_name + # Check that the bag is in the dataset + stem = bag_name.split(".")[0] + if not ds.is_valid(stem): + sys.exit(f"Bag {bag_name} is not in the dataset") + dataFile = ds.get_file(stem) + dataFile.printEnvs() diff --git a/build_system/docker/Dockerfile b/build_system/docker/Dockerfile new file mode 100644 index 0000000..4af3d37 --- /dev/null +++ b/build_system/docker/Dockerfile @@ -0,0 +1,247 @@ +# Based on https://raw.githubusercontent.com/iandouglas96/jackal_master/master/jackal/Dockerfile +FROM nvidia/cuda:11.3.1-devel-ubuntu20.04 + +SHELL ["/bin/bash", "-c"] + +RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub + +#Run the frontend first so it doesn't throw an error later +RUN export DEBIAN_FRONTEND=noninteractive \ + && apt-get update \ + && export TZ="America/New_York" \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y keyboard-configuration \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata \ + && ln -fs "/usr/share/zoneinfo/$TZ" /etc/localtime \ + && dpkg-reconfigure --frontend noninteractive tzdata \ + && apt-get clean + +# Install general development tools +RUN apt-get update \ + && DEBIAN_FRONTEND=noninteractive apt-get install -y \ + build-essential \ + cmake \ + cppcheck \ + clang-tidy \ + gdb \ + git \ + libcwiid-dev \ + libgoogle-glog-dev \ + libspnav-dev \ + libusb-dev \ + lsb-release \ + python3-dbg \ + python3-dev \ + python3-empy \ + python3-numpy \ + python3-pip \ + python3-venv \ + python3-h5py \ + python3-matplotlib \ + python3-wxgtk4.0 \ + python3-tk \ + python3-igraph \ + python3-pyx \ + ipython3 \ + software-properties-common \ + sudo \ + wget \ + locales \ + iputils-ping \ + netcat \ + hdf5-tools \ + iproute2 \ + dbus-x11 \ + dialog \ + fontconfig \ + apt-utils \ + ripgrep \ + curl \ + bc \ + psmisc \ + pybind11-dev \ + && apt-get clean + +# Set locales +RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \ + locale-gen +ENV LC_ALL en_US.UTF-8 +ENV LANG en_US.UTF-8 +ENV LANGUAGE en_US:en + + +# Install ROS noetic +RUN sudo apt-get update \ + && sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' \ + && curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - \ + && sudo apt-get update \ + && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ + python3-catkin-tools \ + python3-rosdep \ + python3-rosinstall \ + python3-vcstool \ + ros-noetic-desktop-full \ + 
diff --git a/build_system/docker/Dockerfile b/build_system/docker/Dockerfile
new file mode 100644
index 0000000..4af3d37
--- /dev/null
+++ b/build_system/docker/Dockerfile
@@ -0,0 +1,247 @@
+# Based on https://raw.githubusercontent.com/iandouglas96/jackal_master/master/jackal/Dockerfile
+FROM nvidia/cuda:11.3.1-devel-ubuntu20.04
+
+SHELL ["/bin/bash", "-c"]
+
+RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2004/x86_64/3bf863cc.pub
+
+# Run the frontend first so it doesn't throw an error later
+RUN export DEBIAN_FRONTEND=noninteractive \
+    && apt-get update \
+    && export TZ="America/New_York" \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y keyboard-configuration \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y tzdata \
+    && ln -fs "/usr/share/zoneinfo/$TZ" /etc/localtime \
+    && dpkg-reconfigure --frontend noninteractive tzdata \
+    && apt-get clean
+
+# Install general development tools
+RUN apt-get update \
+    && DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    build-essential \
+    cmake \
+    cppcheck \
+    clang-tidy \
+    gdb \
+    git \
+    libcwiid-dev \
+    libgoogle-glog-dev \
+    libspnav-dev \
+    libusb-dev \
+    lsb-release \
+    python3-dbg \
+    python3-dev \
+    python3-empy \
+    python3-numpy \
+    python3-pip \
+    python3-venv \
+    python3-h5py \
+    python3-matplotlib \
+    python3-wxgtk4.0 \
+    python3-tk \
+    python3-igraph \
+    python3-pyx \
+    ipython3 \
+    software-properties-common \
+    sudo \
+    wget \
+    locales \
+    iputils-ping \
+    netcat \
+    hdf5-tools \
+    iproute2 \
+    dbus-x11 \
+    dialog \
+    fontconfig \
+    apt-utils \
+    ripgrep \
+    curl \
+    bc \
+    psmisc \
+    pybind11-dev \
+    && apt-get clean
+
+# Set locales
+RUN sed -i -e 's/# en_US.UTF-8 UTF-8/en_US.UTF-8 UTF-8/' /etc/locale.gen && \
+    locale-gen
+ENV LC_ALL en_US.UTF-8
+ENV LANG en_US.UTF-8
+ENV LANGUAGE en_US:en
+
+
+# Install ROS noetic
+RUN sudo apt-get update \
+    && sudo sh -c 'echo "deb http://packages.ros.org/ros/ubuntu $(lsb_release -sc) main" > /etc/apt/sources.list.d/ros-latest.list' \
+    && curl -s https://raw.githubusercontent.com/ros/rosdistro/master/ros.asc | sudo apt-key add - \
+    && sudo apt-get update \
+    && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
+    python3-catkin-tools \
+    python3-rosdep \
+    python3-rosinstall \
+    python3-vcstool \
+    ros-noetic-desktop-full \
+    ros-noetic-pointcloud-to-laserscan \
+    ros-noetic-robot-localization \
+    ros-noetic-spacenav-node \
+    ros-noetic-tf2-sensor-msgs \
+    ros-noetic-twist-mux \
+    ros-noetic-velodyne-simulator \
+    ros-noetic-vision-msgs \
+    ros-noetic-mavros \
+    ros-noetic-mavros-dbgsym \
+    ros-noetic-rtcm-msgs \
+    libjsoncpp-dev \
+    libspdlog-dev \
+    python3-yaml \
+    python3-pycryptodome \
+    python3-gnupg \
+    libsuitesparse-dev \
+    libv4l-dev \
+    libceres-dev \
+    python3-colorama \
+    ros-noetic-random-numbers \
+    ros-noetic-pybind11-catkin \
+    ros-noetic-pcl-ros \
+    ros-noetic-rviz \
+    ros-noetic-tf2-geometry-msgs \
+    pcl-tools
+
+# Install python tools with pip
+RUN python3 -m pip install rospkg defusedxml osrf-pycommon tqdm
+
+# Rosdep initialization
+RUN sudo rosdep init \
+    && sudo apt-get clean \
+    && rosdep update
+
+# Install the metavision SDK
+RUN sudo apt-get update \
+    && sudo sh -c 'echo "deb [arch=amd64 trusted=yes] https://apt.prophesee.ai/dists/public/baiTh5si/ubuntu focal sdk" > /etc/apt/sources.list.d/metavision.list' \
+    && sudo sh -c 'echo "deb [trusted=yes] http://ppa.launchpad.net/s-schmeisser/ogre-1.12/ubuntu focal main" >> /etc/apt/sources.list.d/metavision.list' \
+    && sudo apt-get update \
+    && sudo DEBIAN_FRONTEND=noninteractive apt-get install -y \
+    libcanberra-gtk-module \
+    mesa-utils \
+    python3.8-dev \
+    libboost-program-options-dev \
+    libeigen3-dev \
+    libpcl-dev \
+    libyaml-cpp-dev \
+    metavision-sdk \
+    metavision-sdk-samples \
+    metavision-sdk-advanced-samples \
+    metavision-sdk-cv-samples \
+    libogre-1.12-dev \
+    ffmpeg \
+    libx264-dev
+
+# Dependencies required by metavision
+RUN python3 -m pip install "opencv-python==4.5.5.64" "sk-video==1.1.10" "fire==0.4.0" "numpy==1.23.4" pandas scipy
+
+# Install open3d requirement for LiDAR calibration
+RUN python3 -m pip install 'open3d==0.13.0'
+
+# Install the SilkyEV Driver
+# RUN wget https://centuryarks.com/en/wp-content/uploads/2021/11/SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4.zip \
+#     && unzip SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4.zip \
+#     && sed -i '/reload/d' SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4/CA_Silky_installer.sh\
+#     && sed -i '/^source/d' SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4/CA_Silky_installer.sh\
+#     && cd SilkyEvCam_G31_Installer_for_ubuntu_v2.5.4 \
+#     && sh ./CA_Silky_installer.sh
+
+
+# Add a user with the same user_id as the user outside the container
+ARG user_id
+ARG user_name
+
+ENV USER $user_name
+
+RUN useradd -U --uid ${user_id} -m -s /bin/bash $USER \
+    && echo "$USER:$USER" | chpasswd \
+    && adduser $USER sudo \
+    && echo "$USER ALL=NOPASSWD: ALL" >> /etc/sudoers.d/$USER
+
+# Commands below are run as USER
+USER $USER
+
+# Create a ROS environment
+RUN mkdir -p /home/$USER/catkin_ws/src \
+    && cd /home/$USER/catkin_ws \
+    && catkin init \
+    && catkin config --extend /opt/ros/noetic \
+    && catkin config --merge-devel \
+    && catkin build -j 48 --no-status -DCMAKE_BUILD_TYPE=Release
+
+# Load ROS in environment
+RUN /bin/sh -c 'echo ". /home/$USER/catkin_ws/devel/setup.bash" >> ~/.bashrc'
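+
+# The source checkouts below are pinned to exact commits so image rebuilds
+# stay reproducible. To build this image by hand (sketch; Jenkins passes the
+# same build args from Jenkinsfile_docker_build, and the build-context
+# directory is an assumption):
+#   docker build --build-arg user_id=$(id -u) \
+#                --build-arg user_name=$(whoami) \
+#                -t m3ed/m3ed:latest build_system/docker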
+# Install Kalibr
+RUN cd /home/$USER/catkin_ws/src \
+    && git clone https://github.com/k-chaney/kalibr.git \
+    && cd kalibr \
+    && git checkout 21ba7fa35d52bf6702f7e53807f1138f5162e5e2 \
+    && cd ../.. \
+    && catkin build --no-status
+
+# Install metavision_ros_driver and metavision tools
+RUN cd /home/$USER/catkin_ws/src \
+    && git clone https://github.com/berndpfrommer/metavision_ros_driver.git \
+    && cd metavision_ros_driver \
+    && git checkout ab2932035200eccefcc63d943643a9020e836c61 \
+    && cd .. \
+    && git clone https://github.com/berndpfrommer/event_array_py.git \
+    && cd event_array_py \
+    && git checkout 3c8578f66610493a11c6208376a4a22870162c37 \
+    && cd .. \
+    && git clone https://github.com/k-chaney/simple_image_recon.git \
+    && cd simple_image_recon \
+    && git checkout 3ba9d1aa964e6d493d9ec8d52e8c71ea3e8f4976 \
+    && cd .. \
+    && git clone https://github.com/berndpfrommer/simple_image_recon_lib.git \
+    && cd simple_image_recon_lib \
+    && git checkout 328d723f2941131b317471d97a534aaba5438003 \
+    && cd ../.. \
+    && wstool init src src/metavision_ros_driver/metavision_ros_driver.rosinstall \
+    && wstool merge -t src src/event_array_py/event_array_py.rosinstall \
+    && wstool update -t src \
+    && catkin build --no-status
+
+# Install Ouster ROS packages
+RUN cd /home/$USER/catkin_ws/src \
+    && python3 -m pip install 'ouster-sdk[examples]' \
+    && git clone https://github.com/ouster-lidar/ouster-ros.git \
+    && cd ouster-ros \
+    && git checkout 208ee15a2a773d21194e3775d64a5b06d59e7310 \
+    && git submodule update --init --recursive \
+    && cd ../.. \
+    && catkin build --no-status
+
+# Install FasterLIO
+RUN cd /home/$USER/catkin_ws/src \
+    && git clone https://github.com/k-chaney/faster-lio.git \
+    && cd faster-lio \
+    && git checkout abe2ccef26650bb66a454388e2bccaf648783e11 \
+    && catkin build --no-status
+
+# Install InternImage following instructions from
+# https://github.com/OpenGVLab/InternImage/tree/master/segmentation
+RUN cd /home/$USER \
+    && wget -q https://repo.anaconda.com/archive/Anaconda3-2023.03-1-Linux-x86_64.sh \
+    && bash ./Anaconda3-2023.03-1-Linux-x86_64.sh -b -p /home/$USER/anaconda3 \
+    && git clone https://github.com/OpenGVLab/InternImage.git \
+    && cd InternImage \
+    && git checkout 631a5159e2c4e4bda16c732e64fa9584b38859ea \
+    && source /home/$USER/anaconda3/bin/activate \
+    && conda create -n internimage python=3.7 -y \
+    && conda activate internimage \
+    && pip install torch==1.11.0+cu113 torchvision==0.12.0+cu113 -f https://download.pytorch.org/whl/torch_stable.html \
+    && pip install -U openmim \
+    && mim install mmcv-full==1.5.0 \
+    && pip install timm==0.6.11 mmdet==2.28.1 \
+    && pip install h5py mmsegmentation==0.27.0 \
+    && pip install opencv-python termcolor yacs pyyaml scipy
+#&& cd ./segmentation/ops_dcnv3 \
+#&& sh ./make.sh
+
+# When running a container start in the home folder
+WORKDIR /home/$USER
diff --git a/build_system/docker/docker_checks.sh b/build_system/docker/docker_checks.sh
new file mode 100755
index 0000000..a931941
--- /dev/null
+++ b/build_system/docker/docker_checks.sh
@@ -0,0 +1,109 @@
+#!/bin/bash
+set -eo pipefail
+
+# M3ED container and host preliminary checks
+# Run:
+#   ./docker_checks.sh <branch> <hash>
+#
+# You can get the branch name with
+# git rev-parse --abbrev-ref HEAD
+#
+# You can get the commit number with
+# git rev-parse HEAD
+#
+# This script checks that the sample data exists on the local disk and that
+# the ROS environment is set up
+
+# Terminal colors
+export RED='\033[0;31m'
+export GREEN='\033[0;32m'
+export BOLD='\033[1m'
+export VIOLET='\033[0;35m'
+export BLUE='\033[0;34m'
+export NC='\033[0m' # No Color
+
+# Set the branch name and hash as arguments
+BRANCH=$1
+HASH=$2
+
+# Sample bags used to test that the dataset is loaded
+DATA_PATH=/M3ED_Build/input/raw_bags
+# Create array
+SAMPLE_BAGS=("car_urban_day_1_penno_small_loop.bag" "car_urban_day_1_horse.bag")
+
+# Fancy banner when the script starts
+echo -e "${VIOLET}${BOLD}------------------------------${NC}"
+echo -e "${VIOLET}${BOLD}M3ED Container and host checks${NC}"
+echo -e 
"${VIOLET}${BOLD}------------------------------${NC}" + +# ===================== PRELIMINARY CHECKS ===================== + +# Print the current date and time +echo -e "${GREEN}Date: $(date)${NC}" + +# Print the current user +echo -e "${GREEN}User: $(whoami)${NC}" +# + +# Print the current branch name and hash name +if [ -z "$BRANCH" ] +then + echo -e "${RED}Branch argument is NULL. Quitting.${NC}" + exit 1 +fi +echo -e "${GREEN}Branch: ${BRANCH}${NC}" +if [ -z "$HASH" ] +then + echo -e "${RED}Hash argument is NULL. Quitting.${NC}" + exit 1 +fi +echo -e "${GREEN}Hash: ${HASH}${NC}" + +# Source ROS environment +. ~/catkin_ws/devel/setup.bash +RVERSION=$(rosversion -d) +echo -e "${GREEN}ROS version: ${RVERSION}${NC}" + +# Check that the folder exists +if [ ! -d "${DATA_PATH}" ] +then + echo -e "${RED}Data path ${DATA_PATH} not found. Is the NFS share mounted? Quitting.${NC}" + exit 1 +else + echo -e "${GREEN}Data path ${DATA_PATH} found.${NC}" +fi + +# Check that each one of the sample bags exists +for SAMPLE_BAG in "${SAMPLE_BAGS[@]}" +do + if [ ! -f "${DATA_PATH}/${SAMPLE_BAG}" ] + then + echo -e "${RED}Sample bag ${SAMPLE_BAG} not found. Quitting.${NC}" + exit 1 + else + echo -e "${GREEN}Sample bag ${SAMPLE_BAG} found.${NC}" + fi +done + +# Test if nvidia-smi is present and executable +if ! command -v nvidia-smi &> /dev/null + then + echo -e "${RED}nvidia-smi could not be executed.${NC}" + exit 1 + else + echo -e "${GREEN}nvidia-smi found.${NC}" +fi + +# Test for presence of GPUs +gpu_count=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l) +if [ "$gpu_count" -gt 0 ] +then + echo -e "${GREEN}Valid GPUs detected: $gpu_count${NC}" +else + echo -e "${RED}No valid GPUs detected.${NC}" + exit 1 +fi + + +echo -e "${GREEN}${BOLD}Preliminary checks passed.\n${NC}" +exit 0 diff --git a/build_system/jenkins/Jenkinsfile_CPU_stage1 b/build_system/jenkins/Jenkinsfile_CPU_stage1 new file mode 100644 index 0000000..559b509 --- /dev/null +++ b/build_system/jenkins/Jenkinsfile_CPU_stage1 @@ -0,0 +1,78 @@ +pipeline { + agent { + docker { + image 'm3ed/m3ed:latest' + args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' + label 'CPU_node' + } + } + parameters { + string(name: 'CHECKOUT_COMMIT', defaultValue: '', + description: 'Commit to checkout') + string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') + } + options { + ansiColor('xterm') + } + stages { + stage('Checkout') { + steps { + sh "git checkout ${params.CHECKOUT_COMMIT}" + } + } + stage('Verify Bag') { + steps { + sh "./build_system/bag_processing/rosbag2verify.bash ${params.BAG_NAME}" + } + } + stage('Time Corrections') { + steps { + script { + sh "./build_system/bag_processing/rosbag2timecorrection.bash ${params.BAG_NAME}" + } + } + } + stage('Parallel 1') { + parallel { + stage('HDF5') { + steps { + sh "./build_system/bag_processing/rosbag2hdf5.bash ${params.BAG_NAME}" + } + } + stage('Ouster bag and Faster-LIO') { + when { + expression { !params.BAG_NAME.contains('calib') } + } + steps { + sh "./build_system/lidar_depth/rosbag2pcd.bash ${params.BAG_NAME}" + } + } + stage('Kalibr Cam') { + when { + expression { params.BAG_NAME.contains('camera_calib') } + } + steps { + sh "./build_system/calibration/rosbag2camcalibration.bash ${params.BAG_NAME}" + } + } + stage('Kalibr Imu') { + when { + expression { params.BAG_NAME.contains('imu_calib') } + } + steps { + sh "./build_system/calibration/rosbag2imucalibration.bash ${params.BAG_NAME}" + } + } + } + } + + stage('Generate GT') { + when { + expression { 
!params.BAG_NAME.contains('calib') } + } + steps { + sh "./build_system/lidar_depth/pcd2gt.bash ${params.BAG_NAME}" + } + } + } +} diff --git a/build_system/jenkins/Jenkinsfile_CPU_stage2 b/build_system/jenkins/Jenkinsfile_CPU_stage2 new file mode 100644 index 0000000..555c572 --- /dev/null +++ b/build_system/jenkins/Jenkinsfile_CPU_stage2 @@ -0,0 +1,45 @@ +pipeline { + agent { + docker { + image 'm3ed/m3ed:latest' + args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' + label 'CPU_node' + } + } + parameters { + string(name: 'CHECKOUT_COMMIT', defaultValue: '', + description: 'Commit to checkout') + string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') + } + options { + ansiColor('xterm') + } + stages { + stage('Checkout') { + steps { + sh "git checkout ${params.CHECKOUT_COMMIT}" + } + } + stage('Parallel 1') { + parallel { + stage('Image Videos') { + when { + expression { !params.BAG_NAME.contains('calib') } + } + steps { + sh "./build_system/bag_processing/hdf52media.bash ${params.BAG_NAME}" + } + } + stage('Depth Video') { + when { + expression { !params.BAG_NAME.contains('calib') } + } + steps { + sh "./build_system/lidar_depth/gt2media.bash ${params.BAG_NAME}" + } + } + } + } + + } +} diff --git a/build_system/jenkins/Jenkinsfile_CPU_stage3 b/build_system/jenkins/Jenkinsfile_CPU_stage3 new file mode 100644 index 0000000..59f381b --- /dev/null +++ b/build_system/jenkins/Jenkinsfile_CPU_stage3 @@ -0,0 +1,34 @@ +pipeline { + agent { + docker { + image 'm3ed/m3ed:latest' + args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' + label 'CPU_node' + } + } + parameters { + string(name: 'CHECKOUT_COMMIT', defaultValue: '', + description: 'Commit to checkout') + string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') + } + options { + ansiColor('xterm') + } + stages { + stage('Checkout') { + steps { + sh "git checkout ${params.CHECKOUT_COMMIT}" + } + } + stage('Check GT loop error') { + steps { + sh "./build_system/lidar_depth/gt2verify.bash ${params.BAG_NAME}" + } + } + stage('Get Stats') { + steps { + sh "./build_system/stats_and_summary/getStats.bash ${params.BAG_NAME}" + } + } + } +} diff --git a/build_system/jenkins/Jenkinsfile_GPU b/build_system/jenkins/Jenkinsfile_GPU new file mode 100644 index 0000000..b2cf817 --- /dev/null +++ b/build_system/jenkins/Jenkinsfile_GPU @@ -0,0 +1,35 @@ +pipeline { + agent { + docker { + image 'm3ed/m3ed:latest' + args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all' + label 'GPU_node' + } + } + parameters { + string(name: 'CHECKOUT_COMMIT', defaultValue: '', description: 'Commit to checkout') + string(name: 'BAG_NAME', defaultValue: '', description: 'Input bag name') + string(name: 'GPU_TARGET', defaultValue: '', description: 'GPU to run the code') + } + options { + ansiColor('xterm') + } + stages { + stage('Checkout') { + steps { + sh "git checkout ${params.CHECKOUT_COMMIT}" + } + } + stage ('InternImage') { + steps { + echo "GPU_TARGET: ${params.GPU_TARGET}" + sh "./build_system/semantics/hdf52internimage.bash ${params.BAG_NAME} ${params.GPU_TARGET}" + } + } + stage('Semantics Video') { + steps { + sh "./build_system/semantics/internimage2media.bash ${params.BAG_NAME}" + } + } + } +} diff --git a/build_system/jenkins/Jenkinsfile_dispatcher b/build_system/jenkins/Jenkinsfile_dispatcher new file mode 100644 index 0000000..ac098a8 --- /dev/null +++ b/build_system/jenkins/Jenkinsfile_dispatcher @@ -0,0 +1,209 @@ +pipeline { + agent any + environment { + BAG_PATH = "/M3ED_Build/input/raw_bags" + } 
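+
+  // The dispatcher fans every bag out to the per-bag pipelines
+  // (M3ED-CPU-Stage1/2/3 and M3ED-GPU), one downstream build per bag.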
+
+  stages {
+    //// We first build the docker image in each of the hosts by executing the M3ED-docker-build pipeline
+    //stage('Build Docker Image on Host and Test') {
+    //  steps {
+    //    script {
+    //      echo "Docker container build"
+    //      // Get list of all hosts with label 'runner'
+    //      def label = 'docker'
+    //      def nodesWithLabel = Jenkins.getInstance().getLabel(label).getNodes()
+    //      def nodeNamesWithLabel = nodesWithLabel.collect { node -> node.nodeName }
+    //      echo "Nodes with Label '${label}': ${nodeNamesWithLabel}"
+    //      for (int i = 0; i < nodeNamesWithLabel.size(); i++) {
+    //        def nodeName = nodeNamesWithLabel[i]
+    //        build job: 'M3ED-docker-build', wait: true, parameters: [
+    //          string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}")
+    //        ]
+    //      }
+    //    }
+    //  }
+    //}
+    stage('Set Files for Other Branches') {
+      // Only run when branch is not master or origin/master
+      when {
+        not {
+          expression {
+            return env.GIT_BRANCH == "origin/master"
+          }
+        }
+      }
+      steps {
+        script {
+          env.DATA_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data --short_list", returnStdout: true).trim()
+          env.DATA_SEMANTICS_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data_semantics --short_list", returnStdout: true).trim()
+          env.IMU_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files imu_calib --short_list", returnStdout: true).trim()
+          env.CAMERA_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files camera_calib --short_list", returnStdout: true).trim()
+        }
+      }
+    }
+    stage('Set Files for Master Branch') {
+      // Only run when branch is origin/master
+      when {
+        expression {
+          return env.GIT_BRANCH == "origin/master"
+        }
+      }
+      steps {
+        script {
+          env.DATA_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data", returnStdout: true).trim()
+          env.DATA_SEMANTICS_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files data_semantics", returnStdout: true).trim()
+          env.IMU_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files imu_calib", returnStdout: true).trim()
+          env.CAMERA_CALIB_FILES = sh(script: "python3 build_system/dataset_paths.py --get_files camera_calib", returnStdout: true).trim()
+        }
+      }
+    }
+    stage('CPU Stage 1') {
+      steps {
+        script {
+          // Process "calib" files first, order is defined by allFiles order
+          def camera_calib_files = env.CAMERA_CALIB_FILES.split(',')
+          def imu_calib_files = env.IMU_CALIB_FILES.split(',')
+          def dataFiles = env.DATA_FILES.split(',')
+          def allFiles = [camera_calib_files, imu_calib_files, dataFiles]
+
+          echo "CPU Stage 1"
+          for (int f = 0; f < 3; f++) {
+            files = allFiles[f]
+            // Dispatch files to nodes
+            // https://www.jenkins.io/doc/pipeline/examples/#jobs-in-parallel
+            def builds = [:]
+            def statuses = [:]
+            for (int i = 0; i < files.size(); i++) {
+              def index = i
+              def file = files[index]
+              builds[file] = {
+                // Avoid issues with wait period
+                def status = build job: 'M3ED-CPU-Stage1', wait: true, parameters: [
+                  string(name: 'BAG_NAME', value: file),
+                  string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
+                  string(name: 'dummy', value: "${index}")], quietPeriod: 2
+                statuses[file]=status
+              }
+            }
+            parallel builds
+            sleep 30 // Kill issues with container
+            for (int i = 0; i < files.size(); i++) {
+              def index = i
+              def file = files[index]
+              if (statuses[file].result != 'SUCCESS') {
+                error "Pipeline ${file} failed with status ${statuses[file].result}"
+              }
+            }
+          }
+        }
+      }
+    }
+    stage('Parallel') {
+      parallel {
+        stage('GPU') {
+          steps {
+            script {
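+              // Semantics bags are launched in batches of par_jobs builds;
+              // each build is pinned to a device via round-robin (j % num_gpu).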
+              echo "GPU"
+              def sem_files = env.DATA_SEMANTICS_FILES.split(',')
+              def par_jobs = 8
+              def num_gpu = 4
+              def N = sem_files.size()
+              def builds = [:]
+              def statuses = [:]
+
+              for (int i = 0; i < N; i += par_jobs) {
+                builds.clear()
+                statuses.clear()
+                for (int j = i; j < i + par_jobs && j < N; j++) {
+                  def index = j
+                  def file = sem_files[index]
+                  def GPU = j % num_gpu
+                  builds[file] = {
+                    // Avoid issues with wait period
+                    def status = build job: 'M3ED-GPU', wait: true, parameters: [
+                      string(name: 'BAG_NAME', value: file),
+                      string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
+                      string(name: 'GPU_TARGET', value: "${GPU}"),
+                      string(name: 'dummy', value: "${index}")], quietPeriod: 2
+                    statuses[file]=status
+                  }
+                }
+                parallel builds
+                for (int j = i; j < i + par_jobs && j < N; j++) {
+                  def index = j
+                  def file = sem_files[index]
+                  if (statuses[file].result != 'SUCCESS') {
+                    error "Pipeline ${file} failed with status ${statuses[file].result}"
+                  }
+                }
+              }
+            }
+          }
+        }
+        stage('CPU Stage 2') {
+          steps {
+            script {
+              echo "CPU Stage 2"
+              def dataFiles = env.DATA_FILES.split(',')
+
+              def builds = [:]
+              def statuses = [:]
+              for (int i = 0; i < dataFiles.size(); i++) {
+                def index = i
+                def file = dataFiles[index]
+                builds[file] = {
+                  // Avoid issues with wait period
+                  def status = build job: 'M3ED-CPU-Stage2', wait: true, parameters: [
+                    string(name: 'BAG_NAME', value: file),
+                    string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
+                    string(name: 'dummy', value: "${index}")], quietPeriod: 2
+                  statuses[file]=status
+                }
+              }
+              parallel builds
+              for (int i = 0; i < dataFiles.size(); i++) {
+                def index = i
+                def file = dataFiles[index]
+                if (statuses[file].result != 'SUCCESS') {
+                  error "Pipeline ${file} failed with status ${statuses[file].result}"
+                }
+              }
+            }
+          }
+        }
+      }
+    }
+    stage('CPU Stage 3') {
+      steps {
+        script {
+          echo "CPU Stage 3"
+          def dataFiles = env.DATA_FILES.split(',')
+          def builds = [:]
+          def statuses = [:]
+          for (int i = 0; i < dataFiles.size(); i++) {
+            def index = i
+            def file = dataFiles[index]
+            builds[file] = {
+              // Avoid issues with wait period
+              def status = build job: 'M3ED-CPU-Stage3', wait: true, parameters: [
+                string(name: 'BAG_NAME', value: file),
+                string(name: 'CHECKOUT_COMMIT', value: "${env.GIT_COMMIT}"),
+                string(name: 'dummy', value: "${index}")], quietPeriod: 2
+              statuses[file]=status
+            }
+          }
+          parallel builds
+          for (int i = 0; i < dataFiles.size(); i++) {
+            def index = i
+            def file = dataFiles[index]
+            if (statuses[file].result != 'SUCCESS') {
+              error "Pipeline ${file} failed with status ${statuses[file].result}"
+            }
+          }
+        }
+      }
+    }
+  }
+}
+
diff --git a/build_system/jenkins/Jenkinsfile_docker_build b/build_system/jenkins/Jenkinsfile_docker_build
new file mode 100644
index 0000000..d6348bf
--- /dev/null
+++ b/build_system/jenkins/Jenkinsfile_docker_build
@@ -0,0 +1,40 @@
+def user_id
+def user_name
+node {
+  user_id = sh(returnStdout: true, script: 'id -u').trim()
+  user_name = sh(returnStdout: true, script: 'whoami').trim()
+}
+
+pipeline {
+  agent {
+    dockerfile {
+      dir 'build_system/docker'
+      additionalBuildArgs "--build-arg user_id=${user_id} --build-arg user_name=${user_name} -t m3ed/m3ed:latest"
+      args '-v /M3ED_Build:/M3ED_Build --runtime=nvidia --gpus all'
+
+    }
+  }
+  parameters {
+    string(name: 'CHECKOUT_COMMIT', defaultValue: '',
+           description: 'Commit to checkout')
+  }
+  options {
+    ansiColor('xterm')
+  }
+  stages {
+    stage('Checkout') {
+      steps {
+        sh "git checkout ${params.CHECKOUT_COMMIT}"
+      }
+    }
+    stage ('Check System') {
+      steps {
+        script {
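+          // docker_checks.sh expects the branch name first and the commit
+          // hash second (see its header comment).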
+          def branch = sh(returnStdout: true, script: 'git rev-parse --abbrev-ref HEAD').trim()
+          def hash = sh(returnStdout: true, script: 'git rev-parse HEAD').trim()
+          sh "./build_system/docker/docker_checks.sh ${branch} ${hash}"
+        }
+      }
+    }
+  }
+}
diff --git a/build_system/lidar_depth/concatenate_pcd_uncompressed/CMakeLists.txt b/build_system/lidar_depth/concatenate_pcd_uncompressed/CMakeLists.txt
new file mode 100644
index 0000000..6881f0f
--- /dev/null
+++ b/build_system/lidar_depth/concatenate_pcd_uncompressed/CMakeLists.txt
@@ -0,0 +1,204 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(concatenate_pcd_uncompressed)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  roscpp
+  pcl_ros
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
+
+## Generate messages in the 'msg' folder
+# add_message_files(
+#   FILES
+#   Message1.msg
+#   Message2.msg
+# )
+
+## Generate services in the 'srv' folder
+# add_service_files(
+#   FILES
+#   Service1.srv
+#   Service2.srv
+# )
+
+## Generate actions in the 'action' folder
+# add_action_files(
+#   FILES
+#   Action1.action
+#   Action2.action
+# )
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+#   DEPENDENCIES
+#   std_msgs  # Or other packages containing msgs
+# )
+
+################################################
+## Declare ROS dynamic reconfigure parameters ##
+################################################
+
+## To declare and build dynamic reconfigure parameters within this
+## package, follow these steps:
+## * In the file package.xml:
+##   * add a build_depend and a exec_depend tag for "dynamic_reconfigure"
+## * In this file (CMakeLists.txt):
+##   * add "dynamic_reconfigure" to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( + INCLUDE_DIRS + LIBRARIES concatenate_pcd_uncompressed + CATKIN_DEPENDS roscpp + DEPENDS roscpp pcl_ros pcl_conversions +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/concatenate_pcd_uncompressed.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +add_executable(${PROJECT_NAME} src/pcl_concatenate_points_pcd.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. "rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(${PROJECT_NAME}_node ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +target_link_libraries(${PROJECT_NAME} + ${catkin_LIBRARIES} +) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) 
for installation
+## in contrast to setup.py, you can choose the destination
+# catkin_install_python(PROGRAMS
+#   scripts/my_python_script
+#   DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark executables for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html
+# install(TARGETS ${PROJECT_NAME}_node
+#   RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+# )
+
+## Mark libraries for installation
+## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html
+# install(TARGETS ${PROJECT_NAME}
+#   ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION}
+#   RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION}
+# )
+
+## Mark cpp header files for installation
+# install(DIRECTORY include/${PROJECT_NAME}/
+#   DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION}
+#   FILES_MATCHING PATTERN "*.h"
+#   PATTERN ".svn" EXCLUDE
+# )
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+# install(FILES
+#   # myfile1
+#   # myfile2
+#   DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+# )
+
+#############
+## Testing ##
+#############
+
+## Add gtest based cpp test target and link libraries
+# catkin_add_gtest(${PROJECT_NAME}-test test/test_concatenate_pcd_uncompressed.cpp)
+# if(TARGET ${PROJECT_NAME}-test)
+#   target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME})
+# endif()
+
+## Add folders to be run by python nosetests
+# catkin_add_nosetests(test)
diff --git a/build_system/lidar_depth/concatenate_pcd_uncompressed/package.xml b/build_system/lidar_depth/concatenate_pcd_uncompressed/package.xml
new file mode 100644
index 0000000..ae0b3f9
--- /dev/null
+++ b/build_system/lidar_depth/concatenate_pcd_uncompressed/package.xml
@@ -0,0 +1,66 @@
+<?xml version="1.0"?>
+<package format="2">
+  <name>concatenate_pcd_uncompressed</name>
+  <version>0.0.0</version>
+  <description>The concatenate_pcd_uncompressed package</description>
+
+  <maintainer email="fclad@todo.todo">fclad</maintainer>
+
+  <license>TODO</license>
+
+  <buildtool_depend>catkin</buildtool_depend>
+  <build_depend>roscpp</build_depend>
+  <build_depend>pcl_ros</build_depend>
+  <build_depend>pcl_conversions</build_depend>
+  <build_export_depend>roscpp</build_export_depend>
+  <exec_depend>roscpp</exec_depend>
+  <exec_depend>pcl_ros</exec_depend>
+  <exec_depend>pcl_conversions</exec_depend>
+
+  <export>
+  </export>
+</package>
diff --git a/build_system/lidar_depth/concatenate_pcd_uncompressed/src/pcl_concatenate_points_pcd.cpp b/build_system/lidar_depth/concatenate_pcd_uncompressed/src/pcl_concatenate_points_pcd.cpp
new file mode 100644
index 0000000..de3fecc
--- /dev/null
+++ b/build_system/lidar_depth/concatenate_pcd_uncompressed/src/pcl_concatenate_points_pcd.cpp
@@ -0,0 +1,152 @@
+/*
+ * Software License Agreement (BSD License)
+ *
+ *  Point Cloud Library (PCL) - www.pointclouds.org
+ *  Copyright (c) 2010, Willow Garage, Inc.
+ *  Copyright (c) 2012-, Open Perception, Inc.
+ *
+ *  All rights reserved.
+ *
+ *  Redistribution and use in source and binary forms, with or without
+ *  modification, are permitted provided that the following conditions
+ *  are met:
+ *
+ *   * Redistributions of source code must retain the above copyright
+ *     notice, this list of conditions and the following disclaimer.
+ *   * Redistributions in binary form must reproduce the above
+ *     copyright notice, this list of conditions and the following
+ *     disclaimer in the documentation and/or other materials provided
+ *     with the distribution.
+ *   * Neither the name of the copyright holder(s) nor the names of its
+ *     contributors may be used to endorse or promote products derived
+ *     from this software without specific prior written permission.
+ *
+ *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
+ *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
+ *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ *  POSSIBILITY OF SUCH DAMAGE.
+ *
+ * $Id$
+ *
+ */
+
+/**
+
+@b pcd_concatenate_points exemplifies how to concatenate the points of two PointClouds having the same fields.
+
+**/
+
+#include <pcl/io/pcd_io.h>
+#include <pcl/console/print.h>
+#include <pcl/console/time.h>
+
+Eigen::Vector4f translation;
+Eigen::Quaternionf orientation;
+
+////////////////////////////////////////////////////////////////////////////////
+/** \brief Parse command line arguments for file names.
+  * Returns: a vector with file names indices.
+  * \param argc
+  * \param argv
+  * \param extension
+  */
+std::vector<int>
+parseFileExtensionArgument (int argc, char** argv, std::string extension)
+{
+  std::vector<int> indices;
+  for (int i = 1; i < argc; ++i)
+  {
+    std::string fname = std::string (argv[i]);
+
+    // Needs to be at least 4: .ext
+    if (fname.size () <= 4)
+      continue;
+
+    // For being case insensitive
+    std::transform (fname.begin (), fname.end (), fname.begin (), tolower);
+    std::transform (extension.begin (), extension.end (), extension.begin (), tolower);
+
+    // Check if found
+    std::string::size_type it;
+    if ((it = fname.find (extension)) != std::string::npos)
+    {
+      // Additional check: we want to be able to differentiate between .p and .png
+      if ((extension.size () - (fname.size () - it)) == 0)
+        indices.push_back (i);
+    }
+  }
+  return (indices);
+}
+
+bool
+loadCloud (const std::string &filename, pcl::PCLPointCloud2 &cloud)
+{
+  using namespace pcl::console;
+  TicToc tt;
+  print_highlight ("Loading "); print_value ("%s ", filename.c_str ());
+
+  tt.tic ();
+  if (pcl::io::loadPCDFile (filename, cloud, translation, orientation) < 0)
+    return (false);
+  print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", cloud.width * cloud.height); print_info (" points]\n");
+  print_info ("Available dimensions: "); print_value ("%s\n", pcl::getFieldsList (cloud).c_str ());
+
+  return (true);
+}
+
+void
+saveCloud (const std::string &filename, const pcl::PCLPointCloud2 &output)
+{
+  using namespace pcl::console;
+  TicToc tt;
+  tt.tic ();
+
+  print_highlight ("Saving "); print_value ("%s ", filename.c_str ());
+
+  pcl::PCDWriter w;
+  w.writeBinary (filename, output, translation, orientation);
+
+  print_info ("[done, "); print_value ("%g", tt.toc ()); print_info (" ms : "); print_value ("%d", output.width * output.height); print_info (" points]\n");
+}
+
+
+/* ---[ */
+int
+main (int argc, char** argv)
+{
+  if (argc < 2)
+  {
+    std::cerr << "Syntax is: " << argv[0] << " <file_1.pcd> <file_2.pcd> ... <file_N.pcd>" << std::endl;
+    std::cerr << "Result will be saved to output.pcd" << std::endl;
+    return (-1);
+  }
+
+  std::vector<int> file_indices = parseFileExtensionArgument (argc, argv, ".pcd");
+
+  //pcl::PointCloud<pcl::PointXYZ> cloud_all;
+  pcl::PCLPointCloud2 cloud_all;
+  for (const int &file_index : file_indices)
+  {
+    // Load the Point Cloud
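+    // All inputs must share the same field layout (see the note above);
+    // pcl::concatenate appends each cloud onto the running cloud_all.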
+    pcl::PCLPointCloud2 cloud;
+    loadCloud (argv[file_index], cloud);
+    //pcl::PointCloud<pcl::PointXYZ> cloud;
+    //pcl::io::loadPCDFile (argv[file_indices[i]], cloud);
+    //cloud_all += cloud;
+    pcl::concatenate (cloud_all, cloud, cloud_all);
+    PCL_INFO ("Total number of points so far: %u. Total data size: %lu bytes.\n", cloud_all.width * cloud_all.height, cloud_all.data.size ());
+  }
+
+  saveCloud ("output.pcd", cloud_all);
+
+  return (0);
+}
+/* ]--- */
diff --git a/build_system/lidar_depth/fasterlio_gt.py b/build_system/lidar_depth/fasterlio_gt.py
new file mode 100644
index 0000000..7d54638
--- /dev/null
+++ b/build_system/lidar_depth/fasterlio_gt.py
@@ -0,0 +1,289 @@
+import os
+import pdb
+from functools import reduce
+import multiprocessing
+import subprocess
+from datetime import datetime
+
+import argparse
+
+import h5py
+import numpy as np
+import open3d as o3d
+import pandas as pd
+import cv2
+import matplotlib.pyplot as plt
+from scipy.spatial.transform import Rotation
+
+from tqdm import tqdm
+
+from util import transform_inv, merge_pcs, load_clouds, load_trajectory, filter_global_cloud
+
+def get_view(L0_points, camera_location, camera_calib):
+
+    scan_aggregation = camera_calib["scan_aggregation"]
+
+    assert isinstance(scan_aggregation, int) and scan_aggregation >= 4 and scan_aggregation <= 400
+
+    # print(f" - scan_aggregation: {scan_aggregation}")
+
+    cam_idx = camera_location['idx']
+    start_idx = max(0,cam_idx-scan_aggregation)
+    stop_idx = min(cam_idx+scan_aggregation, len(L0_points))
+
+    L0_cloud = o3d.geometry.PointCloud()
+    L0_cloud.points = o3d.utility.Vector3dVector(np.concatenate( L0_points[start_idx:stop_idx] ))
+
+    Ln_cloud = L0_cloud.transform(camera_location['Ln_T_L0'])
+    Cn_cloud = Ln_cloud.transform(camera_calib["Cn_T_lidar"])
+
+    z_limit = 250
+    camera_fov = 64/180*np.pi  # The event camera FoV is 62 deg; use a bit more margin
+    xy_limit = z_limit * np.tan(camera_fov/2)
+
+    bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(-xy_limit, -xy_limit, 0.05),
+                                               max_bound=(xy_limit, xy_limit, z_limit))
+    Cn_cloud_crop = Cn_cloud.crop(bbox)
+
+    diameter = np.linalg.norm(np.array(Cn_cloud_crop.get_max_bound()) -
+                              np.array(Cn_cloud_crop.get_min_bound()))
+    # https://github.com/isl-org/Open3D/blob/ff22900c958d0216c85305ee8b3841e5699f9d57/examples/python/geometry/point_cloud_hidden_point_removal.py#L24C4-L24C4
+    hpr_radius = diameter * 100
+
+    mesh, pt_map = Cn_cloud_crop.hidden_point_removal(np.zeros((3,)), hpr_radius)
+    Cn_cloud_HPR = Cn_cloud_crop.select_by_index(pt_map)
+
+    camera_points = np.array(Cn_cloud_HPR.points)
+
+    rvecs = np.zeros((3,1)) # cv2.Rodrigues(np.eye(3))[0]
+    tvecs = np.zeros((3,1))
+
+    imgpts, _ = cv2.projectPoints(camera_points, rvecs, tvecs,
+                                  camera_calib['K'], camera_calib['D'])
+
+    width, height = camera_calib['resolution']
+
+    imgpts = imgpts[:,0,:]
+    valid_points = (imgpts[:, 1] >= 0) & (imgpts[:, 1] < height) & \
+                   (imgpts[:, 0] >= 0) & (imgpts[:, 0] < width)
+    imgpts = imgpts[valid_points,:]
+    depth = camera_points[valid_points,2]
+
+    idx_sorted = np.argsort(depth)
+
+    # NOTE: these sorted copies are not used by the splatting below
+    depth_sorted = depth[idx_sorted]
+    imgpts_sorted = imgpts[idx_sorted,:]
+
+    images = []
+
+    scales = 4
+
+    for i in range(scales):
+        images = [np.repeat(img,2,axis=1).repeat(2,axis=0) for img in images]
+
+        cscale = 2 ** (scales - i - 1)
+        image = np.zeros([height // cscale, width // cscale]) + np.inf
+        image[imgpts[:,1].astype(int) // cscale, imgpts[:,0].astype(int) // cscale] = depth
+        images.append(image)
+
+    image[ image==0.0 ] = np.inf
+
+    return image
+
+def get_view_mp(mp_tuple):
+    return 
get_view(*mp_tuple) + +def index_map(event_t_ds, index_map_ds, time_ds): + index_map_ds_cl = 0 + + remaining_times = time_ds[...].copy() + cur_loc = 0 + chunk_size = 10000000 + num_events = event_t_ds.shape[0] + + while remaining_times.shape[0] > 0 and cur_loc < num_events: + end = min( num_events, cur_loc+chunk_size ) + idx = cur_loc + np.searchsorted(event_t_ds[cur_loc:end], remaining_times) + + idx_remaining = (idx == end) + idx_found = (idx < end) + + index_map_ds[index_map_ds_cl:index_map_ds_cl+idx_found.sum()] = idx[idx_found] + + remaining_times = remaining_times[idx_remaining] + cur_loc = cur_loc + chunk_size + index_map_ds_cl += idx_found.sum() + +def create_h5(h5fn, flio_trajectory, Cn_T_lidar, topic): + Ln_T_cam = transform_inv(Cn_T_lidar) + number_samples = len(flio_trajectory) + + h5file = h5py.File(h5fn, 'w') + # Add metadata + version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() + h5file.attrs["version"] = version + h5file.attrs["creation_date"] = str(datetime.now()) + + lidar_trajectory = h5file.create_dataset("Ln_T_L0", (number_samples,4,4), dtype='f8') + cam_trajectory = h5file.create_dataset("Cn_T_C0", (number_samples,4,4), dtype='f8') + h5file.attrs['cam_name'] = topic + times = h5file.create_dataset("ts", (number_samples,), dtype='f8') + + for i, ft in enumerate(flio_trajectory): + lidar_trajectory[i] = ft['Ln_T_L0'] + cam_trajectory[i] = Cn_T_lidar @ ft['Ln_T_L0'] @ Ln_T_cam + times[i] = ft['timestamp'] + + return h5file, number_samples + +def save_depth_to_h5(h5file, num_samples, topic, map_data, resolution, num_mp=1, + verbose=False): + depth = h5file.create_group("depth") + + topic = "/".join( topic.split("/")[0:3] ) + + depth_shape = (num_samples,resolution[1],resolution[0]) + chunk_shape = (1,resolution[1],resolution[0]) + topic_ds = depth.create_dataset(topic[1:], depth_shape, dtype='f4', chunks=chunk_shape, compression='lzf' ) + + if num_mp == 1: + for i, packed_data in tqdm(enumerate(map_data), total=num_samples, + disable=not verbose): + topic_ds[i,...] = get_view_mp(packed_data) + else: + pool = multiprocessing.Pool(processes=num_mp) + + pool_iter = pool.imap(get_view_mp, map_data) + + for i, img in tqdm(enumerate(pool_iter), total=num_samples, + disable=not verbose): + topic_ds[i,...] 
= img + + pool.close() + +def check_trajectory(h5file, Cn_T_lidar): + frames_a = [o3d.geometry.TriangleMesh.create_coordinate_frame()] + frames_a_record = [] + frames_b = [] + frames_b_record = [] + for i in range(h5file['Ln_T_L0'].shape[0]): + frames_a_record.append( transform_inv(h5file['Ln_T_L0'][i] @ transform_inv(Cn_T_lidar) ) ) + frames_a.append(o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1).transform(frames_a_record[-1] ) ) + frames_a[-1].paint_uniform_color( np.array( [1.0, 0.647, 0.0] )) + + for i in range(h5file['Cn_T_C0'].shape[0]): + frames_b_record.append( transform_inv(h5file['Cn_T_C0'][i]) ) + frames_b.append(o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.1).transform( frames_b_record[-1] )) + o3d.visualization.draw_geometries( frames_a + frames_b ) + + diff = [] + for a, b in zip(frames_a_record, frames_b_record): + diff.append( transform_inv(a) @ b ) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--pcd_folder", required=True, + help="Dir containing the pcd") + + parser.add_argument("--traj_fn", required=True, + help="Trajectory file name") + + parser.add_argument("--timesync_fn", required=True, + help="Time synchronization file") + + parser.add_argument("--calib_h5", required=True, + help="LiDAR calibration h5 file") + + parser.add_argument("--pose_h5_fn", required=True, + help="h5 file name") + + parser.add_argument("--depth_h5_fn", + help="h5 file name") + + parser.add_argument("--scan_aggregation", required=True, + help="scan aggregation") + + parser.add_argument("--cam", default="/prophesee/left", + help="Reference cam") + + parser.add_argument("--num_mp", default=1, type=int, + help="Number of processes. Default is 1") + + parser.add_argument("--skip", default=1, type=int, + help="Number of frames to skip. 
Default is 1") + + parser.add_argument("--only_traj", action='store_true', + help="If set, only trajectory will be processed") + parser.add_argument("--verbose", action='store_true', + help="If set, will print progress") + + args = parser.parse_args() + + import yaml + with open(args.timesync_fn, 'r') as f: + timesync = yaml.safe_load(f) + + print("Loading trajectory") + skip = args.skip + flio_trajectory = load_trajectory(args.traj_fn, timesync)[::skip] + + print("Loading point clouds") + clouds = load_clouds(args.pcd_folder) + print("Filtering point clouds") + clouds = filter_global_cloud(clouds) + + print("Loading calibs") + calib_h5 = h5py.File(args.calib_h5, 'r') + cam_calib = calib_h5[args.cam + "/calib"] + + prophesee_left_T_cam = calib_h5[args.cam + "/calib/T_to_prophesee_left"] + prophesee_left_T_lidar = calib_h5["/ouster/calib/T_to_prophesee_left"] + + Cn_T_lidar = transform_inv(prophesee_left_T_cam) @ prophesee_left_T_lidar + + scan_aggregation = int(args.scan_aggregation) + + camera_calib = {"D": cam_calib['distortion_coeffs'][...], + "K": np.eye(3), + "model": cam_calib['distortion_model'][...], + "Cn_T_lidar": Cn_T_lidar, + "resolution": cam_calib['resolution'][...], + "scan_aggregation": scan_aggregation} + + camera_calib['K'][0, 0] = cam_calib['intrinsics'][0] + camera_calib['K'][1, 1] = cam_calib['intrinsics'][1] + camera_calib['K'][0, 2] = cam_calib['intrinsics'][2] + camera_calib['K'][1, 2] = cam_calib['intrinsics'][3] + + print("Creating h5 file") + h5file, nsamples = create_h5(args.pose_h5_fn, flio_trajectory, + Cn_T_lidar, args.cam) + check_trajectory(h5file, Cn_T_lidar) + + print("Computing time maps") + time_map = h5file.create_dataset("ts_map" + args.cam.replace("/", '_'), + h5file['ts'].shape, dtype='u8') + index_map(calib_h5[args.cam+"/t"], time_map, h5file['ts']) + h5file.close() + + if not args.only_traj: + print("Starting depth image generation") + map_data = [(clouds, t, camera_calib) for t in flio_trajectory] + + # Create depth h5 file and copy the contents from the trajectory h5 + print("Copying pose file into depth") + depth_h5 = h5py.File(args.depth_h5_fn, 'w') + with h5py.File(args.pose_h5_fn, 'r') as f: + for k in f: + f.copy(k, depth_h5) + + print("Processing depth to h5") + # Add metadata + version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() + depth_h5.attrs["version"] = version + depth_h5.attrs["creation_date"] = str(datetime.now()) + save_depth_to_h5(depth_h5, nsamples, args.cam, + map_data, cam_calib['resolution'], args.num_mp, + args.verbose) + depth_h5.close() diff --git a/build_system/lidar_depth/fasterlio_plot.py b/build_system/lidar_depth/fasterlio_plot.py new file mode 100755 index 0000000..b1dcfbc --- /dev/null +++ b/build_system/lidar_depth/fasterlio_plot.py @@ -0,0 +1,66 @@ +#!/usr/bin/env python3 + +import h5py +import pdb +import os +import numpy as np +import cv2 +from scipy.spatial.transform import Rotation + +from ouster.sdk.examples.open3d import viewer_3d +from ouster.client import LidarPacket, SensorInfo, Scans, Packets, ChanField, XYZLut, _utils +import open3d as o3d + +from util import kalibr_to_opencv, get_cloud, load_clouds, load_trajectory, transform_inv, filter_global_cloud + +def get_info_via_fasterlio(args): + exp_name = os.path.basename(args.tmp_folder) + + import yaml + with open(args.tmp_folder+"/"+exp_name+"_time_corrections.yaml", 'r') as f: + timesync = yaml.safe_load(f) + + traj_file = args.tmp_folder+"/"+exp_name+".traj" + traj = load_trajectory(traj_file, timesync) + 
times = np.array([t['timestamp'] for t in traj]) + orig_times = np.array([t['orig_ts'] for t in traj]) + + + inds = list(range(traj[0]['idx'], traj[-1]['idx'], 20)) + print(inds) + + cloud = o3d.geometry.PointCloud() + points = [load_clouds(args.out_folder+"/local_scans", idx) for idx in inds] + # points = filter_global_cloud(points, method='dbscan', method_kwargs={"eps":0.10, "min_points":10}) + points = np.concatenate(points) + cloud.points = o3d.utility.Vector3dVector(points) + cloud.paint_uniform_color([1, 0.706, 0]) + cloud.estimate_normals() + + frames = [] + for idx in inds: + coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0).transform( traj[idx]['L0_T_lidar'] ) + frames.append(coord) + + # points = (points @ traj_pose['Ln_T_L0'][:3,:3].T) + traj_pose['Ln_T_L0'][:3,3] + # cloud = cloud.transform( traj_pose['Ln_T_L0'] ) + + return cloud, frames + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--h5fn', help='HDF5 File') + parser.add_argument('--percentage', type=float, help='Lidar index as percentage of sequence') + parser.add_argument('--verbose', action='store_true', help='Set Verbose Mode') + parser.add_argument('--confirm_only', action='store_true', help='Set to only confirm') + parser.add_argument('--confirm_fn', help='Filename of figure to save out', default='tmp.pdf') + parser.add_argument('--npz_fn', help='Save settings for the npz') + parser.add_argument('--use_pcd', action='store_true', help="Use PCD from fasterlio") + parser.add_argument('--tmp_folder', help='Tmp folder within M3ED_Build') + parser.add_argument('--out_folder', help='Tmp folder within M3ED_Build') + args = parser.parse_args() + + flio_cloud, flio_traj = get_info_via_fasterlio(args) + + o3d.visualization.draw_geometries([flio_cloud]+flio_traj) diff --git a/build_system/lidar_depth/gt2media.bash b/build_system/lidar_depth/gt2media.bash new file mode 100755 index 0000000..7445a72 --- /dev/null +++ b/build_system/lidar_depth/gt2media.bash @@ -0,0 +1,45 @@ +#!/bin/bash +source ./build_system/preamble.bash + +output_files=("$DEPTH_VIDEO_RAW" "$DEPTH_EVENTS_VIDEO_RAW") +check_files output_files + +check_free_space fixed + +DEPTH_VIDEO_RAW_TMP="${DEPTH_VIDEO_RAW%.*}_tmp.${DEPTH_VIDEO_RAW##*.}" +DEPTH_EVENTS_VIDEO_RAW_TMP="${DEPTH_EVENTS_VIDEO_RAW%.*}_tmp.${DEPTH_EVENTS_VIDEO_RAW##*.}" + +# Run media generation +echo -e "${BLUE}Starting depth video generation${NC}" +python3 build_system/lidar_depth/gt2media.py \ + --h5_depth "$GT_DEPTH_PATH" --out "$DEPTH_VIDEO_RAW_TMP" +# Check if program exited with error +if [ $? -ne 0 ]; then + echo -e "${RED}Error creating media files for $H5_PATH${NC}" + exit 1 +fi +mv "$DEPTH_VIDEO_RAW_TMP" "$DEPTH_VIDEO_RAW" +echo -e "${GREEN}Depth video raw created${NC}" + +# Compress video +echo -e "${BLUE}Compressing${NC}" +compress_with_ffmpeg "$DEPTH_VIDEO_RAW" +echo -e "${GREEN}Compression finished${NC}" + +echo -e "${BLUE}Starting depth events video generation${NC}" +python3 build_system/lidar_depth/gt2media.py \ + --h5_depth "$GT_DEPTH_PATH" \ + --h5_events "$H5_PATH" \ + --out "$DEPTH_EVENTS_VIDEO_RAW_TMP" +# Check if program exited with error +if [ $? 
-ne 0 ]; then + echo -e "${RED}Error creating media files for $H5_PATH${NC}" + exit 1 +fi +mv "$DEPTH_EVENTS_VIDEO_RAW_TMP" "$DEPTH_EVENTS_VIDEO_RAW" +echo -e "${GREEN}Depth video raw created${NC}" + +# Compress video +echo -e "${BLUE}Compressing${NC}" +compress_with_ffmpeg "$DEPTH_EVENTS_VIDEO_RAW" +echo -e "${GREEN}Compression finished${NC}" diff --git a/build_system/lidar_depth/gt2media.py b/build_system/lidar_depth/gt2media.py new file mode 100644 index 0000000..5c86a99 --- /dev/null +++ b/build_system/lidar_depth/gt2media.py @@ -0,0 +1,115 @@ +#!/usr/bin/env python3 +import os +import sys +import yaml +import pdb +import numpy as np +import cv2 +import h5py +from matplotlib import cm +from colorama import Fore, Style + +NAX_NUM_EVENTS = 1000000 + +if __name__ == "__main__": + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--h5_depth", help="Input containing the depth", required=True) + ap.add_argument("--outfn", help="Output video file", required=True) + ap.add_argument("--h5_events", help="Input containing the events") + # Add a debug flag argument + ap.add_argument("--debug", help="Debug flag", action="store_true") + ap = ap.parse_args() + + # Check that the input depth file exists + if not os.path.exists(ap.h5_depth): + sys.exit(f"Input file does not exist: {ap.h5_depth}") + + # Check that the input for events exists + if ap.h5_events is not None: + if not os.path.exists(ap.h5_events): + sys.exit("Input file does not exist: {}".format(ap.h5_events)) + else: + use_events = True + print(Fore.YELLOW + "Creating video with events" + Style.RESET_ALL) + else: + use_events = False + print(Fore.YELLOW + "Creating video without events" + Style.RESET_ALL) + + print(Fore.BLUE + "Loading depth file..." + Style.RESET_ALL) + with h5py.File(ap.h5_depth, "r") as f: + imgs = f["/depth/prophesee/left"][:] + map = f["/ts_map_prophesee_left"][:] + num_frames, height, width = imgs.shape + + # replace all 0 with np.inf + imgs[imgs == 0] = np.inf + + min_depth = np.min(imgs) + # Get the max depth as the percentile 99 that is not inf + max_depth = np.percentile(imgs[imgs != np.inf], 99.7) + + # Initialize the video writer + fourcc = cv2.VideoWriter_fourcc(*"FFV1") + fps = 10 + video_writer = cv2.VideoWriter(ap.outfn, fourcc, + fps, (width, height), + isColor=True) + + if use_events: + events_file = h5py.File(ap.h5_events, "r") + g = events_file["/prophesee/left"] + cmap = cm.get_cmap("summer") + else: + cmap = cm.get_cmap("jet") + + # Process each frame and write to the video + for i in range(num_frames-1): + # Print without newline + if ap.debug: + print("\rProcessing frame {}/{}".format(i, num_frames), end="") + + + # Read the frame from the dataset + depth_frame = imgs[i, :, :] + depth_frame_clipped = np.clip(depth_frame, min_depth, max_depth) + depth_frame_clipped[depth_frame == np.inf] = np.inf + + # Map the depth values to a colormap + depth_frame_normalized = (depth_frame_clipped - min_depth) / (max_depth - min_depth) + depth_frame_colored = (cmap(depth_frame_normalized)[:, :, :3] * 255).astype(np.uint8) + + # Map all the inf values to black + depth_frame_colored[depth_frame == np.inf] = 0 + + if use_events: + ev_x = g["x"][map[i]:map[i+1]] + ev_y = g["y"][map[i]:map[i+1]] + ev_p = g["p"][map[i]:map[i+1]] + ev_t = g["t"][map[i]:map[i+1]] + + min_t = np.min(ev_t) + max_t = min_t + 5e3 + idx = np.logical_and(ev_t > min_t, ev_t < max_t) + + # Create indices to sample the event stream + # idx = np.random.randint(0, len(ev_x), + # np.min((NAX_NUM_EVENTS, len(ev_x)))) + + ev_x = 
ev_x[idx] + ev_y = ev_y[idx] + ev_p = ev_p[idx] + # Color the events + ev_p = ev_p*255 + color = np.array([ev_p, np.zeros_like(ev_p), 255-ev_p]) + color = color // 2 + depth_frame_colored[ev_y, ev_x, :] = color.T + + # Write the frame to the video + video_writer.write(depth_frame_colored) + if ap.debug: + print("") + # Release the video writer and close the HDF5 file + video_writer.release() + if use_events: + events_file.close() diff --git a/build_system/lidar_depth/gt2verify.bash b/build_system/lidar_depth/gt2verify.bash new file mode 100755 index 0000000..dbba44b --- /dev/null +++ b/build_system/lidar_depth/gt2verify.bash @@ -0,0 +1,22 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# output_files=("$DEPTH_VIDEO_RAW" "$DEPTH_EVENTS_VIDEO_RAW") +# check_files output_files +# if CHECK_POSE_RETURN is 0, skip this file +if [ $CHECK_POSE_RETURN -eq 0 ]; then + echo -e "${BLUE}Skipping GT Check for $H5_PATH${NC}" + exit 0 +fi + +# Run media generation +echo -e "${BLUE}Starting GT Check${NC}" +python3 build_system/lidar_depth/gt2verify.py \ + --h5_depth "$GT_POSE_PATH" \ + --absolute_error "$ABSOLUTE_POSITION_ERROR" \ + --pose_stats "$TMP_PATH/pose_stats.txt" +# Check if program exited with error +if [ $? -ne 0 ]; then + echo -e "${RED}Error creating media files for $H5_PATH${NC}" + exit 1 +fi diff --git a/build_system/lidar_depth/gt2verify.py b/build_system/lidar_depth/gt2verify.py new file mode 100644 index 0000000..3548ac9 --- /dev/null +++ b/build_system/lidar_depth/gt2verify.py @@ -0,0 +1,67 @@ +#!/usr/bin/env python3 +import os +import sys +import pdb +import numpy as np +import h5py +import matplotlib.pyplot as plt +from matplotlib import cm +from colorama import Fore, Style + +if __name__ == "__main__": + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--h5_depth", help="Input containing the depth", + required=True) + ap.add_argument("--relative_error", type=float, default=0.002, + help="Relative error allowed - cumulative distance") + ap.add_argument("--absolute_error", type=float, required=True, + help="Absolute error allowed") + ap.add_argument("--pose_stats", type=str, + help="Filename where to save debug information") + # Add a debug flag argument + ap.add_argument("--debug", help="Debug flag", action="store_true") + ap = ap.parse_args() + + # Check that the input depth file exists + if not os.path.exists(ap.h5_depth): + sys.exit(f"Input file does not exist: {ap.h5_depth}") + + print(Fore.BLUE + "Loading depth file..." 
+ Style.RESET_ALL) + with h5py.File(ap.h5_depth, "r") as f: + Cn_T_C0 = f["/Cn_T_C0"][:] + C0_T_Cn = np.linalg.inv( Cn_T_C0 ) + Ln_T_L0 = f["/Ln_T_L0"][:] + L0_T_Ln = np.linalg.inv( Ln_T_L0 ) + + if ap.debug: + print("Start") + print(C0_T_Cn[0]) + print("Stop") + print(C0_T_Cn[-1]) + + t0_T_tn = C0_T_Cn[:,:3,3] + + frame_translation = np.diff(t0_T_tn, axis=0) + frame_dist = np.sqrt(np.sum(frame_translation**2, axis=1)) + cumulative_distance = np.sum(frame_dist) + error = np.sum(np.sqrt(np.sum(np.diff(t0_T_tn[[0,-1],:], axis=0)**2, axis=1))) + + print(Fore.YELLOW + f"Cumulative distance: {cumulative_distance}" + Style.RESET_ALL) + print(Fore.YELLOW + f"Error: {error}" + Style.RESET_ALL) + + error_threshold = cumulative_distance * ap.relative_error + ap.absolute_error + print(Fore.YELLOW + f"Error threshold: {error_threshold}" + Style.RESET_ALL) + with open(ap.pose_stats, "w") as f: + f.write(f"Input file: {ap.h5_depth}\n") + f.write(f"Cumulative distance: {cumulative_distance}\n") + f.write(f"Error threshold: {error_threshold}\n") + f.write(f"Error: {error}\n") + f.write(f"Relative error (parameter): {ap.relative_error}\n") + f.write(f"Absolute error (parameter): {ap.absolute_error}\n") + + if error < error_threshold: + print(Fore.GREEN + "PASSED" + Style.RESET_ALL) + else: + print(Fore.RED + "FAILED" + Style.RESET_ALL) + sys.exit(1) diff --git a/build_system/lidar_depth/lidar_calib.py b/build_system/lidar_depth/lidar_calib.py new file mode 100755 index 0000000..972165b --- /dev/null +++ b/build_system/lidar_depth/lidar_calib.py @@ -0,0 +1,308 @@ +#!/usr/bin/env python3 + +""" GUI for hand calibrating the LiDAR with the different sequences """ + +import h5py +import pdb +import os +import numpy as np +import cv2 +from scipy.spatial.transform import Rotation +from scipy.spatial.transform import Slerp + +from ouster.sdk.examples.open3d import viewer_3d +from ouster.client import LidarPacket, SensorInfo, Scans, Packets, ChanField, XYZLut, _utils +import open3d as o3d + +from util import kalibr_to_opencv, get_cloud, load_clouds, load_trajectory, load_imu_based_calib + +class ManualCalibrator: + def __init__(self, cloud_image_list, camera_calib, R_imu, fn=None): + # Prepare point cloud for better plotting + bbox = o3d.geometry.AxisAlignedBoundingBox(min_bound=(1,-30,-30), max_bound=(150,30,30)) + + self.clouds = [c[0].crop(bbox) for c in cloud_image_list] + self.cloud_points = [np.array(c.points) for c in self.clouds] + self.cloud_colors = [np.array(c.colors) for c in self.clouds] + + self.images = [c[1] for c in cloud_image_list] + + self.camera_calib = camera_calib + self.point_scale = 1 + + self.R_ideal = np.array([[ 0, 0, 1], + [-1, 0, 0], + [ 0,-1, 0]]) + + self.fn = fn + if os.path.exists(self.fn): + lidar_calib_info = np.load(self.fn) + if not 'R_imu' in lidar_calib_info or not 'origin' in lidar_calib_info: + self.rotvec = np.zeros(3) + self.t = lidar_calib_info['t_c_l'] + self.R_imu = R_imu + else: + if lidar_calib_info['origin'] == self.fn: + self.R_imu = lidar_calib_info['R_imu'] + self.rotvec = lidar_calib_info['r_c_l'] + self.t = lidar_calib_info['t_c_l'] + else: + self.R_imu = R_imu + self.rotvec = np.zeros(3) + self.t = lidar_calib_info['t_c_l'] + else: + self.rotvec = np.zeros(3) + self.t = np.zeros(3) + self.R_imu = R_imu + + print("Initializing") + print(self.rotvec) + print(self.t) + print(self.R_imu) + + self.masks = [None] * len(self.clouds) + + def show_cloud(self): + o3d.visualization.draw_geometries(self.clouds) + + def project_points(self, cloud_points, mask, 
cloud_colors=None,
+                       confirm_only=False):
+        T_c_l = np.eye(4)
+        T_c_l[:3,:3] = (Rotation.from_matrix(self.R_imu) * Rotation.from_rotvec(self.rotvec)).as_matrix().T
+        T_c_l[:3,3] = self.t
+
+        if not confirm_only:
+            np.savez(self.fn, T_c_l=T_c_l, r_c_l=self.rotvec, t_c_l=self.t, R_imu=self.R_imu, origin=self.fn)
+
+        transformed_cloud = cloud_points @ (Rotation.from_matrix(self.R_imu) * Rotation.from_rotvec(self.rotvec)).as_matrix() + self.t
+
+        rvecs = cv2.Rodrigues(np.eye(3))[0]
+        tvecs = np.zeros((3,1))
+
+        imgpts, jac = cv2.projectPoints(
+                transformed_cloud.reshape(-1,1,3),
+                rvecs,
+                tvecs,
+                self.camera_calib['camera_matrix'],
+                self.camera_calib['distortion_coefficients'])
+
+        if mask is None:
+            mask = (imgpts[:,:,0] > 0) * (imgpts[:,:,0]<1280) # * (imgpts[:,:,1] > 0) * (imgpts[:,:,1]<720)
+            mask = mask.squeeze()
+
+        imgpts = imgpts.squeeze()[mask]
+        depth = transformed_cloud[mask][:,2]
+        colors = np.log(depth) # self.cloud_colors[mask]
+        colors = colors / colors.max()
+
+        return imgpts, colors, mask
+
+    def update_transform(self, val):
+        self.t = np.array([self.slider_l_x.val, self.slider_l_y.val, self.slider_l_z.val])
+        self.rotvec = np.array([self.slider_r_x.val, self.slider_r_y.val, self.slider_r_z.val])
+
+        self.point_scale = self.slider_scale.val
+
+        for i in range(len(self.clouds)):
+            imgpts, colors, mask = self.project_points(self.cloud_points[i], self.masks[i])
+
+            self.scatter_plts[i].set_offsets( imgpts )
+            self.scatter_plts[i].set_sizes( np.ones((imgpts.shape[0],)) * self.point_scale)
+
+        self.fig.canvas.draw_idle()
+
+    def plot(self, confirm_only=False, figfn='tmp.pdf'):
+        if confirm_only:
+            import matplotlib as mpl
+            mpl.use('Agg')
+            import matplotlib.pyplot as plt
+            imgpts, colors,_ = self.project_points(self.cloud_points[0], None,
+                                                   confirm_only=confirm_only)
+            self.fig, axes = plt.subplots(1,1)
+            # Show the first camera image behind the projected points
+            axes.imshow(self.images[0].squeeze(), cmap='gray')
+            axes.scatter(x=imgpts[:,0], y=imgpts[:,1], c=colors, s=0.5)
+            self.fig.savefig(figfn, dpi=400)
+        else:
+            import matplotlib.pyplot as plt
+
+            self.fig, axes = plt.subplots(8,len(self.clouds),height_ratios=[25,1,1,1,1,1,1,1],width_ratios=[1]*len(self.clouds), squeeze=False)
+
+            self.img_plts = []
+            self.scatter_plts = []
+            for i in range(len(self.clouds)):
+                imgpts, colors, mask = self.project_points(self.cloud_points[i], self.masks[i])
+                self.masks[i] = mask
+                self.img_plts.append(axes[0,i].imshow(self.images[i], cmap='gray'))
+                self.scatter_plts.append(axes[0,i].scatter(x=imgpts[:,0], y=imgpts[:,1], c=colors, s=self.point_scale))
+                axes[0,i].set_xlim([-100,1380])
+                axes[0,i].set_ylim([820,-100])
+
+            center_axis_id = len(self.clouds) // 2
+            from matplotlib.widgets import Slider, Button
+            self.slider_l_x = Slider(axes[1, center_axis_id], "X", -0.40, 0.40, valinit=self.t[0])
+            self.slider_l_y = Slider(axes[2, center_axis_id], "Y", -0.40, 0.40, valinit=self.t[1])
+            self.slider_l_z = Slider(axes[3, center_axis_id], "Z", -0.40, 0.40, valinit=self.t[2])
+
+            self.slider_r_x = Slider(axes[4, center_axis_id], "Rx", -0.05, 0.05, valinit=self.rotvec[0])
+            self.slider_r_y = Slider(axes[5, center_axis_id], "Ry", -0.15, 0.07, valinit=self.rotvec[1])
+            self.slider_r_z = Slider(axes[6, center_axis_id], "Rz", -0.15, 0.15, valinit=self.rotvec[2])
+
+            self.slider_scale = Slider(axes[7, center_axis_id], "S", 0.5, 20, valinit=2)
+
+            self.slider_l_x.on_changed(self.update_transform)
+            self.slider_l_y.on_changed(self.update_transform)
+            self.slider_l_z.on_changed(self.update_transform)
+            self.slider_r_x.on_changed(self.update_transform)
+
self.slider_r_y.on_changed(self.update_transform) + self.slider_r_z.on_changed(self.update_transform) + self.slider_scale.on_changed(self.update_transform) + + plt.show() + +def get_images(h5f, idx): + return { + '/ovc/left': h5f['/ovc/left/data'][idx,...], + '/ovc/right': h5f['/ovc/right/data'][idx,...], + '/ovc/ts': h5f['/ovc/ts'][idx,...], + } + +def get_info_via_scans(hdf5_file, args, ts): + lidar_start_ts = hdf5_file['/ouster/ts_start'] + lidar_end_ts = hdf5_file['/ouster/ts_end'] + + lidar_mid_times = ( lidar_start_ts[...] + lidar_end_ts[...] ) / 2 + lidar_idx = np.argmin( np.abs(lidar_mid_times - ts) ) + lidar_mid_time = lidar_mid_times[lidar_idx] + + camera_ts = hdf5_file['/ovc/ts'] + + cam_idx = np.argmin( np.abs(lidar_mid_time - camera_ts) ) + + cloud = get_cloud(hdf5_file, lidar_idx) + images = get_images(hdf5_file, cam_idx) + return cloud, images + +def get_info_via_fasterlio(hdf5_file, args, percentage): + exp_name = os.path.basename(args.tmp_folder) + + import yaml + with open(args.tmp_folder+"/"+exp_name+"_time_corrections.yaml", 'r') as f: + timesync = yaml.safe_load(f) + + traj_file = args.tmp_folder+"/"+exp_name+".traj" + traj = load_trajectory(traj_file, timesync, hdf5_file) + orig_times = np.array([t['orig_ts'] for t in traj]) + orig_corrected_times = np.array([t['orig_corrected_ts'] for t in traj]) + + lidar_times = np.array([t['timestamp'] for t in traj]) + # nominal_times = (np.arange(lidar_times.shape[0])+1)* 100000 + # offset = (lidar_times - nominal_times)[0] + # offset = (offset/1000).round() * 1000 + # lidar_times = nominal_times + offset + + ovc_times = hdf5_file['/ovc/ts'][...] + + from scipy.spatial.distance import cdist + all_pair_times = cdist(lidar_times[:,None], ovc_times[:,None]) + lidar_to_camera = np.argmin(np.abs(all_pair_times),axis=1) + lidar_to_camera = np.searchsorted(ovc_times, lidar_times ) + lidar_to_camera[lidar_to_camera == 0] = -1 + lidar_to_camera[lidar_to_camera >= ovc_times.shape[0]] = -1 + + # lidar_times = lidar_times[lidar_to_camera < ovc_times.shape[0]] + # lidar_to_camera = lidar_to_camera[lidar_to_camera < ovc_times.shape[0]] + + closest_ovc_times = ovc_times[lidar_to_camera] + closest_time_diffs = closest_ovc_times - lidar_times + + lidar_idx = int(percentage * lidar_to_camera.shape[0]) + cam_idx = lidar_to_camera[lidar_idx] + lidar_idx += 0 + + if np.abs(lidar_times[lidar_idx] - ovc_times[cam_idx]) > 20000: + print("Foo") + lidar_idx += 1 + cam_idx = lidar_to_camera[lidar_idx] + + traj_entry = traj[lidar_idx] + lidar_time = traj_entry['timestamp'] + + Lnm1_T_L0 = traj[lidar_idx-1]['Ln_T_L0'] # time = -100000 + Ln_T_L0 = traj_entry['Ln_T_L0'] # time = 0 + Lnp1_T_L0 = traj[lidar_idx+1]['Ln_T_L0'] # time = 100000 + + R_T_R0 = Rotation.from_matrix([ + Lnm1_T_L0[:3,:3], + Ln_T_L0[:3,:3], + Lnp1_T_L0[:3,:3], + ]) + slerp = Slerp([-100000,0,100000], R_T_R0) # setup slerp interpolation + + rel_cam_time = (ovc_times[cam_idx] - lidar_time) + + Lc_T_L0 = np.eye(4) + Lc_T_L0[:3,:3] = slerp(rel_cam_time).as_matrix() # interpolate to current time + + if rel_cam_time < 0: + a, b, c = (-rel_cam_time/100000), ((100000+rel_cam_time)/100000), 0.0 + elif rel_cam_time > 0: + a, b, c = 0.0, ((100000-rel_cam_time)/100000), ((rel_cam_time)/100000) + else: + a, b, c = 0.0, 1.0, 0.0 + + Lc_T_L0[:3,3:] = a * Lnm1_T_L0[:3,3:] + b * Ln_T_L0[:3,3:] + c * Lnp1_T_L0[:3,3:] + + cloud = o3d.geometry.PointCloud() + points = [load_clouds(args.out_folder+"/local_scans", idx) for idx in range(traj_entry['idx'], traj_entry['idx']+4)] + points = np.concatenate(points) + 
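+    # Transform the aggregated world-frame (L0) points into the lidar frame at
+    # the interpolated camera timestamp; points are row vectors, so the
+    # rotation acts transposed on the right.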
+ points = (points @ Lc_T_L0[:3,:3].T) + Lc_T_L0[:3,3] + + cloud.points = o3d.utility.Vector3dVector(points) + cloud.paint_uniform_color([1, 0.706, 0]) + + mesh, pt_map = cloud.hidden_point_removal(np.zeros((3,)), 10000. ) # camera_location['Ln_T_L0'][:3,3], 1000000.0 ) + cloud = cloud.select_by_index(pt_map) + + images = get_images(hdf5_file, cam_idx) + + print("{ cam_idx:", cam_idx, ", lidar_idx: ", lidar_idx) + print(" dt ", ovc_times[cam_idx] - lidar_time) + + return cloud, images['/ovc/left'] + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--h5fn', help='HDF5 File') + parser.add_argument('--percentage', nargs='+',type=float, help='Lidar index as percentage of sequence') + parser.add_argument('--verbose', action='store_true', help='Set Verbose Mode') + parser.add_argument('--confirm_only', action='store_true', help='Set to only confirm') + parser.add_argument('--confirm_fn', help='Filename of figure to save out', default='tmp.pdf') + parser.add_argument('--npz_fn', help='Save settings for the npz') + parser.add_argument('--use_pcd', action='store_true', help="Use PCD from fasterlio") + parser.add_argument('--tmp_folder', help='Tmp folder within M3ED_Build') + parser.add_argument('--out_folder', help='Tmp folder within M3ED_Build') + args = parser.parse_args() + + hdf5_file = h5py.File(args.h5fn, 'r') + + lidar_end_ts = hdf5_file['/ouster/ts_end'] + + clouds_and_images = [get_info_via_fasterlio(hdf5_file, args, p) for p in args.percentage] + + left_h5_calib = hdf5_file['/ovc/left/calib'] + K = np.eye(3) + np.fill_diagonal(K[:2,:2], left_h5_calib['intrinsics'][:2]) + K[:2,2] = left_h5_calib['intrinsics'][2:] + D = np.zeros(5) + D[:4] = left_h5_calib['distortion_coeffs'] + left_cam_calib = { + "camera_matrix": K, + "distortion_coefficients": D, + } + + R_imu = load_imu_based_calib(hdf5_file) + + mc = ManualCalibrator(clouds_and_images, left_cam_calib, R_imu, args.npz_fn) + mc.plot(args.confirm_only, args.confirm_fn) diff --git a/build_system/lidar_depth/long_range_ouster64.yaml b/build_system/lidar_depth/long_range_ouster64.yaml new file mode 100644 index 0000000..7c82ec6 --- /dev/null +++ b/build_system/lidar_depth/long_range_ouster64.yaml @@ -0,0 +1,47 @@ +common: + lid_topic: "/os_node/points" + imu_topic: "/os_node/imu" + time_sync_en: false # ONLY turn on when external time synchronization is really not possible + +preprocess: + lidar_type: 3 # 1 for Livox serials LiDAR, 2 for Velodyne LiDAR, 3 for ouster LiDAR, + scan_line: 64 + blind: 4 + time_scale: 1e-3 + +mapping: + acc_cov: 0.1 + gyr_cov: 0.1 + b_acc_cov: 0.0001 + b_gyr_cov: 0.0001 + fov_degree: 180 + det_range: 150.0 + extrinsic_est_en: false # true: enable the online estimation of IMU-LiDAR extrinsic + extrinsic_T: [ -0.00625, 0.01175, -0.007645 ] + extrinsic_R: [1, 0, 0, + 0, 1, 0, + 0, 0, 1] + +publish: + path_publish_en: false + scan_publish_en: true # false: close all the point cloud output + scan_effect_pub_en: true # true: publish the pointscloud of effect point + dense_publish_en: false # false: low down the points number in a global-frame point clouds scan. + scan_bodyframe_pub_en: true # true: output the point cloud scans in IMU-body-frame + +path_save_en: true + +pcd_save: + pcd_save_en: true + interval: 1 # how many LiDAR frames saved in each pcd file; + # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames. 
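+# The parameters below tune faster-lio's scan downsampling and iVox local map;
+# this long-range profile uses coarser 0.20 filter/grid sizes and a 5000
+# cube_side_length (compare short_range_ouster64.yaml).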
+feature_extract_enable: false
+point_filter_num: 3
+max_iteration: 4
+filter_size_surf: 0.20
+filter_size_map: 0.20 # currently unused; the code uses 0, i.e. it tends to add every point of the downsampled scan to the map
+cube_side_length: 5000
+
+ivox_grid_resolution: 0.20 # default=0.2
+ivox_nearby_type: 26 # 6, 18, 26
+esti_plane_threshold: 0.1 # default=0.1
diff --git a/build_system/lidar_depth/ouster_bag_convert/CMakeLists.txt b/build_system/lidar_depth/ouster_bag_convert/CMakeLists.txt
new file mode 100644
index 0000000..ce21151
--- /dev/null
+++ b/build_system/lidar_depth/ouster_bag_convert/CMakeLists.txt
@@ -0,0 +1,218 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(ouster_bag_convert)
+
+find_package(Eigen3 REQUIRED)
+find_package(PCL REQUIRED COMPONENTS common)
+find_package(tf2_eigen REQUIRED)
+find_package(CURL REQUIRED)
+find_package(Boost REQUIRED)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+  geometry_msgs
+  ouster_ros
+  roscpp
+  sensor_msgs
+  std_msgs
+  pcl_conversions
+  tf2
+  tf2_ros
+  rosbag
+)
+
+## System dependencies are found with CMake's conventions
+# find_package(Boost REQUIRED COMPONENTS system)
+
+
+## Uncomment this if the package has a setup.py. This macro ensures
+## modules and global scripts declared therein get installed
+## See http://ros.org/doc/api/catkin/html/user_guide/setup_dot_py.html
+# catkin_python_setup()
+
+################################################
+## Declare ROS messages, services and actions ##
+################################################
+
+## To declare and build messages, services or actions from within this
+## package, follow these steps:
+## * Let MSG_DEP_SET be the set of packages whose message types you use in
+##   your messages/services/actions (e.g. std_msgs, actionlib_msgs, ...).
+## * In the file package.xml:
+##   * add a build_depend tag for "message_generation"
+##   * add a build_depend and a exec_depend tag for each package in MSG_DEP_SET
+##   * If MSG_DEP_SET isn't empty the following dependency has been pulled in
+##     but can be declared for certainty nonetheless:
+##     * add a exec_depend tag for "message_runtime"
+## * In this file (CMakeLists.txt):
+##   * add "message_generation" and every package in MSG_DEP_SET to
+##     find_package(catkin REQUIRED COMPONENTS ...)
+##   * add "message_runtime" and every package in MSG_DEP_SET to
+##     catkin_package(CATKIN_DEPENDS ...)
+##   * uncomment the add_*_files sections below as needed
+##     and list every .msg/.srv/.action file to be processed
+##   * uncomment the generate_messages entry below
+##   * add every package in MSG_DEP_SET to generate_messages(DEPENDENCIES ...)
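+## (ouster_bag_convert declares no messages, services, or actions of its own;
+## the template sections below are left untouched.)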
+ +## Generate messages in the 'msg' folder +# add_message_files( +# FILES +# Message1.msg +# Message2.msg +# ) + +## Generate services in the 'srv' folder +# add_service_files( +# FILES +# Service1.srv +# Service2.srv +# ) + +## Generate actions in the 'action' folder +# add_action_files( +# FILES +# Action1.action +# Action2.action +# ) + +## Generate added messages and services with any dependencies listed here +# generate_messages( +# DEPENDENCIES +# geometry_msgs# sensor_msgs# std_msgs +# ) + +################################################ +## Declare ROS dynamic reconfigure parameters ## +################################################ + +## To declare and build dynamic reconfigure parameters within this +## package, follow these steps: +## * In the file package.xml: +## * add a build_depend and a exec_depend tag for "dynamic_reconfigure" +## * In this file (CMakeLists.txt): +## * add "dynamic_reconfigure" to +## find_package(catkin REQUIRED COMPONENTS ...) +## * uncomment the "generate_dynamic_reconfigure_options" section below +## and list every .cfg file to be processed + +## Generate dynamic reconfigure parameters in the 'cfg' folder +# generate_dynamic_reconfigure_options( +# cfg/DynReconf1.cfg +# cfg/DynReconf2.cfg +# ) + +################################### +## catkin specific configuration ## +################################### +## The catkin_package macro generates cmake config files for your package +## Declare things to be passed to dependent projects +## INCLUDE_DIRS: uncomment this if your package contains header files +## LIBRARIES: libraries you create in this project that dependent projects also need +## CATKIN_DEPENDS: catkin_packages dependent projects also need +## DEPENDS: system dependencies of this project that dependent projects also need +catkin_package( +# INCLUDE_DIRS include +# LIBRARIES ouster_bag_convert +# CATKIN_DEPENDS geometry_msgs ouster_ros roscpp sensor_msgs std_msgs +# DEPENDS system_lib +) + +########### +## Build ## +########### + +## Specify additional locations of header files +## Your package locations should be listed before other locations +include_directories( +# include + ${catkin_INCLUDE_DIRS} +) + +## Declare a C++ library +# add_library(${PROJECT_NAME} +# src/${PROJECT_NAME}/ouster_bag_convert.cpp +# ) + +## Add cmake target dependencies of the library +## as an example, code may need to be generated before libraries +## either from message generation or dynamic reconfigure +# add_dependencies(${PROJECT_NAME} ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Declare a C++ executable +## With catkin_make all packages are built within a single CMake context +## The recommended prefix ensures that target names across packages don't collide +add_executable(ouster_bag_converter src/bag_rewriter.cpp) + +## Rename C++ executable without prefix +## The above recommended prefix causes long target names, the following renames the +## target back to the shorter version for ease of user use +## e.g. 
"rosrun someones_pkg node" instead of "rosrun someones_pkg someones_pkg_node" +# set_target_properties(${PROJECT_NAME}_node PROPERTIES OUTPUT_NAME node PREFIX "") + +## Add cmake target dependencies of the executable +## same as for the library above +# add_dependencies(ouster_bag_converter ${${PROJECT_NAME}_EXPORTED_TARGETS} ${catkin_EXPORTED_TARGETS}) + +## Specify libraries to link a library or executable target against +target_link_libraries(ouster_bag_converter + ${catkin_LIBRARIES} +) + +############# +## Install ## +############# + +# all install targets should use catkin DESTINATION variables +# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html + +## Mark executable scripts (Python etc.) for installation +## in contrast to setup.py, you can choose the destination +# catkin_install_python(PROGRAMS +# scripts/my_python_script +# DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark executables for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_executables.html +# install(TARGETS ${PROJECT_NAME}_node +# RUNTIME DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION} +# ) + +## Mark libraries for installation +## See http://docs.ros.org/melodic/api/catkin/html/howto/format1/building_libraries.html +# install(TARGETS ${PROJECT_NAME} +# ARCHIVE DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# LIBRARY DESTINATION ${CATKIN_PACKAGE_LIB_DESTINATION} +# RUNTIME DESTINATION ${CATKIN_GLOBAL_BIN_DESTINATION} +# ) + +## Mark cpp header files for installation +# install(DIRECTORY include/${PROJECT_NAME}/ +# DESTINATION ${CATKIN_PACKAGE_INCLUDE_DESTINATION} +# FILES_MATCHING PATTERN "*.h" +# PATTERN ".svn" EXCLUDE +# ) + +## Mark other files for installation (e.g. launch and bag files, etc.) +# install(FILES +# # myfile1 +# # myfile2 +# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION} +# ) + +############# +## Testing ## +############# + +## Add gtest based cpp test target and link libraries +# catkin_add_gtest(${PROJECT_NAME}-test test/test_ouster_bag_convert.cpp) +# if(TARGET ${PROJECT_NAME}-test) +# target_link_libraries(${PROJECT_NAME}-test ${PROJECT_NAME}) +# endif() + +## Add folders to be run by python nosetests +# catkin_add_nosetests(test) diff --git a/build_system/lidar_depth/ouster_bag_convert/package.xml b/build_system/lidar_depth/ouster_bag_convert/package.xml new file mode 100644 index 0000000..bf9c50d --- /dev/null +++ b/build_system/lidar_depth/ouster_bag_convert/package.xml @@ -0,0 +1,74 @@ + + + ouster_bag_convert + 0.0.0 + The ouster_bag_convert package + + + + + ken + + + + + + TODO + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + catkin + geometry_msgs + ouster_ros + roscpp + sensor_msgs + std_msgs + geometry_msgs + ouster_ros + roscpp + sensor_msgs + std_msgs + geometry_msgs + ouster_ros + roscpp + sensor_msgs + std_msgs + + + + + + + + diff --git a/build_system/lidar_depth/ouster_bag_convert/src/bag_rewriter.cpp b/build_system/lidar_depth/ouster_bag_convert/src/bag_rewriter.cpp new file mode 100644 index 0000000..e8b0b5a --- /dev/null +++ b/build_system/lidar_depth/ouster_bag_convert/src/bag_rewriter.cpp @@ -0,0 +1,139 @@ +#include +#include +#include +#include +#include +#include + +#include + +#include + +// Not defined in a header in ouster_ros...so copied +inline ros::Time to_ros_time(uint64_t ts) { + ros::Time t; + t.fromNSec(ts); + return t; +} + +// Not defined in a header in ouster_ros...so copied +inline ros::Time to_ros_time(std::chrono::nanoseconds ts) { + return to_ros_time(ts.count()); 
+}
+
+std::string get_metadata(rosbag::Bag& bag){
+    // Grab single message on /os_node/metadata
+    std::string info;
+
+    for(rosbag::MessageInstance const m: rosbag::View(bag, rosbag::TopicQuery("/os_node/metadata")))
+    {
+        std_msgs::String::ConstPtr str = m.instantiate<std_msgs::String>();
+        if (str != nullptr)
+            info = str->data;
+    }
+
+    return info;
+}
+
+sensor_msgs::ImuPtr PacketMsg_to_Imu(const ouster_ros::PacketMsg::ConstPtr& pm, ouster::sensor::sensor_info& info) {
+    // Follow the logic from os_cloud_node.cpp in the ouster_ros driver
+    // Specifically we use the sensor time code path
+    auto pf = ouster::sensor::get_format(info);
+    ros::Time msg_ts = to_ros_time(pf.imu_gyro_ts(pm->buf.data()));
+    sensor_msgs::Imu imu_msg = ouster_ros::packet_to_imu_msg(*pm, msg_ts, "os_imu", pf);
+    sensor_msgs::ImuPtr imu_msg_ptr =
+        boost::make_shared<sensor_msgs::Imu>(imu_msg);
+    return imu_msg_ptr;
+}
+
+sensor_msgs::PointCloud2Ptr LidarScan_to_PointCloud(ouster::LidarScan& ls, ouster::XYZLut& xyz_lut, ouster::sensor::sensor_info& info) {
+    // Follow the logic from os_cloud_node.cpp in the ouster_ros driver
+    // Specifically we use the sensor time code path
+    sensor_msgs::PointCloud2Ptr pc_ptr;
+
+    auto ts_v = ls.timestamp();
+    auto idx = std::find_if(ts_v.data(), ts_v.data() + ts_v.size(),
+                            [](uint64_t h) { return h != 0; });
+    if (idx == ts_v.data() + ts_v.size()) return pc_ptr;
+    auto scan_ts = std::chrono::nanoseconds{ts_v(idx - ts_v.data())};
+    ros::Time msg_ts = to_ros_time(scan_ts);
+
+    uint32_t H = info.format.pixels_per_column;
+    uint32_t W = info.format.columns_per_frame;
+    ouster_ros::Cloud cloud{W,H};
+
+    ouster_ros::scan_to_cloud(xyz_lut, scan_ts, ls, cloud, 0);
+
+    sensor_msgs::PointCloud2 pc =
+        ouster_ros::cloud_to_cloud_msg(cloud, msg_ts, "os_lidar");
+    pc_ptr = boost::make_shared<sensor_msgs::PointCloud2>(pc);
+
+    return pc_ptr;
+}
+
+int main(int argc, char *argv[])
+{
+    if(argc < 3){
+        std::cout << "Usage: rosrun ouster_bag_convert ouster_bag_converter <input.bag> <output.bag>" << std::endl;
+        return 1;
+    }
+
+    std::string read_bag_fn{argv[1]};
+    std::string write_bag_fn{argv[2]};
+
+    std::cout << "=== Opening ===" << std::endl;
+    std::cout << " - Reading: " << read_bag_fn << std::endl;
+    std::cout << " - Writing: " << write_bag_fn << std::endl;
+
+    // Read bag with topics to pull packets from
+    rosbag::Bag read_bag;
+    read_bag.open(read_bag_fn, rosbag::bagmode::Read);
+    const std::string read_topic_lidar{"/os_node/lidar_packets"};
+    const std::string read_topic_imu{"/os_node/imu_packets"};
+
+    // Write bag with topics to write to
+    rosbag::Bag write_bag;
+    write_bag.open(write_bag_fn, rosbag::bagmode::Write);
+    const std::string write_topic_lidar{"/os_node/points"};
+    const std::string write_topic_imu{"/os_node/imu"};
+
+    // Grab Ouster metadata from bag
+    std::string ouster_metadata = get_metadata(read_bag);
+
+    ouster::sensor::sensor_info info = ouster::sensor::parse_metadata(ouster_metadata);
+    ouster::XYZLut xyz_lut = ouster::make_xyz_lut(info);
+    uint32_t H = info.format.pixels_per_column;
+    uint32_t W = info.format.columns_per_frame;
+    std::unique_ptr<ouster::ScanBatcher> scan_batcher = std::make_unique<ouster::ScanBatcher>(info);
+    ouster::LidarScan ls{W, H, info.format.udp_profile_lidar};
+
+    std::cout << "Starting loop" << std::endl;
+    int i = 0;
+
+    for(rosbag::MessageInstance const m: rosbag::View(read_bag, rosbag::TopicQuery({read_topic_lidar, read_topic_imu})))
+    {
+        ouster_ros::PacketMsg::ConstPtr pm = m.instantiate<ouster_ros::PacketMsg>();
+
+        if(m.getTopic() == read_topic_imu) {
+            auto imu_ptr = PacketMsg_to_Imu(pm, info);
+            if(!imu_ptr)continue;
+
+            write_bag.write(write_topic_imu, m.getTime(), imu_ptr);
+
+        }
else if(m.getTopic() == read_topic_lidar) { + if(!(*scan_batcher)(pm->buf.data(), ls))continue; // Not a full scan + auto pc_ptr = LidarScan_to_PointCloud(ls, xyz_lut, info); + if(!pc_ptr)continue; + + write_bag.write(write_topic_lidar, m.getTime(), pc_ptr); + } + + if (i++ % 1000 == 0) std::cout << "+" << std::flush; + } + std::cout << std::endl << "Done" << std::endl; + + read_bag.close(); + write_bag.close(); + + return 0; +} diff --git a/build_system/lidar_depth/pcd2gt.bash b/build_system/lidar_depth/pcd2gt.bash new file mode 100755 index 0000000..2dfd4ff --- /dev/null +++ b/build_system/lidar_depth/pcd2gt.bash @@ -0,0 +1,33 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# Do not run this script for any calibration bags. +if [[ "$BAG_NAME" == *"calib"* ]]; then + echo -e "${YELLOW}Skipping calibration bag: $BAG_NAME${NC}" + exit 0 +fi + +# output_files includes TRAJ_PATH, PCD_LOCAL_PATH, and GT_PATH +output_files=("$GT_POSE_PATH" "$GT_DEPTH_PATH") +check_files output_files +check_free_space fixed + +# Generate GT +echo -e "${BLUE}Starting GT generation${NC}" +python3 build_system/lidar_depth/fasterlio_gt.py \ + --pcd_folder "$PCD_LOCAL_PATH" \ + --traj_fn "$TRAJ_PATH" \ + --timesync_fn "$TIME_CORRECTIONS_PATH" \ + --calib_h5 "$H5_PATH" \ + --pose_h5_fn "$GT_POSE_PATH.tmp" \ + --depth_h5_fn "$GT_DEPTH_PATH.tmp" \ + --scan_aggregation "$DEPTH_SCAN_AGGREGATION" +if [ $? -ne 0 ]; then + echo -e "${RED}Error generating GT${NC}" + exit 1 +fi + echo -e "${GREEN}GT generation finished${NC}" + +# Move file to final destination +mv "$GT_POSE_PATH.tmp" "$GT_POSE_PATH" +mv "$GT_DEPTH_PATH.tmp" "$GT_DEPTH_PATH" diff --git a/build_system/lidar_depth/rosbag2oustermetadata.py b/build_system/lidar_depth/rosbag2oustermetadata.py new file mode 100644 index 0000000..1e7882e --- /dev/null +++ b/build_system/lidar_depth/rosbag2oustermetadata.py @@ -0,0 +1,42 @@ +import roslib +import rospy +import rosbag +from sensor_msgs.msg import CompressedImage, Image + +import argparse +import os +import shutil + +import h5py +import numpy as np +import math +import cv2 +from cv_bridge import CvBridge, CvBridgeError +bridge = CvBridge() +import pdb +from collections import defaultdict + +from tqdm import tqdm + +def get_ouster_info(bag, topic="/os_node/metadata"): + for topic, msg, t in bag.read_messages(topics=[topic]): + ouster_metadata = msg.data + return ouster_metadata + +def process_bag(filename, ouster_fn=None): + # Open bag + bag = rosbag.Bag(filename) + metadata = get_ouster_info(bag) + + print("Saving metadata to -- %s" % ouster_fn) + with open(ouster_fn, 'w') as f: + f.write(metadata) + +if __name__ == "__main__": + import argparse + parser = argparse.ArgumentParser() + parser.add_argument('--bag', help='ROS bag name') + parser.add_argument('--ouster_fn', default="/tmp/ouster_metadata.json", help='Ouster Metadata Name') + args = parser.parse_args() + + process_bag(args.bag, args.ouster_fn) diff --git a/build_system/lidar_depth/rosbag2pcd.bash b/build_system/lidar_depth/rosbag2pcd.bash new file mode 100755 index 0000000..228a2dc --- /dev/null +++ b/build_system/lidar_depth/rosbag2pcd.bash @@ -0,0 +1,74 @@ +#!/bin/bash +source ./build_system/preamble.bash + +# Do not run this script for any calibration bags. 
+if [[ "$BAG_NAME" == *"calib"* ]]; then + echo -e "${YELLOW}Skipping calibration bag: $BAG_NAME${NC}" + exit 0 +fi + +# output_files includes TRAJ_PATH, PCD_LOCAL_PATH, and GT_PATH +output_files=("$TRAJ_PATH" "$PCD_LOCAL_PATH" "$PCD_GLOBAL_PATH") +check_files output_files + +check_free_space fixed + +# Link the ouster_bag_convert directory to the current ROS src dir and build +if [ ! -L ~/catkin_ws/src/ouster_bag_convert ]; then + ln -s "$(pwd)"/build_system/lidar_depth/ouster_bag_convert ~/catkin_ws/src/ouster_bag_convert +fi +# same for concatenate_pcd_uncompressed +if [ ! -L ~/catkin_ws/src/concatenate_pcd_uncompressed ]; then + ln -s "$(pwd)"/build_system/lidar_depth/concatenate_pcd_uncompressed ~/catkin_ws/src/concatenate_pcd_uncompressed +fi +pushd ~/catkin_ws/src +catkin build --no-status +popd +. ~/catkin_ws/devel/setup.bash + +echo -e "${BLUE}Converting $BAG_NAME to $CONVERTED_BAG_PATH${NC}" +rosrun ouster_bag_convert ouster_bag_converter "$BAG_PATH" "$CONVERTED_BAG_PATH" +if [ $? -ne 0 ]; then + echo -e "${RED}Error running ouster_bag_convert${NC}" + exit 1 +fi +echo -e "${GREEN}Conversion done${NC}" + +echo -e "${YELLOW}Original bag: $BAG_PATH${NC}" +echo -e "${YELLOW}Converted bag: $CONVERTED_BAG_PATH${NC}" +echo -e "${YELLOW}PCD global file: $PCD_GLOBAL_PATH${NC}" + +current_dir=$(pwd) +# Create a Log dir where the trajectory can be saved +mkdir -p Log +roscd faster_lio +mkdir -p PCD + +# Run Faster-LIO to get the individual PCs +rm -rf "$PCD_LOCAL_PATH" +cd "$current_dir" +echo -e "${BLUE}FasterLIO individual${NC}" +echo -e "${YELLOW}Config file: ./lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml${NC}" +cat build_system/lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml +rosrun faster_lio run_mapping_offline \ + --bag_file "$CONVERTED_BAG_PATH" \ + --config_file "./build_system/lidar_depth/$(echo $FASTER_LIO_CONFIG).yaml" +if [ $? 
-ne 0 ]; then + echo -e "${RED}Error running FasterLIO local${NC}" + exit 1 +fi +echo -e "${GREEN}FasterLIO individual done${NC}" +chmod 777 Log/traj.bin +mv Log/traj.bin "$TRAJ_PATH" +roscd faster_lio +chmod 777 PCD +chmod 666 PCD/* +mv PCD "$PCD_LOCAL_PATH" + +# Create integrated global PCD +# TODO: remove this once Docker build is fixed +echo -e "${BLUE}Concatenate point clouds for global PC${NC}" +roscd concatenate_pcd_uncompressed +rosrun concatenate_pcd_uncompressed concatenate_pcd_uncompressed "$PCD_LOCAL_PATH"/*.pcd +chmod 666 output.pcd +mv output.pcd "$PCD_GLOBAL_PATH" diff --git a/build_system/lidar_depth/short_range_ouster64.yaml b/build_system/lidar_depth/short_range_ouster64.yaml new file mode 100644 index 0000000..5ed5b32 --- /dev/null +++ b/build_system/lidar_depth/short_range_ouster64.yaml @@ -0,0 +1,47 @@ +common: + lid_topic: "/os_node/points" + imu_topic: "/os_node/imu" + time_sync_en: false # ONLY turn on when external time synchronization is really not possible + +preprocess: + lidar_type: 3 # 1 for Livox serials LiDAR, 2 for Velodyne LiDAR, 3 for ouster LiDAR, + scan_line: 64 + blind: 1 + time_scale: 1e-3 + +mapping: + acc_cov: 0.4 + gyr_cov: 0.4 + b_acc_cov: 0.0002 + b_gyr_cov: 0.0002 + fov_degree: 180 + det_range: 150.0 + extrinsic_est_en: false # true: enable the online estimation of IMU-LiDAR extrinsic + extrinsic_T: [ -0.00625, 0.01175, -0.007645 ] + extrinsic_R: [1, 0, 0, + 0, 1, 0, + 0, 0, 1] + +publish: + path_publish_en: false + scan_publish_en: true # false: close all the point cloud output + scan_effect_pub_en: true # true: publish the pointscloud of effect point + dense_publish_en: false # false: low down the points number in a global-frame point clouds scan. + scan_bodyframe_pub_en: true # true: output the point cloud scans in IMU-body-frame + +path_save_en: true + +pcd_save: + pcd_save_en: true + interval: 1 # how many LiDAR frames saved in each pcd file; + # -1 : all frames will be saved in ONE pcd file, may lead to memory crash when having too much frames. 
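+# Same structure as long_range_ouster64.yaml, but with finer 0.10 filter/grid
+# sizes and a smaller 500 cube_side_length for short-range sequences.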
+feature_extract_enable: false
+point_filter_num: 3
+max_iteration: 4
+filter_size_surf: 0.10
+filter_size_map: 0.10 # currently unused; the code uses 0, i.e. it tends to add every point of the downsampled scan to the map
+cube_side_length: 500
+
+ivox_grid_resolution: 0.10 # default=0.2
+ivox_nearby_type: 26 # 6, 18, 26
+esti_plane_threshold: 0.1 # default=0.1
diff --git a/build_system/lidar_depth/util.py b/build_system/lidar_depth/util.py
new file mode 100644
index 0000000..125e988
--- /dev/null
+++ b/build_system/lidar_depth/util.py
@@ -0,0 +1,352 @@
+import h5py
+import pdb
+import os
+from functools import reduce  # used by merge_pcs below
+import numpy as np
+import cv2
+import pandas as pd
+from scipy.spatial.transform import Rotation
+
+import matplotlib.pyplot as plt
+from matplotlib.widgets import Slider, Button
+
+from ouster.sdk.examples.open3d import viewer_3d
+from ouster.client import LidarPacket, SensorInfo, Scans, Packets, ChanField, XYZLut, _utils
+import open3d as o3d
+
+from scipy.interpolate import splev, splrep, barycentric_interpolate
+from scipy import signal
+
+def transform_inv(T):
+    T_inv = np.eye(4)
+    T_inv[:3,:3] = T[:3,:3].T
+    T_inv[:3,3] = -1.0 * ( T_inv[:3,:3] @ T[:3,3] )
+    return T_inv
+
+def merge_pcs(pc_list):
+    if len(pc_list) < 10:
+        return reduce(lambda x,y: x+y, pc_list)
+    else:
+        n_pcs = len(pc_list) // 2
+        return merge_pcs(pc_list[:n_pcs]) + merge_pcs(pc_list[n_pcs:])
+
+def load_clouds(pcd_folder, idx=-1):
+    pcd_files = [file for file in os.listdir(pcd_folder) if file.endswith('.pcd') and file != "scans.pcd"]
+    pcd_files.sort(key=lambda x: int(x.split('_')[-1].split('.')[0]))
+
+    if idx==-1:
+        pcd_list = []
+        for file in pcd_files:
+            pcd_list.append( np.asarray(o3d.io.read_point_cloud(os.path.join(pcd_folder, file)).points) )
+
+        return pcd_list
+    else:
+        return np.asarray(o3d.io.read_point_cloud(os.path.join(pcd_folder, pcd_files[idx])).points)
+
+
+def filter_global_cloud(cloud_point_list, method='dbscan', method_kwargs={"eps":0.10, "min_points":10}):
+    global_cloud = o3d.geometry.PointCloud()
+    global_cloud.points = o3d.utility.Vector3dVector(np.concatenate( cloud_point_list ))
+
+    cloud_shapes = [cp.shape[0] for cp in cloud_point_list]
+    cloud_idx = np.cumsum([0]+cloud_shapes)
+
+    if method == 'dbscan':
+        labels = global_cloud.cluster_dbscan(**method_kwargs)
+        keep_mask = (np.asarray(labels) >= 0)
+    elif method == 'radius':
+        # remove_radius_outlier returns the indices of the points that remain
+        new_cloud, inlier_idx = global_cloud.remove_radius_outlier(**method_kwargs)
+        keep_mask = np.zeros((len(global_cloud.points),),dtype=bool)
+        keep_mask[inlier_idx] = 1
+    elif method == 'statistical':
+        # remove_statistical_outlier also returns the inlier indices
+        new_cloud, inlier_idx = global_cloud.remove_statistical_outlier(**method_kwargs)
+        keep_mask = np.zeros((len(global_cloud.points),),dtype=bool)
+        keep_mask[inlier_idx] = 1
+
+    filtered_clouds = []
+
+    for i in range(len(cloud_shapes)):
+        start = cloud_idx[i]
+        stop = cloud_idx[i+1]
+        cur_cloud_filter = cloud_point_list[i][keep_mask[start:stop]]
+        filtered_clouds.append(cur_cloud_filter)
+
+    return filtered_clouds
+
+def sensor_to_global_us(sensor_time_us, us_offset, skew_us_per_s, ovc_pps_offset):
+    offset_us = sensor_time_us - us_offset - ovc_pps_offset
+    new_time_us = offset_us / (1+ (skew_us_per_s/1e6))
+    skew_offset_us = int((new_time_us/1e6) * skew_us_per_s)
+    return offset_us - skew_offset_us
+
+def load_trajectory(traj_fn, timesync, hdf5=None):
+    # Trajectory header is
+    # #timestamp x y z q_x q_y q_z q_w
+    # trajectory = pd.read_csv(traj_fn, delimiter=' ')
+
+    dt = np.dtype([ ('#timestamp', 'f8'),
+                    ('x', 'f8'),
+                    ('y', 'f8'),
+                    ('z', 'f8'),
+                    ('q_x', 'f8'),
+                    ('q_y', 'f8'),
+                    ('q_z', 'f8'),
+                    ('q_w', 'f8') ])
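+    # The .traj file is a flat binary dump of 8-float64 records; the first and
+    # last records appear to be matching header/footer sentinels (checked via
+    # the assert below) and are dropped before building the DataFrame.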
+ data = np.fromfile(traj_fn, dtype=dt) + header = data[0] + footer = data[-1] + assert all([(np.isnan(header[i]) and np.isnan(footer[i])) or header[i] == footer[i] for i in range(8)]) + trajectory = pd.DataFrame(data[1:-1], columns=data.dtype.names) + + flio_trajectory = [] + + first_time = None + + for idx, t in trajectory.iterrows(): + L0_T_lidar = np.eye(4) + L0_T_lidar[:3,:3] = Rotation.from_quat( + np.array([t['q_x'], t['q_y'], t['q_z'], t['q_w']]) + ).as_matrix() + L0_T_lidar[:3,3] = np.array( [t['x'],t['y'],t['z']] ) + Ln_T_L0 = transform_inv(L0_T_lidar) + + lidar_sync = timesync['/os_node/sys_time']['correction'] + corrected_ts = sensor_to_global_us(int(t['#timestamp'] * 1e6), lidar_sync['offset'], lidar_sync['skew'], timesync['/ovc/pps_cb']['decimator']['round_offset']*2500) + + if first_time is None: + first_time = corrected_ts + + if hdf5 is not None: + lidar_end_ts = hdf5['/ouster/ts_end'][...] + closest_ind = np.searchsorted(lidar_end_ts, corrected_ts) + closest_ind = np.argmin(np.abs(lidar_end_ts - corrected_ts)) + final_ts = lidar_end_ts[closest_ind] + else: + final_ts = corrected_ts + + if corrected_ts > 0: + flio_trajectory.append({ + 'idx': idx, + "orig_ts": int(t['#timestamp']*1e6), + "orig_corrected_ts": corrected_ts, + "timestamp": final_ts, + "Ln_T_L0": Ln_T_L0, + "L0_T_lidar": L0_T_lidar, + }) + + return flio_trajectory + +def convert_model_type(cam_model, dist_model): + assert cam_model == "pinhole" + + if dist_model == "equidistant": + return "fisheye" + elif dist_model == "radtan": + return "plumb_bob" + else: + raise NotImplementedError("[%s] is not a supported model" % dist_model) + +def load_cam(cam_dict): + intrinsics = cam_dict["intrinsics"] + K = np.eye(3) + np.fill_diagonal(K[:2,:2], intrinsics[:2]) + K[:2,2] = intrinsics[2:] + M = convert_model_type(cam_dict['camera_model'], cam_dict['distortion_model']) + D = np.zeros(5 if M == "plumb_bob" else 4) + D[:4] = cam_dict["distortion_coeffs"] + return K, D, M + +def cam_dict(K, D, R, P, M, Size, name): + return { + "image_width": Size[0], + "image_height": Size[1], + "camera_name": name, + "camera_matrix": K, + "distortion_model": M, + "distortion_coefficients": D, + "rectification_matrix": R, + "projection_matrix": P, + } + +def kalibr_to_opencv(calib, cam_a_topic, cam_b_topic): + for k in calib.keys(): + topic = calib[k]['rostopic'] + if cam_a_topic in topic: + cam_a = k + if cam_b_topic in topic: + cam_b = k + + K0, D0, M0 = load_cam(calib[cam_a]) + K1, D1, M1 = load_cam(calib[cam_b]) + + T_01 = np.linalg.inv(np.array(calib[cam_b]["T_cn_cnm1"])) + + R = T_01[:3,:3] + T = T_01[:3,3] + Size = tuple(calib[cam_b]["resolution"]) # Assumes both cameras have same resolution + assert M0 == M1 + + if M0 == "plumb_bob": + R0, R1, P0, P1 = cv2.stereoRectify(cameraMatrix1=K0, cameraMatrix2=K1, distCoeffs1=D0, distCoeffs2=D1, imageSize=Size, R=R, T=T, flags=cv2.CALIB_ZERO_DISPARITY, alpha=0)[:4] + elif M0 == "fisheye": + R0, R1, P0, P1 = cv2.fisheye.stereoRectify(K1=K0, K2=K1, D1=D0, D2=D1, imageSize=Size, R=R, tvec=T, flags=cv2.CALIB_ZERO_DISPARITY)[:4] + + return { + cam_a_topic: cam_dict(K0, D0, R0, P0, M0, Size, cam_a_topic), + cam_b_topic: cam_dict(K1, D1, R1, P1, M1, Size, cam_b_topic), + "baseline": T[0], + "f": P0[0,0], + } + +def extract_all_chessboards(image): + # image = np.tile(image[:,:,None], (1,1,3)).copy() + image = image.copy() + + chessboards_info = [] + criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001) + + for cb_config in checkerboard_configs: + rowcol = cb_config[0] + 
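+        # NOTE: `checkerboard_configs` is assumed to be a module-level list of
+        # ((rows, cols), square_size, border_size) tuples; it is not defined in
+        # this file.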
square_size = cb_config[1] + border_size = cb_config[2] + + ret, corners = cv2.findChessboardCorners(image, rowcol) + + while ret: + corners_subp = cv2.cornerSubPix(image, corners, (11, 11), (-1, -1), criteria) + chessboards_info.append( {"corners": corners_subp, "square_size": square_size, "rowcol": rowcol, 'border_size': border_size} ) + + pts = corners[[0,9,-1,-10],:,:] + + image = cv2.fillPoly(image,[pts.astype(int)],128) + + ret, corners = cv2.findChessboardCorners(image, rowcol) + + return chessboards_info + +def range_for_field(f): + if f in (ChanField.RANGE2, ChanField.SIGNAL2, + ChanField.REFLECTIVITY2): + return ChanField.RANGE2 + else: + return ChanField.RANGE + +def get_cloud_times(h5f, idx): + info = SensorInfo(h5f['/ouster/metadata'][()]) + packet_buf = h5f['/ouster/data'][idx,...] + packets = [LidarPacket(packet_buf[i], info) for i in range(packet_buf.shape[0])] + print(packets[0].timestamp[0]) + print(packets[-1].timestamp[-1]) + print(h5f['/ouster/ts_start'][idx]) + print(h5f['/ouster/ts_end'][idx]) + +def get_cloud(h5f, idx): + info = SensorInfo(h5f['/ouster/metadata'][()]) + packet_buf = h5f['/ouster/data'][idx,...] + + packets = [LidarPacket(packet_buf[i], info) for i in range(packet_buf.shape[0])] + scans = Scans(Packets( packets, info )) + scan = next(iter(scans)) + + metadata = scans.metadata + xyzlut = XYZLut(metadata) + + fields = list(scan.fields) + aes = {} + for field_ind, field in enumerate(fields): + if field in (ChanField.SIGNAL, ChanField.SIGNAL2): + aes[field_ind] = _utils.AutoExposure(0.02, 0.1, 3) + else: + aes[field_ind] = _utils.AutoExposure() + + cloud = o3d.geometry.PointCloud() + from ouster import client + xyz_field = scan.field(range_for_field(fields[field_ind])) + signal_field = scan.field(fields[field_ind]).astype(float) + xyz_destaggered = client.destagger(metadata, xyz_field) + signal_destaggered = client.destagger(metadata, signal_field) + + xyz = xyzlut(xyz_field) # xyz_destaggered) + key = signal_field # signal_destaggered + + aes[field_ind](key) + from ouster.sdk.examples.open3d import colorize + color_img = colorize(key) + + # prepare point cloud for Open3d Visualiser + cloud.points = o3d.utility.Vector3dVector(xyz.reshape((-1, 3))) + # cloud.colors = o3d.utility.Vector3dVector( np.tile(key.reshape((-1, 1)), (1,3)) ) + cloud.colors = o3d.utility.Vector3dVector( color_img.reshape((-1,3)) ) + + return cloud + +def get_event_image(h5f, start_idx, stop_idx): + return + +def get_images(h5f, idx): + return { + '/ovc/left': h5f['/ovc/left'][idx,...], + '/ovc/right': h5f['/ovc/right'][idx,...], + '/ovc/ts': h5f['/ovc/ts'][idx], + } + + +def resample_imu(imu, imu_ts, sample_times): + spl = splrep(imu_ts, imu) + return splev( sample_times, spl) + +def filter_imu_sample(imu, numtaps=7, f=0.10): + coeffs = signal.firwin(numtaps, f) + filtered = np.zeros(imu.shape) + filtered[:,0] = signal.lfilter(coeffs, 1.0, imu[:,0]) + filtered[:,1] = signal.lfilter(coeffs, 1.0, imu[:,1]) + filtered[:,2] = signal.lfilter(coeffs, 1.0, imu[:,2]) + return filtered + +def align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel): + """ + Solving ouster_R_ovc * ovc_omega = ouster_omega + """ + ovc_measurements = np.concatenate( [ovc_omega] ).T + ouster_measurements = np.concatenate( [ouster_omega] ).T + + ouster_R_ovc = (ouster_measurements @ np.linalg.pinv(ovc_measurements)) + U,S,Vh = np.linalg.svd(ouster_R_ovc) + ouster_R_ovc = U@Vh + + ouster_R_ovc[:,0] = -ouster_R_ovc[:,0] + ouster_R_ovc[:,2] = np.cross(ouster_R_ovc[:,0],ouster_R_ovc[:,1]) + + assert 
np.all(np.isclose( np.linalg.det(ouster_R_ovc), 1.0 ))
+
+    return ouster_R_ovc
+
+def load_imu_based_calib(h5f):
+    ovc_mask = np.isfinite(h5f['/ovc/imu/accel'][:,0])
+    ovc_ts = h5f['/ovc/imu/ts'][...][ovc_mask]
+    ovc_accel = h5f['/ovc/imu/accel'][...][ovc_mask,:]
+    ovc_omega = h5f['/ovc/imu/omega'][...][ovc_mask,:]
+
+    ouster_mask = np.isfinite(h5f['/ouster/imu/accel'][1000:-1000,0])
+    ouster_ts = h5f['/ouster/imu/ts'][1000:-1000][ouster_mask]
+    ouster_accel = h5f['/ouster/imu/accel'][1000:-1000][ouster_mask,:]
+    ouster_omega = h5f['/ouster/imu/omega'][1000:-1000][ouster_mask,:]
+
+    ovc_resampled_omega = np.stack( [resample_imu(ovc_omega[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 )
+    ovc_resampled_accel = np.stack( [resample_imu(ovc_accel[:,i], ovc_ts, ouster_ts) for i in range(3)], axis=-1 )
+
+    ouster_accel = filter_imu_sample(ouster_accel)
+    ouster_omega = filter_imu_sample(ouster_omega)
+
+    ovc_omega = filter_imu_sample(ovc_resampled_omega)
+    ovc_accel = filter_imu_sample(ovc_resampled_accel)
+    ovc_ts = ouster_ts
+
+    try:
+        ouster_R_ovc = align_imus(ovc_omega, ovc_accel, ouster_omega, ouster_accel)
+    except Exception:
+        # Fall back to the fixed nominal mounting rotation if the data-driven
+        # alignment fails
+        ouster_R_ovc = np.array([[ 0, 0, 1],
+                                 [-1, 0, 0],
+                                 [ 0,-1, 0]])
+
+    return ouster_R_ovc
diff --git a/build_system/preamble.bash b/build_system/preamble.bash
new file mode 100644
index 0000000..468d369
--- /dev/null
+++ b/build_system/preamble.bash
@@ -0,0 +1,210 @@
+# Preamble file. This will be executed before any bash file to create variables,
+# colors, and bash options
+
+# ==================== MAIN PREAMBLE ==========================
+set -eo pipefail
+
+# Colors
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+BLUE='\033[0;34m'
+YELLOW='\e[1;33m'
+MAGENTA='\e[1;35m'
+NC='\033[0m' # No Color
+
+# Check that we have one argument and it is not null, except for the script
+# hdf52internimage.bash
+if [[ "$0" != *"hdf52internimage.bash"* ]]; then
+    if [ $# -ne 1 ]; then
+        echo "Usage: $0 bag_name"
+        exit 1
+    fi
+    if [ -z "$1" ]; then
+        echo "Usage: $0 bag_name"
+        exit 1
+    fi
+fi
+
+# Load env variables from python
+eval $(python3 build_system/dataset_paths.py --bag_name "$1")
+
+# Check that BAG_NAME is set, otherwise the eval may have failed
+if [ -z "${BAG_NAME+x}" ]; then
+    echo -e "${RED}Error: BAG_NAME variable not set.${NC}"
+    exit 1
+fi
+
+# Load ROS env
+. ~/catkin_ws/devel/setup.bash
+
+# Skip most scripts for "falcon_imu_calib" bags (except the IMU calibration
+# itself), as their synchronization was not performed correctly. This is not
+# a big problem, as these bags are only used for the IMU-to-camera
+# calibration.
+if [[ "$BAG_PATH" == *"falcon_imu_calib"* ]] && [[ "$0" != *"rosbag2imucalibration.bash"* ]]; then
+    echo -e "${YELLOW}Skipping script for $BAG_PATH${NC}"
+    echo -e "${YELLOW}The synchronization in these bags is not right: $BAG_PATH${NC}"
+    echo -e "${YELLOW}This is not a problem, as the bags are only used for IMU calibration${NC}"
+    exit 0
+fi
+
+# Print the name of the script, the BAG_NAME, and the start date in magenta
+echo -e "${MAGENTA}===================================================${NC}"
+echo -e "${MAGENTA}$0 $BAG_NAME"
+echo -e "${MAGENTA}$(date)${NC}"
+echo -e "${MAGENTA}===================================================${NC}"
+
+# ==================== FUNCTIONS ==============================
+
+function check_files {
+    # Check that the output files do not already exist, and exit the script
+    # gracefully if they all do. An array with the list of files should be
+    # provided as an argument.
+    # If some files exist (but not all of them), delete all of the files in
+    # the array before proceeding.
+    local array_name=$1
+
+    # Check if the variable output_fns exists
+    if [ -z "${!array_name+x}" ]; then
+        echo -e "${RED}$array_name is unset${NC}";
+        return 1
+    fi
+
+    # An array of output_fns
+    local -n output_fns=$1
+
+    # Variable to check if all files exist
+    local all_files_exist=1
+
+    # Check that the variable output_fns is not empty
+    if [ -z "$output_fns" ]; then
+        echo -e "${RED}Error: output_fns variable not set.${NC}"
+        exit 1
+    fi
+
+    # Check if each file exists
+    all_files_exist=1
+    for file in "${output_fns[@]}"
+    do
+        # check if file or directory exists
+        if [ ! -e "$file" ]; then
+            all_files_exist=0
+            echo -e "${YELLOW}File $file does not exist${NC}"
+        else
+            echo -e "${BLUE}File $file exists${NC}"
+        fi
+    done
+
+    # If all files exist
+    if [ $all_files_exist -eq 1 ]; then
+        echo -e "${GREEN}All output files exist. Stopping script.${NC}"
+        exit 0
+    else
+        # If any file does not exist, delete all the other files
+        echo -e "${RED}Not all output files exist. Removing all output files...${NC}"
+        for file in "${output_fns[@]}"
+        do
+            if [ -e "$file" ]; then
+                rm -rf "$file"
+                echo -e "${BLUE}Deleted $file${NC}"
+                rm -rf "$file.tmp"
+                echo -e "${BLUE}Deleted $file.tmp${NC}"
+            fi
+        done
+    fi
+}
+
+
+function compress_with_ffmpeg {
+    # Takes a raw video (FFV1) as input and generates compressed versions of
+    # that video in the same folder (with different extensions)
+
+    # Which videos to create?
+    export CREATE_WEBM_VIDEOS=0
+    export CREATE_MP4_VIDEOS=1
+
+    if [ "$#" -ne 1 ]; then
+        echo -e "${RED}compress_with_ffmpeg requires exactly one argument.${NC}"
+        return 1
+    fi
+    RAW_VIDEO=$1
+    # Check that the input file exists
+    if [ ! -e "$RAW_VIDEO" ]; then
+        echo -e "${RED}Error: File $RAW_VIDEO does not exist.${NC}"
+        return 1
+    fi
+
+    # If CREATE_MP4_VIDEOS is set, create the MP4 video
+    if [ $CREATE_MP4_VIDEOS -eq 1 ]; then
+        # Replace the .avi extension with .mp4
+        MP4_VIDEO="${RAW_VIDEO%.*}.mp4"
+        # TMP variable where the temp video will be stored
+        MP4_VIDEO_TMP="${MP4_VIDEO%.*}_tmp.${MP4_VIDEO##*.}"
+        rm -f "$MP4_VIDEO_TMP"
+
+        echo -e "${BLUE}Compressing $RAW_VIDEO to MP4${NC}"
+        ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" \
+            -c:v libx264 -preset slow -crf 21 -pix_fmt yuv420p "$MP4_VIDEO_TMP"
+        if [ $? -ne 0 ]; then
+            echo -e "${RED}Error compressing $MP4_VIDEO_TMP${NC}"
+            rm -f "$MP4_VIDEO_TMP"
+            exit 1
+        fi
+        # Move the temp video to the final video
+        chmod 666 "$MP4_VIDEO_TMP"
+        mv "$MP4_VIDEO_TMP" "$MP4_VIDEO"
+    else
+        echo -e "${YELLOW}CREATE_MP4_VIDEOS is set to 0. Skipping MP4 video creation${NC}"
+    fi
+
+    # Same for WebM
+    if [ $CREATE_WEBM_VIDEOS -eq 1 ]; then
+        # Replace the .avi extension with .webm
+        WEBM_VIDEO="${RAW_VIDEO%.*}.webm"
+        # TMP variable where the temp video will be stored
+        WEBM_VIDEO_TMP="${WEBM_VIDEO%.*}_tmp.${WEBM_VIDEO##*.}"
+        rm -f "$WEBM_VIDEO_TMP"
+
+        echo -e "${BLUE}Compressing $RAW_VIDEO to WEBM${NC}"
+        ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" -c:v libvpx-vp9 -threads 8 -row-mt 1 -b:v 0 -crf 30 -pass 1 -an -f null /dev/null && \
+        ffmpeg -hide_banner -loglevel error -i "$RAW_VIDEO" -c:v libvpx-vp9 -threads 8 -row-mt 1 -b:v 0 -crf 30 -pass 2 -an "$WEBM_VIDEO_TMP"
+        if [ $? -ne 0 ]; then
+            echo -e "${RED}Error compressing $WEBM_VIDEO_TMP${NC}"
+            rm -f "$WEBM_VIDEO_TMP"
+            exit 1
+        fi
+        # Move the temp video to the final video
+        chmod 666 "$WEBM_VIDEO_TMP"
+        mv "$WEBM_VIDEO_TMP" "$WEBM_VIDEO"
+    else
+        echo -e "${YELLOW}CREATE_WEBM_VIDEOS is set to 0. Skipping WEBM video creation${NC}"
+    fi
+}
+
+function check_free_space {
+    # Check that we have enough free space before processing the file. The
+    # first argument corresponds to the type of check:
+    #   - fixed: check that we have at least 50 GiB available
+    #   - bag_multiple: check that we have at least 5x the size of BAG_PATH
+
+    local CHECK_TYPE=$1
+    local FIXED_GB=$((50 * 1024 * 1024)) # 50 GiB in KiB
+    local AVAILABLE
+    AVAILABLE=$(df "$OUTPUT_PATH" | tail -1 | awk '{print $4}') # Available space in KiB
+    local BAG_SIZE
+    BAG_SIZE=$(du -sk "$BAG_PATH" | cut -f1) # Size of bag file in KiB
+
+    if [ "$CHECK_TYPE" == "fixed" ]; then
+        if [ "$AVAILABLE" -lt "$FIXED_GB" ]; then
+            echo -e "${RED}Not enough free space. Required: 50 GiB, Available: $((AVAILABLE / 1024 / 1024)) GiB${NC}"
+            return 1
+        fi
+    elif [ "$CHECK_TYPE" == "bag_multiple" ]; then
+        local REQUIRED_SPACE_BAG=$((BAG_SIZE * 5))
+        if [ "$AVAILABLE" -lt "$REQUIRED_SPACE_BAG" ]; then
+            echo -e "${RED}Not enough free space. Required: $((REQUIRED_SPACE_BAG / 1024 / 1024)) GiB, Available: $((AVAILABLE / 1024 / 1024)) GiB${NC}"
+            return 1
+        fi
+    else
+        echo -e "${RED}Invalid check type. Must be 'fixed' or 'bag_multiple'${NC}"
+        return 1
+    fi
+}
diff --git a/build_system/semantics/hdf52internimage.bash b/build_system/semantics/hdf52internimage.bash
new file mode 100755
index 0000000..9beaaf6
--- /dev/null
+++ b/build_system/semantics/hdf52internimage.bash
@@ -0,0 +1,86 @@
+#!/bin/bash
+
+source ./build_system/preamble.bash
+
+# Check if INTERNIMAGE_PATH is set, otherwise skip this file
+if [ -z "$INTERNIMAGE_PATH" ]; then
+    echo -e "${YELLOW}INTERNIMAGE_PATH is not set, skipping this file${NC}"
+    exit 0
+fi
+
+# This is a special script: check that we have two arguments
+if [ "$#" -ne 2 ]; then
+    echo -e "${RED}Usage: $0 bag_name gpu_to_run${NC}"
+    exit 1
+fi
+
+output_files=("$INTERNIMAGE_PATH")
+check_files output_files
+check_free_space fixed
+
+# Add a .tmp to INTERNIMAGE_PATH for the temporary path
+INTERNIMAGE_PATH_TMP="$INTERNIMAGE_PATH.tmp"
+
+# GPU_TARGET is the second argument
+GPU_TARGET="$2"
+TOTAL_GPUS=$(nvidia-smi --query-gpu=name --format=csv,noheader | wc -l)
+
+# Check that GPU_TARGET is a number between 0 and TOTAL_GPUS - 1
+if ! [[ "$GPU_TARGET" =~ ^[0-9]+$ ]] ; then
+    echo -e "${RED}GPU_TARGET is not a number${NC}"
+    exit 1
+fi
+if [ "$GPU_TARGET" -lt 0 ] || [ "$GPU_TARGET" -ge "$TOTAL_GPUS" ]; then
+    echo -e "${RED}GPU_TARGET must be between 0 and $((TOTAL_GPUS - 1))${NC}"
+    exit 1
+fi
+
+shift 2 # Remove the first and second argument, otherwise activate will complain
+
+# Activate the preexisting environment
+source ~/anaconda3/bin/activate
+conda activate internimage
+
+# Compile InternImage segmentation
+pushd ~/InternImage/segmentation/ops_dcnv3
+sh ./make.sh
+popd
+
+# Link InternImage to the semantics folder
+cd build_system/semantics/
+# Create the links only if they do not exist
+if [ ! -L configs ]; then
+    ln -s ~/InternImage/segmentation/configs .
+fi
+if [ ! -L mmcv_custom ]; then
+    ln -s ~/InternImage/segmentation/mmcv_custom .
+fi
+if [ ! -L mmseg_custom ]; then
+    ln -s ~/InternImage/segmentation/mmseg_custom .
+fi
+if [ ! -L ops_dcnv3 ]; then
+    ln -s ~/InternImage/segmentation/ops_dcnv3 .
+fi
+cd ../..
+
+# Run the script.
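+# With CUDA_DEVICE_ORDER=PCI_BUS_ID, CUDA device indices follow PCI bus order
+# and therefore match nvidia-smi's numbering, so CUDA_VISIBLE_DEVICES below
+# pins this job to the same physical GPU that was range-checked above.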
+echo -e "${BLUE}Running InternImage on GPU $GPU_TARGET${NC}"
+# Set CUDA env variables
+export CUDA_DEVICE_ORDER=PCI_BUS_ID
+export CUDA_VISIBLE_DEVICES="$GPU_TARGET"
+python build_system/semantics/internimage.py \
+    --h5fn "$H5_PATH" \
+    --config ./build_system/semantics/configs/cityscapes/upernet_internimage_l_512x1024_160k_cityscapes.py \
+    --checkpoint /M3ED_Build/input/InternImage/upernet_internimage_l_512x1024_160k_cityscapes.pth \
+    --palette ade20k \
+    --img_idx 1000 \
+    --opacity 1 \
+    --out_h5fn "$INTERNIMAGE_PATH_TMP"
+if [ $? -ne 0 ]; then
+    echo -e "${RED}Error running InternImage${NC}"
+    exit 1
+fi
+echo -e "${GREEN}Done InternImage${NC}"
+
+# Move the temp file to the final location
+mv "$INTERNIMAGE_PATH_TMP" "$INTERNIMAGE_PATH"
diff --git a/build_system/semantics/internimage.py b/build_system/semantics/internimage.py
new file mode 100644
index 0000000..5e0ec13
--- /dev/null
+++ b/build_system/semantics/internimage.py
@@ -0,0 +1,270 @@
+# Some of this code was adapted from the sample code for InternImage
+# Copyright (c) OpenMMLab. All rights reserved.
+# See https://github.com/OpenGVLab/InternImage/blob/master/segmentation/image_demo.py
+
+import cv2
+import os
+import sys
+import numpy as np
+import h5py
+import pdb
+import argparse
+import subprocess
+from datetime import datetime
+
+import matplotlib.pyplot as plt
+
+from argparse import ArgumentParser
+
+
+def intern_image_model_load(args):
+    import mmcv
+
+    import mmcv_custom  # noqa: F401,F403
+    import mmseg_custom  # noqa: F401,F403
+    from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
+    from mmseg.core.evaluation import get_palette
+    from mmcv.runner import load_checkpoint
+    from mmseg.core import get_classes
+    import cv2
+
+    # Build the model from a config file and a checkpoint file
+    model = init_segmentor(args.config, checkpoint=None, device=args.device)
+    checkpoint = load_checkpoint(model, args.checkpoint, map_location='cpu')
+    if 'CLASSES' in checkpoint.get('meta', {}):
+        model.CLASSES = checkpoint['meta']['CLASSES']
+    else:
+        model.CLASSES = get_classes(args.palette)
+
+    return model
+
+def intern_image_model_inference(args, model, img):
+    from mmseg.apis import inference_segmentor, init_segmentor, show_result_pyplot
+    result = inference_segmentor(model, img)
+    return result
+
+def intern_image_inference_save(args, model, img, result, idx):
+    import mmcv
+    from mmseg.core.evaluation import get_palette
+    if hasattr(model, 'module'):
+        model = model.module
+    img = model.show_result(img, result, palette=get_palette(args.palette),
+                            show=False, opacity=args.opacity)
+    mmcv.mkdir_or_exist(args.out)
+    outpath = os.path.join(args.out, "%05d.png" % idx)
+    cv2.imwrite(outpath, img)
+
+def invert_map(F):
+    # F has shape (h, w, 2), an "xymap" as produced by cv2.initUndistortRectifyMap.
+    # Invert it by damped fixed-point iteration: starting from the identity map I,
+    # update P <- P + 0.5 * (I - F(P)); at the fixed point F(P) = I, so P
+    # approximates F^-1 on the image domain.
+    (h, w) = F.shape[:2]
+    I = np.zeros_like(F)
+    I[:,:,1], I[:,:,0] = np.indices((h, w))  # identity map
+    P = np.copy(I)
+    for i in range(10):
+        correction = I - cv2.remap(F, P, None, interpolation=cv2.INTER_LINEAR)
+        P += correction * 0.5
+    return P
+
+def load_remapping(target_group, source_group):
+    target_T_to_prophesee_left = target_group['T_to_prophesee_left'][...]
+    source_T_to_prophesee_left = source_group['T_to_prophesee_left'][...]
+
+    source_T_target = source_T_to_prophesee_left @ np.linalg.inv( target_T_to_prophesee_left )
+    target_T_source = np.linalg.inv(source_T_target)
+
+    target_model = target_group['camera_model']
+    target_dist_coeffs = target_group['distortion_coeffs'][...]
+ target_dist_model = target_group['distortion_model'] + target_intrinsics = target_group['intrinsics'][...] + target_res = target_group['resolution'][...] + target_Size = target_res + + target_K = np.eye(3) + target_K[0,0] = target_intrinsics[0] + target_K[1,1] = target_intrinsics[1] + target_K[0,2] = target_intrinsics[2] + target_K[1,2] = target_intrinsics[3] + + target_P = np.zeros((3,4)) + target_P[:3,:3] = target_K + + source_model = source_group['camera_model'] + source_dist_coeffs = source_group['distortion_coeffs'][...] + source_dist_model = source_group['distortion_model'] + source_intrinsics = source_group['intrinsics'][...] + source_res = source_group['resolution'][...] + source_width, source_height = source_res + source_Size = source_res + + source_K = np.eye(3) + source_K[0,0] = source_intrinsics[0] + source_K[1,1] = source_intrinsics[1] + source_K[0,2] = source_intrinsics[2] + source_K[1,2] = source_intrinsics[3] + + source_P = np.zeros((3,4)) + source_P[:3,:3] = target_K + source_P[0,3] = target_K[0,0] * target_T_source[0,3] + source_P[1,3] = target_K[1,1] * target_T_source[1,3] + + target_R = target_T_source[:3,:3] # np.eye(3) + source_R = np.eye(3) # target_T_source[:3,:3] + + print(target_R) + print(source_R) + print(target_P) + print(source_P) + + map_target = np.stack(cv2.initUndistortRectifyMap(target_K, target_dist_coeffs, target_R, target_P, target_Size, cv2.CV_32FC1), axis=-1) + map_source = np.stack(cv2.initUndistortRectifyMap(source_K, source_dist_coeffs, source_R, source_P, source_Size, cv2.CV_32FC1), axis=-1) + inv_map_target = invert_map(map_target) + inv_map_source = invert_map(map_source) + + return map_target, map_source, inv_map_target, inv_map_source + +def create_full_map_and_mask(source_map, target_inv_map): + source_to_target_map = cv2.remap(source_map, target_inv_map, None, interpolation=cv2.INTER_LINEAR ) + source_to_target_mask = cv2.remap(source_map, target_inv_map, None, interpolation=cv2.INTER_LINEAR, borderValue=-1 ) + source_to_target_mask = source_to_target_mask[:,:,0] == -1 + + return source_to_target_map, source_to_target_mask + +def remap_and_mask(remap_grid, remap_mask, img): + img_remap = cv2.remap(img, remap_grid[:,:,0], remap_grid[:,:,1], cv2.INTER_LINEAR) + img_remap[remap_mask] = 0 + + if img_remap.ndim == 2: + img_remap = img_remap[:,:,None] + return img_remap + +def remap_mask(remap_grid, remap_mask, orig_resolution=None): + if orig_resolution is None: + orig_resolution = remap_grid[:,:,0].shape + mask = np.ones( orig_resolution, dtype=np.uint8 ) * 255 + mask_remapped = remap_and_mask(remap_grid, remap_mask, mask) + mask_remapped = cv2.medianBlur(mask_remapped,3) + return mask_remapped + +def view_remap_topics(remap_grid, remap_mask, image_group, time_lookup, event_group, img_idx=2190): + img = image_group['data'][img_idx] + + img_remap = remap_and_mask(remap_grid, remap_mask, img) + + start_idx = int(time_lookup[img_idx] - 200000) + end_idx = int(time_lookup[img_idx] + 0) + + x = event_group['x'][start_idx:end_idx] + y = event_group['y'][start_idx:end_idx] + p = event_group['p'][start_idx:end_idx] + eimg = np.zeros((720,1280,3)).astype(np.uint8) + eimg[y.astype(int),x.astype(int),0] = 255 + + fig, axes = plt.subplots(2,2,sharex=True,sharey=True) + + axes[0,0].imshow(img) + axes[0,1].imshow(img_remap) + axes[1,0].imshow(eimg) + + overlay = eimg.copy() + if img_remap.shape[-1] == 3: + overlay[:,:,2] = np.linalg.norm(img_remap, axis=-1) + else: + overlay[:,:,2] = img_remap.squeeze() + axes[1,1].imshow(overlay) + + plt.show() + +if 
__name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("--h5fn", help="H5 file to load") + parser.add_argument("--out_h5fn", help="H5 file to save to") + parser.add_argument("--target", default="prophesee", help="Camera to warp to") + parser.add_argument("--img_idx", type=int, default=2000, help="Index to load") + parser.add_argument('--config', help='Config file') + parser.add_argument('--checkpoint', help='Checkpoint file') + parser.add_argument('--out', type=str, default="demo", help='out dir') + parser.add_argument("--verbose", action="store_true", + help="Verbose output") + parser.add_argument( + '--device', default='cuda:0', help='Device used for inference') + parser.add_argument( + '--palette', + default='ade20k', + choices=['ade20k', 'cityscapes', 'cocostuff'], + help='Color palette used for segmentation map') + parser.add_argument( + '--opacity', + type=float, + default=0.5, + help='Opacity of painted segmentation map. In (0, 1] range.') + + args = parser.parse_args() + + h5f = h5py.File(args.h5fn, 'r') + + target_key = '/' + args.target + '/left' + + ovc_key = '/ovc/rgb' + target_map, source_map, target_inv_map, source_inv_map = load_remapping( h5f[target_key]['calib'], h5f[ovc_key]['calib'] ) + + source_to_target_map, source_to_target_mask = create_full_map_and_mask(source_map, target_inv_map) + + # view_remap_topics(source_to_target_map, source_to_target_mask, h5f[ovc_key], h5f['/ovc/ts_map_prophesee_left_t'], h5f[target_key], img_idx=350) + + model = intern_image_model_load(args) + h5f_out = h5py.File(args.out_h5fn, 'w') + + total_images = h5f[ovc_key]['data'].shape[0] + remap_shape = source_to_target_map[:,:,0].shape + + semantics_shape = (total_images, remap_shape[0], remap_shape[1], 1) + semantics_chunk = (1, remap_shape[0], remap_shape[1], 1) + if "rgb" in ovc_key: + warped_shape = (total_images, remap_shape[0], remap_shape[1], 3) + warped_chunk = (1, remap_shape[0], remap_shape[1], 3) + else: + warped_shape = (total_images, remap_shape[0], remap_shape[1], 1) + warped_chunk = (1, remap_shape[0], remap_shape[1], 1) + + h5f_out.attrs["origin_camera"] = ovc_key + h5f_out.attrs["destination_camera"] = target_key + + version = subprocess.check_output(["git", "describe", "--tags", "--long"]).decode("utf-8").strip() + h5f_out.attrs["version"] = version + h5f_out.attrs["creation_date"] = str(datetime.now()) + + + h5f_out.create_dataset("ts", data=h5f['/ovc/ts']) + h5f_out.create_dataset("ts_map_prophesee_left_t", data=h5f['/ovc/ts_map_prophesee_left_t']) + + h5f_out.create_dataset("warped_image", warped_shape, dtype='u1', chunks=warped_chunk, compression='lzf') + h5f_out.create_dataset("predictions", semantics_shape, dtype='u1', chunks=semantics_chunk, compression='lzf') + + mask = remap_mask(source_to_target_map, source_to_target_mask) + mask = (255.0 - mask).astype(np.uint8) + mask = mask > 0 + + from tqdm import tqdm + for idx in tqdm(range(total_images), total=total_images, disable=not args.verbose): + ovc_image = h5f[ovc_key]['data'][idx] + ovc_image_remapped = remap_and_mask(source_to_target_map, source_to_target_mask, ovc_image) + if ovc_image_remapped.shape[-1] == 1: + input_image = np.tile(ovc_image_remapped, (1,1,3)) + else: + input_image = ovc_image_remapped + + input_image[mask] = 0 + + result = intern_image_model_inference(args, model, input_image) + result = result[0][:,:,None] + masked_result = result + masked_result[mask] = 255 + + if "rgb" in ovc_key: + h5f_out['warped_image'][idx] = ovc_image_remapped + else: + 
h5f_out['warped_image'][idx] = ovc_image_remapped + + h5f_out['predictions'][idx] = masked_result diff --git a/build_system/semantics/internimage2media.bash b/build_system/semantics/internimage2media.bash new file mode 100755 index 0000000..829cc06 --- /dev/null +++ b/build_system/semantics/internimage2media.bash @@ -0,0 +1,28 @@ +#!/bin/bash +source ./build_system/preamble.bash + +output_files=("$SEMANTICS_VIDEO_RAW") +check_files output_files + +check_free_space fixed + +SEMANTICS_VIDEO_RAW_TMP="${SEMANTICS_VIDEO_RAW%.*}_tmp.${SEMANTICS_VIDEO_RAW##*.}" + +# Run media generation +echo -e "${BLUE}Starting semantics video generation${NC}" +python3 build_system/semantics/internimage2media.py \ + --semantics_h5 "$INTERNIMAGE_PATH" \ + --events_h5 "$H5_PATH" \ + --outfn "$SEMANTICS_VIDEO_RAW_TMP" +# Check if program exited with error +if [ $? -ne 0 ]; then + echo -e "${RED}Error creating media files for $H5_PATH${NC}" + exit 1 +fi +mv "$SEMANTICS_VIDEO_RAW_TMP" "$SEMANTICS_VIDEO_RAW" +echo -e "${GREEN}Semantics raw created${NC}" + +# Compress video +echo -e "${BLUE}Compressing${NC}" +compress_with_ffmpeg "$SEMANTICS_VIDEO_RAW" +echo -e "${GREEN}Compression finished${NC}" diff --git a/build_system/semantics/internimage2media.py b/build_system/semantics/internimage2media.py new file mode 100644 index 0000000..ff8c05b --- /dev/null +++ b/build_system/semantics/internimage2media.py @@ -0,0 +1,151 @@ +#!/usr/bin/env python3 +import os +import sys +import yaml +import pdb +import numpy as np +import cv2 +import h5py +from matplotlib import cm +import matplotlib.pyplot as plt +from colorama import Fore, Style + +# https://github.com/open-mmlab/mmsegmentation/blob/00790766aff22bd6470dbbd9e89ea40685008395/mmseg/utils/class_names.py#L249C1-L249C1 +def cityscapes_palette(): + """Cityscapes palette for external use.""" + return [[128, 64, 128], [244, 35, 232], [70, 70, 70], [102, 102, 156], + [190, 153, 153], [153, 153, 153], [250, 170, 30], [220, 220, 0], + [107, 142, 35], [152, 251, 152], [70, 130, 180], [220, 20, 60], + [255, 0, 0], [0, 0, 142], [0, 0, 70], [0, 60, 100], [0, 80, 100], + [0, 0, 230], [119, 11, 32]] + +def generate_rgb_check_video(ap): + # get all the semantic labels + + img_w = 1280 + img_h = 720 + + out_w = img_w * 3 + out_h = img_h + fps = 25 + + # Initialize the video writer + fourcc = cv2.VideoWriter_fourcc(*"FFV1") + fn_parts = os.path.splitext(ap.outfn) + filename = ''.join([fn_parts[0], '_rgb_check', fn_parts[1]]) + video_writer = cv2.VideoWriter(filename, fourcc, + fps, (out_w, out_h), + isColor=True) + print(Fore.BLUE + "Loading semantics and writing video" + Style.RESET_ALL) + palette = np.array(cityscapes_palette()) + # Add a last class to palette, for the background + palette = np.vstack((palette, np.array([0, 0, 0]))) + + with h5py.File(ap.events_h5, "r") as f, h5py.File(ap.semantics_h5, "r") as sems: + # Read the events dataset + # events_dataset = f["/depth/prophesee/left"][:] + predictions = sems["/predictions"] + ts = sems["/ts"] + warped_image = sems["/warped_image"] + rgb_image = f["/ovc/rgb/data"] + + for i in range(len(predictions)-1): + # remap all the 255 to 19 so they are plotted black + img = predictions[i] + img[img == 255] = 19 + color_img = palette[img] + color_img = color_img.squeeze(2).astype(np.uint8) + + write_frame = np.zeros((out_h, out_w, 3), dtype=np.uint8) + + write_frame[:,0*img_w:1*img_w,:] = warped_image[i,:img_h,:img_w,:] + write_frame[:,1*img_w:2*img_w,:] = color_img[:img_h,:img_w,:] + write_frame[:,2*img_w:3*img_w,:] = 
rgb_image[i,:img_h,:img_w,:] + + video_writer.write(write_frame) + + if ap.debug: + print(Fore.GREEN + "Writing frame: {}".format(i) + + Style.RESET_ALL, end="\r") + video_writer.release() + +def generate_event_overlay_video(ap): + # get all the semantic labels + + width = 1280 + height = 720 + fps = 25 + + # Initialize the video writer + fourcc = cv2.VideoWriter_fourcc(*"FFV1") + video_writer = cv2.VideoWriter(ap.outfn, fourcc, + fps, (width, height), + isColor=True) + print(Fore.BLUE + "Loading semantics and writing video" + Style.RESET_ALL) + palette = np.array(cityscapes_palette()) + # Add a last class to palette, for the background + palette = np.vstack((palette, np.array([0, 0, 0]))) + + with h5py.File(ap.events_h5, "r") as f, h5py.File(ap.semantics_h5, "r") as sems: + # Read the events dataset + # events_dataset = f["/depth/prophesee/left"][:] + pl = events_dataset = f["/prophesee/left"] + predictions = sems["/predictions"] + ts = sems["/ts"] + ts_map_prophesee_left_t = sems["/ts_map_prophesee_left_t"] + warped_image = sems["/warped_image"] + + for i in range(len(predictions)-1): + # remap all the 255 to 19 so they are plotted black + img = predictions[i] + img[img == 255] = 19 + color_img = palette[img] + color_img = color_img.squeeze(2).astype(np.uint8) + + # Get the events for the corresponding frame + pl = f["/prophesee/left"] + ev_x = pl["x"][ts_map_prophesee_left_t[i]:ts_map_prophesee_left_t[i + 1]] + ev_y = pl["y"][ts_map_prophesee_left_t[i]:ts_map_prophesee_left_t[i + 1]] + ev_p = pl["p"][ts_map_prophesee_left_t[i]:ts_map_prophesee_left_t[i + 1]] + ev_t = pl["t"][ts_map_prophesee_left_t[i]:ts_map_prophesee_left_t[i + 1]] + min_t = ev_t.min() + max_t = min_t + 5e3 + idx = np.logical_and(ev_t >= min_t, ev_t <= max_t) + ev_x = ev_x[idx] + ev_y = ev_y[idx] + ev_p = ev_p[idx]*255 + ev_t = ev_t[idx] + + # Color events in video + color = np.array([ev_p, np.zeros_like(ev_p), 255-ev_p]) + color = color//2 + + # Merge them + color_img[ev_y, ev_x, :] = color.T + color_image_bgra = cv2.cvtColor(color_img, cv2.COLOR_RGBA2BGRA) + video_writer.write(color_image_bgra[:, :, :3]) + + if ap.debug: + print(Fore.GREEN + "Writing frame: {}".format(i) + + Style.RESET_ALL, end="\r") + video_writer.release() + +if __name__ == "__main__": + import argparse + ap = argparse.ArgumentParser() + ap.add_argument("--events_h5", help="Input HDF5 file with camera data", required=True) + ap.add_argument("--semantics_h5", help="Input HDF5 file from InternImage", required=True) + ap.add_argument("--outfn", help="Output video file", required=True) + # Add a debug flag argument + ap.add_argument("--debug", help="Debug flag", action="store_true") + ap = ap.parse_args() + + # Check that the input file exists + if not os.path.exists(ap.semantics_h5): + sys.exit("Input file does not exist: {}".format(ap.semantics_h5)) + + if not os.path.exists(ap.events_h5): + sys.exit("Input file does not exist: {}".format(ap.events_h5)) + + generate_event_overlay_video(ap) + # generate_rgb_check_video(ap) diff --git a/build_system/stats_and_summary/dataset_statistics.py b/build_system/stats_and_summary/dataset_statistics.py new file mode 100755 index 0000000..fe367e4 --- /dev/null +++ b/build_system/stats_and_summary/dataset_statistics.py @@ -0,0 +1,144 @@ +#!/usr/bin/env python3 +import os +import sys +import yaml +import pdb +import numpy as np +from datetime import datetime +from colorama import Fore, Back, Style + + +class Statistics: + def __init__(self, statistics_file, insert=None, duration=None): + self.fn = statistics_file 
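+
+        # The "duration" field written by getStats.bash comes from `rosbag info`;
+        # it may look like "62.3s" or like "1:02s (62.3s)" (examples are
+        # illustrative). The parsing below prefers the parenthesized seconds
+        # value when present.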
+ + if insert is None: + # Parse the yaml file + with open(self.fn, 'r') as f: + self.statistics = yaml.load(f, Loader=yaml.FullLoader) + + self.num_event_left = self.statistics["events_left"] + self.num_event_right = self.statistics["events_right"] + self.num_img_left = self.statistics["ovc_left_images"] + self.num_img_right = self.statistics["ovc_right_images"] + + # Extract the start time + date_time_str = self.statistics["start_date"].split(" (")[0] + self.start_time = datetime.strptime(date_time_str, + "%b %d %Y %H:%M:%S.%f") + + time_str = self.statistics["duration"] + if '(' in time_str and ')' in time_str: + seconds = time_str.split('(')[1].split(')')[0] + else: + seconds = time_str[:-1] + + # Remove the 's' at the end, if exists + self.duration = float(seconds[:-1]) if seconds[-1] == 's' else float(seconds) + else: + assert duration is not None + self.duration = duration + assert self.duration != 0 + self.num_event_left = 0 + self.num_event_right = 0 + + +if __name__ == '__main__': + import argparse + argparser = argparse.ArgumentParser() + argparser.add_argument('--output_folder', type=str, required=True, + help='Folder where the statistics are stored') + args = argparser.parse_args() + + # Check that the statistics folder exists and it is not empty + if not os.path.exists(args.output_folder): + sys.exit('The statistics folder does not exist') + if len(os.listdir(args.output_folder)) == 0: + sys.exit('The statistics folder is empty') + + all_stats = [] + + # List all the statistics files and iterate over them + for file in os.listdir(args.output_folder): + # Skip all the calib files for statistics + if "calib" in file: + continue + full_path = os.path.join(args.output_folder, file, + file + ".yaml") + # Check if the path exists + if not os.path.exists(full_path): + print(Fore.RED + f"File {full_path} does not exist" + Fore.RESET) + continue + s = Statistics(full_path) + all_stats.append(s) + + # all_stats.append(Statistics("falcon_pennov_outdoor_flight_parking_2", + # insert=True, duration=109.0)) + # all_stats.append(Statistics("falcon_pennov_outdoor_flight_parking_2", + # insert=True, duration=113.0)) + # all_stats.append(Statistics("falcon_pennov_outdoor_vision_1", + #insert=True, duration=69.0)) + + environments = ["urban", "outdoor", "indoor", "forest"] + vehicles = ["car", "falcon", "spot"] + stats_summary = { + vehicle: { + environment: {"count": 0, "duration": 0} + for environment in environments + } + for vehicle in vehicles + } + + total_time = 0 + total_events = 0 + processed_bags = 0 + for stat in all_stats: + if stat.duration > 200: + # print red the name + print(Fore.YELLOW + "Big file:", stat.fn, "- Total time:", + str(stat.duration), Fore.RESET) + + # Identify the environment and vehicle from the filename + environment = None + vehicle = None + for env in environments: + if env in stat.fn: + environment = env + break + for veh in vehicles: + if veh in stat.fn: + vehicle = veh + break + + if vehicle is None: + raise ValueError(f"Unknown vehicle in dataset: {stat.fn}") + if environment is None: + raise ValueError(f"Unknown environment in dataset: {stat.fn}") + + # Update the stats_summary dictionary based on the environment and vehicle + stats_summary[vehicle][environment]["count"] += 1 + stats_summary[vehicle][environment]["duration"] += stat.duration + + total_time += stat.duration + total_events += stat.num_event_left + stat.num_event_right + processed_bags += 1 + + # Print the table + print("Vehicle Environment Total Sequences Test Sequences Total Time") + for 
vehicle in vehicles: + for environment in environments: + count = stats_summary[vehicle][environment]["count"] + test = int(count * .25) + duration = stats_summary[vehicle][environment]["duration"] + if count == 0: + continue + print(f"{vehicle: <9}{environment: <16}{count: <18}{test: <18}{duration: <.2f}") + + + print(f"Processed {processed_bags} bags") + + # Print the total number of events with scientific notations + print(f"Total number of events: {total_events:,}") + + # Print the total time in minutes, and seconds + print(f"Total time: {total_time / 60:.2f} minutes, {total_time:.2f} seconds") diff --git a/build_system/stats_and_summary/getStats.bash b/build_system/stats_and_summary/getStats.bash new file mode 100755 index 0000000..622250e --- /dev/null +++ b/build_system/stats_and_summary/getStats.bash @@ -0,0 +1,67 @@ +#!/bin/bash +source ./build_system/preamble.bash + +output_files=("$STATS_PATH") +check_files output_files + +echo -e "${YELLOW}Getting stats from bag file: ${BOLD}${BAG_NAME}${NC}" +echo -e "${YELLOW}H5 file: ${BOLD}${H5_PATH}${NC}" + +ROS_INFO=$(rosbag info "${BAG_PATH}") +# Get the start date using rosbag info, grep, and awk +start_date=$(echo "${ROS_INFO}" | grep -oP 'start\s*:\s*\K.+') +# Get the duration using rosbag info, grep, and awk +duration=$(echo "${ROS_INFO}"| grep -oP 'duration\s*:\s*\K.+') +# Get the raw bag file size +bag_file_size_kb=$(du -k "${BAG_PATH}" | cut -f1) + +# Statistics from the H5 file +h5_file_size_kb=$(du -k "${H5_PATH}" | cut -f1) +# if this is a bag that has calib and lidar in the name, the number of events is zero +if [[ $BAG_PATH == *"calib"* ]] && [[ $BAG_PATH == *"lidar"* ]]; then + events_left=0 + events_right=0 +else + events_left=$(h5ls -r "${H5_PATH}" | grep "prophesee/left/p" | awk '{print $3}'|sed 's/{\([0-9]*\)\/Inf}/\1/') + events_right=$(h5ls -r "${H5_PATH}" | grep "prophesee/right/p" | awk '{print $3}'|sed 's/{\([0-9]*\)\/Inf}/\1/') +fi +ovc_left=$(h5ls -r "${H5_PATH}" | grep "ovc/left/data" | awk '{print $3}'|sed 's/{\([0-9]*\)\/.*/\1/') +ovc_right=$(h5ls -r "${H5_PATH}" | grep "ovc/right/data" | awk '{print $3}'|sed 's/{\([0-9]*\)\/.*/\1/') + +# Check that all the statistics are not empty +if [ -z "$start_date" ] || [ -z "$duration" ] || [ -z "$bag_file_size_kb" ] || [ -z "$h5_file_size_kb" ] || [ -z "$events_left" ] || [ -z "$events_right" ] || [ -z "$ovc_left" ] || [ -z "$ovc_right" ]; then + echo -e "${RED}Some stats are empty${NC}" + exit 1 +fi + +# Write the start date and duration to a YAML file +cat > "${STATS_PATH}" << EOL +start_date: ${start_date} +duration: ${duration} +raw_bag_file_size_kb: ${bag_file_size_kb} +h5_file_size_kb: ${h5_file_size_kb} +events_left: ${events_left} +events_right: ${events_right} +ovc_left_images: ${ovc_left} +ovc_right_images: ${ovc_right} +EOL + +chmod 666 "${STATS_PATH}" + +echo -e "${YELLOW}Stats written to: ${BOLD}${STATS_PATH}${NC}" + +# Create validation images for the LiDAR calibration for non "calib" bags +# if [[ $BAG_PATH != *"calib"* ]]; then +# python3 lidar/lidar_calib.py \ +# --h5fn "$H5_PATH" --confirm_only \ +# --percentage 0.25 --confirm_fn "$TMP_PATH/calib_validation1.png" \ +# --npz_fn "$CALIB_LIDAR_PATH" +# python3 lidar/lidar_calib.py \ +# --h5fn "$H5_PATH" --confirm_only \ +# --percentage 0.5 --confirm_fn "$TMP_PATH/calib_validation2.png" \ +# --npz_fn "$CALIB_LIDAR_PATH" +# python3 lidar/lidar_calib.py \ +# --h5fn "$H5_PATH" --confirm_only \ +# --percentage 0.75 --confirm_fn "$TMP_PATH/calib_validation3.png" \ +# --npz_fn "$CALIB_LIDAR_PATH" +# fi diff 
--git a/dataset_list.yaml b/dataset_list.yaml new file mode 100644 index 0000000..3a26f0b --- /dev/null +++ b/dataset_list.yaml @@ -0,0 +1,1099 @@ + - file: car_forest_camera_calib_3 + filetype: camera_calib + + - file: car_forest_camera_calib_4 + filetype: camera_calib + + - file: car_forest_camera_calib_5 + filetype: camera_calib + + - file: car_forest_camera_calib_6 + filetype: camera_calib + + - file: car_forest_sand_1 + filetype: data + camera_calib_reference: car_forest_camera_calib_5 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_forest_sand_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + notes: > + This is a driving sequence on an open space of the forest, where the car is driven + on a sand path and there is drift on the wheels. + The car returns to the original position. + See car_forest_sand_2 for a similar sequence. + + - file: car_forest_sand_2 + filetype: data + camera_calib_reference: car_forest_camera_calib_5 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_forest_sand_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: True + internimage_semantics: False + check_pose_return: + absolute_position_error: 2.0 + notes: > + This is a driving sequence on an open space of the forest, where the car is driven + on a sand path and there is drift on the wheels. + The car returns to the original position. + See car_forest_sand_1 for a similar sequence. + + - file: car_forest_tree_tunnel + filetype: data + camera_calib_reference: car_forest_camera_calib_5 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_forest_sand_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: False + notes: > + This is a driving sequence in a narrow path in the forest, in a single + direction. The car does not return to the original position. The different levels of density of the + vegetation make it challenging for standard VIO techniques. + + - file: car_forest_into_ponds_long + filetype: data + camera_calib_reference: car_forest_camera_calib_5 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_forest_sand_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: False + notes: > + This is a driving sequence from an open road, into the forest, going through + ponds, to an open space in the forest. Illumination conditions are quite challenging + and the forest density is high. The car does not return into its original position. + See car_forest_into_ponds_short for a similar + sequence where the car returns to its original position. + + - file: car_forest_into_ponds_short + filetype: data + camera_calib_reference: car_forest_camera_calib_5 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_forest_sand_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + notes: > + This is a driving sequence from an open road, into the forest, going through + ponds, to an open space in the forest. Illumination conditions are quite challenging + and the forest density is high. 
The car returns to its original position. + See car_forest_into_ponds_long for a longer sequence + where the car does not return into its original position. + + - file: car_forest_camera_calib_1 + filetype: camera_calib + + - file: car_forest_camera_calib_2 + filetype: camera_calib + + - file: car_urban_day_camera_calib_3 + filetype: camera_calib + + - file: car_urban_day_camera_calib_4 + filetype: camera_calib + + - file: car_urban_day_camera_calib_5 + filetype: camera_calib + + - file: car_urban_day_camera_calib_1 + filetype: camera_calib + + - file: car_urban_day_camera_calib_2 + filetype: camera_calib + + - file: car_urban_day_city_hall + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_day_horse + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: False + notes: > + A very short sequence of the car moving in Philadelphia's center city. This is + an excellent test sequence, because it is short, has multiple IMOs. There is even a horse! + + - file: car_urban_day_penno_big_loop + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .5 + notes: > + Long sequence of a car in a parking lot, without many IMOs. 
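
Each data entry in this list follows the same schema: file, filetype, the calibration references, faster_lio_config, depth_scan_aggregation, is_test_file, internimage_semantics, check_pose_return, and an optional notes field. A minimal sketch of consuming the list with PyYAML (assuming it is run from the repository root, next to dataset_list.yaml):

    import yaml

    with open("dataset_list.yaml") as f:
        entries = yaml.safe_load(f)

    # Keep only data sequences (skipping camera_calib and other calibration
    # entries) and split them by the is_test_file flag
    data = [e for e in entries if e["filetype"] == "data"]
    test = [e["file"] for e in data if e["is_test_file"]]
    train = [e["file"] for e in data if not e["is_test_file"]]
    print(f"{len(train)} train / {len(test)} test sequences")
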
+ + - file: car_urban_day_penno_small_loop + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_day_rittenhouse + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: 3.0 + + - file: car_urban_day_ucity_big_loop + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: True + internimage_semantics: True + check_pose_return: + absolute_position_error: 11.0 + + - file: car_urban_day_ucity_small_loop + filetype: data + camera_calib_reference: car_urban_day_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_day_camera_calib_6 + filetype: camera_calib + + - file: car_urban_day_camera_calib_7 + filetype: camera_calib + + - file: car_urban_day_camera_calib_8 + filetype: camera_calib + + - file: car_urban_day_schuylkill_tunnel + filetype: data + camera_calib_reference: car_urban_day_camera_calib_8 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: True + internimage_semantics: True + check_pose_return: False + notes: > + A challenging and long sequence of a car driving in a highway, + into a tunnel. 
+ + - file: car_urban_night_camera_calib_1 + filetype: camera_calib + + - file: car_urban_night_city_hall + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .6 + + - file: car_urban_night_penno_big_loop + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_night_penno_small_loop + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_night_penno_small_loop_darker + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_night_rittenhouse + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + + - file: car_urban_night_schuylkill_tunnel + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: True + internimage_semantics: False + check_pose_return: False + notes: > + A challenging and long sequence of a car driving in a highway, + into a tunnel. 
+ + - file: car_urban_night_ucity_big_loop + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: True + internimage_semantics: False + check_pose_return: + absolute_position_error: 9.0 + + - file: car_urban_night_ucity_small_loop + filetype: data + camera_calib_reference: car_urban_night_camera_calib_1 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: car_urban_day_city_hall_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .5 + + - file: falcon_forest_camera_calib_1 + filetype: camera_calib + + - file: falcon_forest_camera_calib_2 + filetype: camera_calib + + - file: falcon_forest_camera_calib_3 + filetype: camera_calib + + - file: falcon_forest_road_1 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_forest_camera_calib_4 + filetype: camera_calib + + - file: falcon_forest_camera_calib_5 + filetype: camera_calib + + - file: falcon_forest_road_forest + filetype: data + camera_calib_reference: falcon_forest_camera_calib_4 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_forest_up_down + filetype: data + camera_calib_reference: falcon_forest_camera_calib_4 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_forest_camera_calib_6 + filetype: camera_calib + + - file: falcon_forest_camera_calib_7 + filetype: camera_calib + + - file: falcon_forest_camera_calib_8 + filetype: camera_calib + + - file: falcon_forest_into_forest_4 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_forest_road_3 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: True + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_forest_road_2 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + 
check_pose_return: + absolute_position_error: .1 + + - file: falcon_forest_camera_calib_9 + filetype: camera_calib + + - file: falcon_forest_camera_calib_10 + filetype: camera_calib + + - file: falcon_forest_into_forest_1 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + notes: > + This is a sequence of the UAV flying in a low density forest. The UAV returns + to its takeoff position. Similar to falcon_forest_into_forest_2. + + - file: falcon_forest_into_forest_2 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + notes: > + This is a sequence of the UAV flying in a low density forest. The UAV returns + to its takeoff position. + Similar to falcon_forest_into_forest_1. + + + - file: falcon_into_forest_3 + filetype: data + camera_calib_reference: falcon_forest_camera_calib_6 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_forest_road_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: True + internimage_semantics: False + check_pose_return: False + notes: > + This is a short sequence with the UAV flying along the road in the forest. + The sequence is ended before the UAV landed due to technical issues, so the + UAV does not return to its original position. + + + - file: falcon_indoor_camera_calib + filetype: camera_calib + + - file: falcon_indoor_flight_1 + filetype: data + camera_calib_reference: falcon_indoor_camera_calib + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 100 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .1 + notes: + A short indoor sequence where the UAV flies in an area with multiple + obstacles. The UAV returns close to its original position. + See falcon_indoor_flight_2 and falcon_indoor_flight_3 for similar + sequences. + + + - file: falcon_indoor_flight_2 + filetype: data + camera_calib_reference: falcon_outdoor_night_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 100 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + notes: + A short indoor sequence where the UAV flies in an area with multiple + obstacles. The UAV returns close to its original position. + See falcon_indoor_flight_1 and falcon_indoor_flight_3 for similar + sequences. 
+ + - file: falcon_indoor_flight_3 + filetype: data + camera_calib_reference: falcon_outdoor_night_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 100 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .3 + notes: + A short indoor sequence where the UAV flies in an area with multiple + obstacles. The UAV returns close to its original position. + See falcon_indoor_flight_1 and falcon_indoor_flight_2 for similar + sequences. + + - file: falcon_outdoor_day_penno_parking_1 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_parking_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .25 + + - file: falcon_outdoor_day_penno_parking_2 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_parking_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_day_penno_plaza + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_parking_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_day_camera_calib_1 + filetype: camera_calib + + - file: falcon_outdoor_day_penno_cars + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_cars_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_day_penno_parking_3 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_cars_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: True + internimage_semantics: True + check_pose_return: + absolute_position_error: .25 + + - file: falcon_outdoor_day_penno_trees + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_1 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_penno_cars_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .5 + + - file: falcon_outdoor_day_camera_calib_2 + filetype: camera_calib + + - file: falcon_outdoor_day_camera_calib_3 + filetype: camera_calib + + - file: falcon_outdoor_day_fast_flight_1 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_fast_flight_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 50 + 
is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_day_fast_flight_3 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_fast_flight_3_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 50 + is_test_file: True + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_day_fast_flight_2 + filetype: data + camera_calib_reference: falcon_outdoor_day_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_outdoor_day_fast_flight_2_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 50 + is_test_file: False + internimage_semantics: True + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_night_camera_calib_1 + filetype: camera_calib + + - file: falcon_outdoor_night_camera_calib_2 + filetype: camera_calib + + - file: falcon_outdoor_night_penno_parking_1 + filetype: data + camera_calib_reference: falcon_outdoor_night_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 50 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .1 + + - file: falcon_outdoor_night_penno_parking_2 + filetype: data + camera_calib_reference: falcon_outdoor_night_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 50 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .2 + + - file: falcon_outdoor_night_high_beams + filetype: data + camera_calib_reference: falcon_outdoor_night_camera_calib_2 + imu_calib_reference: falcon_imu_calib_1 + lidar_calib_reference: falcon_indoor_flight_1_T_c_l.npz + faster_lio_config: long_range_ouster64 + depth_scan_aggregation: 4 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .1 + notes: > + This is a short sequence where the UAV flies in a parking lot, in front + of a car with the high beams enabled. This sequence showcases the advantages + of event cameras compared to traditional imagers. + + - file: spot_forest_camera_calib_1 + filetype: camera_calib + + - file: spot_forest_camera_calib_2 + filetype: camera_calib + + - file: spot_forest_camera_calib_3 + filetype: camera_calib + + - file: spot_forest_easy_1 + filetype: data + camera_calib_reference: spot_forest_camera_calib_3 + imu_calib_reference: tower_imu_calib_1 + lidar_calib_reference: spot_forest_easy_1_T_c_l.npz + faster_lio_config: short_range_ouster64 + depth_scan_aggregation: 40 + is_test_file: False + internimage_semantics: False + check_pose_return: + absolute_position_error: .6 + notes: > + An easy sequence of spot walking in the forest between the trees. 
+
+- file: spot_forest_easy_2
+  filetype: data
+  camera_calib_reference: spot_forest_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_forest_easy_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .2
+
+- file: spot_forest_hard
+  filetype: data
+  camera_calib_reference: spot_forest_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_forest_easy_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .6
+
+- file: spot_forest_road_1
+  filetype: data
+  camera_calib_reference: spot_forest_camera_calib_1
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_forest_easy_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: 1.1
+  notes: >
+    This is an easy sequence of spot in the forest, walking on a dirt road.
+    The robot returns to its original position. Similar to spot_forest_road_2
+    and spot_forest_road_3.
+
+- file: spot_forest_road_2
+  filetype: data
+  camera_calib_reference: spot_forest_camera_calib_1
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_forest_easy_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: True
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: 1.4
+  notes: >
+    This is an easy sequence of spot in the forest, walking on a dirt road.
+    The robot returns close to its original position but not exactly
+    on the same spot (1.5 m), so this sequence is not recommended
+    for VIO or loop closure. Similar to spot_forest_road_1 and
+    spot_forest_road_3.
+
+- file: spot_forest_road_3
+  filetype: data
+  camera_calib_reference: spot_forest_camera_calib_1
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_forest_easy_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .75
+  notes: >
+    This is an easy sequence of spot in the forest, walking on a dirt road.
+    The robot returns to its original position. Similar to spot_forest_road_1
+    and spot_forest_road_2.
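+
+# check_pose_return either holds an absolute_position_error tolerance in
+# meters (how far from its start pose the platform may end up, per the
+# notes above) or is False when the platform does not return to its
+# starting pose and the check cannot be run.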
+
+- file: spot_indoor_camera_calib
+  filetype: camera_calib
+
+- file: spot_indoor_building_loop
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_indoor_stairs_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_indoor_obstacles
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_indoor_stairs_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_indoor_stairs
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_indoor_stairs_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .4
+
+- file: spot_indoor_stairwell
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_indoor_stairs_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return: False
+  notes: >
+    This is a complex sequence where spot is driven inside a stairwell, very
+    close to the walls. The final pose cannot be verified, as spot does not
+    return to the starting pose. This sequence is very challenging for VIO
+    systems, as there are few features on the stairwell steps.
+
+- file: spot_outdoor_day_camera_calib_1
+  filetype: camera_calib
+
+- file: spot_outdoor_day_camera_calib_2
+  filetype: camera_calib
+
+- file: spot_outdoor_day_camera_calib_3
+  filetype: camera_calib
+
+- file: spot_outdoor_day_art_plaza_loop
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_srt_green_loop
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_1
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: 1.2
+  notes: >
+    This is a short, easy sequence of spot walking on a paved road and grass.
+    The robot returns to a position close to the starting point
+    but not exactly the same (1.2 m), so this sequence is not recommended
+    for VIO or loop closure.
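+
+# faster_lio_config picks the Faster-LIO parameter set used when building
+# the reference trajectory: judging by its use in this file,
+# short_range_ouster64 for confined scenes (indoor, forest) and
+# long_range_ouster64 for open areas and flights.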
+
+- file: spot_outdoor_day_skatepark_1
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_skatepark_2
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_skatepark_3
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: True
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .25
+
+- file: spot_outdoor_day_rocky_steps
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return: False
+
+- file: spot_outdoor_day_srt_under_bridge_1
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_srt_under_bridge_2
+  filetype: data
+  camera_calib_reference: spot_outdoor_day_camera_calib_3
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: short_range_ouster64
+  depth_scan_aggregation: 4
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_penno_building_loop
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: True
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_day_penno_short_loop
+  filetype: data
+  camera_calib_reference: spot_indoor_camera_calib
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 400
+  is_test_file: False
+  internimage_semantics: True
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_night_camera_calib_2
+  filetype: camera_calib
+
+- file: spot_outdoor_night_camera_calib_1
+  filetype: camera_calib
+
+- file: spot_outdoor_night_penno_building_loop
+  filetype: data
+  camera_calib_reference: spot_outdoor_night_camera_calib_2
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: True
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .1
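+
+# internimage_semantics stays False for the spot_outdoor_night sequences
+# here and below; InternImage labels appear to be generated only for
+# daytime sequences, though this file does not state why.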
+
+- file: spot_outdoor_night_penno_plaza_lights
+  filetype: data
+  camera_calib_reference: spot_outdoor_night_camera_calib_2
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .1
+
+- file: spot_outdoor_night_penno_short_loop
+  filetype: data
+  camera_calib_reference: spot_outdoor_night_camera_calib_2
+  imu_calib_reference: tower_imu_calib_1
+  lidar_calib_reference: spot_outdoor_day_skatepark_1_T_c_l.npz
+  faster_lio_config: long_range_ouster64
+  depth_scan_aggregation: 40
+  is_test_file: False
+  internimage_semantics: False
+  check_pose_return:
+    absolute_position_error: .5
+
+- file: tower_imu_calib_1
+  filetype: imu_calib
+  camera_calib_reference: spot_outdoor_night_camera_calib_2
+
+- file: tower_imu_calib_2
+  filetype: imu_calib
+  camera_calib_reference: spot_outdoor_night_camera_calib_2
+
+- file: falcon_imu_calib_1
+  filetype: imu_calib
+  camera_calib_reference: falcon_outdoor_day_camera_calib_1
+
+- file: falcon_imu_calib_2
+  filetype: imu_calib
+  camera_calib_reference: falcon_outdoor_day_camera_calib_1
+
+- file: falcon_imu_calib_3
+  filetype: imu_calib
+  camera_calib_reference: falcon_outdoor_day_camera_calib_1
+
+- file: tower_lidar_calib_icp_2
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_icp_1
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_icp_2
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_icp_3
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_matlab_big_1
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_matlab_big_2
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_matlab_small_1
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_matlab_small_2
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_light_moving_icp_1
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_matlab_large_1
+  filetype: lidar_calib
+
+- file: tower_lidar_calib_matlab_large_2
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_icp_1
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_icp_2
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_icp_3
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_icp_move_1
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_matlab_large_1
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_matlab_large_2
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_matlab_small_1
+  filetype: lidar_calib
+
+- file: falcon_lidar_calib_matlab_small_2
+  filetype: lidar_calib
diff --git a/tools/download_m3ed.py b/tools/download_m3ed.py
new file mode 100755
index 0000000..a77cb65
--- /dev/null
+++ b/tools/download_m3ed.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python3
+import yaml
+import requests
+import os
+import itertools
+
+BASE_URL = "https://m3ed-dist.s3.us-west-2.amazonaws.com"
+DATASETS_LIST_URL = "https://raw.githubusercontent.com/daniilidis-group/m3ed/main/dataset_list.yaml"
+
+def download_with_progress(file, url, directory):
+    # Stream url into directory/file with a coarse percentage printout.
+    # (The simple progress format here is an assumption for this helper.)
+    dest = os.path.join(directory, file)
+    response = requests.get(url, stream=True)
+    total = int(response.headers.get("content-length", 0))
+    downloaded = 0
+    with open(dest, "wb") as f:
+        for chunk in response.iter_content(chunk_size=1 << 20):
+            f.write(chunk)
+            downloaded += len(chunk)
+            if total:
+                print(f"\r{file}: {100 * downloaded // total}%", end="")
+    print()
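+
+# Example invocations, using the flags defined below (paths assume the
+# script is run from the repository root):
+#   tools/download_m3ed.py --vehicle spot --environment forest --data h5
+#   tools/download_m3ed.py --data pcd --yaml dataset_list.yaml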
+
+if __name__ == "__main__":
+    import argparse
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--vehicle", required=False,
+                        help="Type of vehicle to download: car, falcon, spot")
+    parser.add_argument("--environment", required=False,
+                        help="Type of environment to download: urban, indoor, forest, outdoor")
+    parser.add_argument("--data", required=False,
+                        help="Data to download: h5, pcd, gt")
+    parser.add_argument("--yaml", required=False,
+                        help="Path to dataset_list.yaml. It will be downloaded from the repository if not provided")
+    args = parser.parse_args()
+
+    # If the yaml file is not provided, download it
+    if args.yaml is None:
+        print("Downloading dataset_list.yaml from the repository")
+        with open("dataset_list.yaml", "wb") as f:
+            f.write(requests.get(DATASETS_LIST_URL).content)
+        args.yaml = "dataset_list.yaml"
+
+    # Load the yaml file
+    with open(args.yaml, "r") as f:
+        dataset_list = yaml.safe_load(f)
+
+    # Check arguments
+    if args.vehicle is None:
+        vehicles = ["car", "falcon", "spot"]
+    elif args.vehicle in ["car", "falcon", "spot"]:
+        vehicles = [args.vehicle]
+    else:
+        raise ValueError("Invalid vehicle type")
+
+    if args.environment is None:
+        environments = ["urban", "indoor", "forest", "outdoor"]
+    elif args.environment in ["urban", "indoor", "forest", "outdoor"]:
+        environments = [args.environment]
+    else:
+        raise ValueError("Invalid environment type")
+
+    if args.data is None:
+        data = ["h5", "pcd", "gt"]
+    elif args.data in ["h5", "pcd", "gt"]:
+        data = [args.data]
+    else:
+        raise ValueError("Invalid data type")
+
+    print("Downloading data")
+    print("Vehicles: {}".format(vehicles))
+    print("Environments: {}".format(environments))
+
+    # Select data files; calibration files are skipped
+    lof = []
+    combinations = list(itertools.product(vehicles, environments))
+    for i in dataset_list:
+        filename = i["file"]
+        if "calib" in i["filetype"]:
+            continue
+        for vehicle, environment in combinations:
+            if vehicle in filename and environment in filename:
+                lof.append(filename)
+
+    # Download all the files
+    for file in lof:
+        # Create the directory if it does not exist
+        if not os.path.exists(file):
+            os.makedirs(file)
+        if "pcd" in data:
+            url = f"{BASE_URL}/processed/{file}/{file}.pcd"
+            print(f"Downloading {url}")
+            with open(f"{file}/{file}.pcd", "wb") as f:
+                f.write(requests.get(url).content)
+        if "h5" in data:
+            url = f"{BASE_URL}/processed/{file}/{file}.h5"
+