diff --git a/CMakeLists.txt b/CMakeLists.txt
new file mode 100644
index 0000000..d5a6a35
--- /dev/null
+++ b/CMakeLists.txt
@@ -0,0 +1,80 @@
+cmake_minimum_required(VERSION 3.0.2)
+project(physics_atv_visual_mapping)
+
+## Compile as C++11, supported in ROS Kinetic and newer
+# add_compile_options(-std=c++11)
+
+## Find catkin macros and libraries
+## if COMPONENTS list like find_package(catkin REQUIRED COMPONENTS xyz)
+## is used, also find other catkin packages
+find_package(catkin REQUIRED COMPONENTS
+ geometry_msgs
+ nav_msgs
+ rospy
+ sensor_msgs
+ grid_map_msgs
+ std_msgs
+ message_generation
+)
+
+catkin_python_setup()
+
+
+## Generate added messages and services with any dependencies listed here
+# generate_messages(
+# DEPENDENCIES
+# std_msgs
+# grid_map_msgs
+# geometry_msgs
+# nav_msgs
+# # geometry_msgs# nav_msgs# sensor_msgs# std_msgs
+# )
+
+###################################
+## catkin specific configuration ##
+###################################
+## The catkin_package macro generates cmake config files for your package
+## Declare things to be passed to dependent projects
+## INCLUDE_DIRS: uncomment this if your package contains header files
+## LIBRARIES: libraries you create in this project that dependent projects also need
+## CATKIN_DEPENDS: catkin_packages dependent projects also need
+## DEPENDS: system dependencies of this project that dependent projects also need
+catkin_package(
+# INCLUDE_DIRS include
+# LIBRARIES physics_atv_visual_mapping
+ CATKIN_DEPENDS geometry_msgs nav_msgs rospy sensor_msgs grid_map_msgs std_msgs message_runtime
+# DEPENDS system_lib
+)
+
+###########
+## Build ##
+###########
+
+## Specify additional locations of header files
+## Your package locations should be listed before other locations
+include_directories(
+# include
+ ${catkin_INCLUDE_DIRS}
+)
+
+#############
+## Install ##
+#############
+
+# all install targets should use catkin DESTINATION variables
+# See http://ros.org/doc/api/catkin/html/adv_user_guide/variables.html
+
+## Mark executable scripts (Python etc.) for installation
+## in contrast to setup.py, you can choose the destination
+catkin_install_python(PROGRAMS
+ scripts/ros/voxel_localmapping.py
+ DESTINATION ${CATKIN_PACKAGE_BIN_DESTINATION}
+)
+
+## Mark other files for installation (e.g. launch and bag files, etc.)
+install(FILES
+ DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}
+)
+
+# install(DIRECTORY models/
+# DESTINATION ${CATKIN_PACKAGE_SHARE_DESTINATION}/models)
diff --git a/config/ros/matthew_warthog.yaml b/config/ros/matthew_warthog.yaml
deleted file mode 100644
index bf3ba37..0000000
--- a/config/ros/matthew_warthog.yaml
+++ /dev/null
@@ -1,139 +0,0 @@
-models_dir: /phoenix/cmu_sara_perception_models #necessary to include this for non-ROS offline proc
-# models_dir: /home/tartandriver/tartandriver_ws/models
-
-image:
- image_topic: sensors/multisense_front/aux/image_rect_color/camera_info
- camera_info_topic: sensors/multisense_front/aux/image_rect_color
- image_compressed: False
-
-pointcloud:
- topic: registered_scan
-
-odometry:
- topic: integrated_to_init
-
-gridmap:
- topic: local_gridmap
-
-#the base link of the vehicle (e.g. base_link, vehicle, etc.)
-vehicle_frame: vehicle
-
-#camera intrinsics
-intrinsics:
- K: [1064.3520, 0., 732.8348, 0., 1064.3520, 581.5662, 0., 0., 1]
- P: [1064.3520, 0., 732.8348, 0., 1064.3520, 581.5662, 0., 0., 1]
-
-#transform from the vehicle link to the camera link
-extrinsics:
- p: [0.353, 1.025, -0.627]
- q: [-0.509, 0.512, -0.492, -0.485] #xyzw
-
-image_processing:
- # -
- # type: radio
- # args:
- # radio_type: radio_v2.5-b # radio_v2 e-radio_v2
- # image_insize: [672, 496] # Original Image Size must be divisible by 16
-
- # -
- # type: pca
- # args:
- # fp: physics_atv_visual_mapping/data/dino_clusters/8_clusters_364x686_
-
- -
- type: dino
- args:
- dino_type: dinov2_vitb14
- dino_layer: 10
- image_insize: [686, 364] #this should be the size of the input images
- desc_facet: value
- -
- type: vlad
- args:
- n_clusters: 8
- cache_dir: /phoenix/src/perception/cmu_sara_perception/physics_atv_visual_mapping/data/dino_clusters/8_clusters_364x686_
-
-# voxel
-localmapping:
- mapper_type: voxel
- layer_key: dino # dino
- ema: 0.5 #higher->use recent more
- n_features: 8
- metadata: #if too slow, can reduce length/height or slightly decrease resolution
- origin: [-25., -25., -10.]
- length: [50., 50., 20.]
- resolution: [0.1999, 0.1999, 0.1999]
-
-
-terrain_estimation:
- -
- type: elevation_stats
- args: {}
-
- -
- type: elevation_filter
- args:
- input_layer: min_elevation
- cnt_layer: num_voxels
-
- height_low_thresh: -3.0 #cells this far below their neighbors are not terrain
- height_high_thresh: 0.5 #cells this far above their neighbors are not terrain
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 0.25 #kernel radius in m
- kernel_sharpness: 0.5 #sharpness of (Gaussian) kernel
-
- -
- type: terrain_inflation
- args:
- input_layer: min_elevation_filtered
- mask_layer: min_elevation_filtered_mask
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 3.0 #kernel radius in m
- kernel_sharpness: 1.0 #sharpness of (Gaussian) kernel
-
- -
- type: mrf_terrain_estimation
- args:
- input_layer: min_elevation_filtered_inflated
- mask_layer: min_elevation_filtered_inflated_mask
-
- itrs: 5 #num updates
- alpha: 1. #weight on the measurement update
- beta: 10. #weight on the neighbor update
- lr: 0.05 #learning rate
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 3.0
- kernel_sharpness: 1.0
-
- -
- type: slope
- args:
- input_layer: terrain
- mask_layer: min_elevation_filtered_inflated_mask
-
- -
- type: terrain_diff
- args:
- terrain_layer: terrain
- max_elevation_layer: max_elevation
- max_elevation_mask_layer: num_voxels
- overhang: 2.0 #done to match irl yamaha values
- -
- type: terrain_aware_bev_feature_splat
- args:
- output_key: dino
- terrain_layer: terrain
- terrain_mask_layer: min_elevation_filtered_inflated_mask
- overhang: 1.0
-
-device: cuda
-viz: False
diff --git a/config/ros/rzr_voxel_mapping.yaml b/config/ros/rzr_voxel_mapping.yaml
index c7bfb1c..32b01c0 100644
--- a/config/ros/rzr_voxel_mapping.yaml
+++ b/config/ros/rzr_voxel_mapping.yaml
@@ -1,11 +1,12 @@
-models_dir: /home/tartandriver/tartandriver_ws/models #necessary to include this for non-ROS offline proc
+models_dir: /models #necessary to include this for non-ROS offline proc
images:
image_left:
- image_topic: /multisense/left/image_rect_color
- camera_info_topic: /multisense/left/camera_info
+ image_topic: /crl_rzr/multisense_left/aux/image_rect_color
+ camera_info_topic: /crl_rzr/multisense_left/aux/camera_info
image_compressed: False
- folder: image_left_color
+ folder: image_left
+ mask: /catkin_ws/src/vfm_voxel_mapping/visual_mapping_noetic/data/masks/rzr/left.png
#camera intrinsics
intrinsics:
@@ -18,10 +19,11 @@ images:
q: [0.805, -0.212, 0.150, 0.534] #xyzw
image_front:
- image_topic: /multisense/left/image_rect_color
- camera_info_topic: /multisense/left/camera_info
+ image_topic: /crl_rzr/multisense_front/aux/image_rect_color
+ camera_info_topic: /crl_rzr/multisense_front/aux/camera_info
image_compressed: False
- folder: image_front_color
+ folder: image_front
+ mask: /catkin_ws/src/vfm_voxel_mapping/visual_mapping_noetic/data/masks/rzr/front.png
#camera intrinsics
intrinsics:
@@ -34,10 +36,11 @@ images:
q: [0.597, -0.593, 0.384, 0.380] #xyzw
image_right:
- image_topic: /multisense/left/image_rect_color
- camera_info_topic: /multisense/left/camera_info
+ image_topic: /crl_rzr/multisense_right/aux/image_rect_color
+ camera_info_topic: /crl_rzr/multisense_right/aux/camera_info
image_compressed: False
- folder: image_right_color
+ folder: image_right
+ mask: /catkin_ws/src/vfm_voxel_mapping/visual_mapping_noetic/data/masks/rzr/right.png
#camera intrinsics
intrinsics:
@@ -50,33 +53,35 @@ images:
q: [-0.217, 0.812, -0.523, -0.144] #xyzw
pointcloud:
- topic: /superodometry/velodyne_cloud_registered
- folder: merged_points_base
-
-odometry:
- topic: /superodometry/integrated_to_init
- folder: odom
+ topic: /crl_rzr/velodyne_merged_points
+ folder: pointcloud_in_vehicle
#the base link of the vehicle (e.g. base_link, vehicle, etc.)
vehicle_frame: crl_rzr/base_link
+#the fixed frame to map in
+mapping_frame: crl_rzr/map
+
image_processing:
-
type: dino
args:
- dino_type: dinov2_vitb14_reg
- dino_layers: [10]
- image_insize: [854, 448]
+ # dino_type: dinov2_vitb14_reg
+ # dino_layers: [10]
+ # image_insize: [560, 448]
+
+ dino_type: radio_v2.5-b
+ dino_layers: [11]
+ image_insize: [640, 512]
+ # stride: 4
desc_facet: value
-
type: pca
args:
- # fp: physics_atv_visual_mapping/pca/radio_v2.5-b_64_renegade_ridge.pt
- # fp: physics_atv_visual_mapping/pca/radio_v2.5b_64_gascola_20250124.pt
- fp: physics_atv_visual_mapping/pca/vitb_reg_64_gascola_veg.pt
- # fp: physics_atv_visual_mapping/pca/dino_vitl_multi_64_gascola_veg.pt
+ # fp: physics_atv_visual_mapping/pca/vitb_reg_64_gascola_veg.pt
+ fp: physics_atv_visual_mapping/pca/radio_64_trabuco_racer.pt
# voxel
localmapping:
@@ -90,7 +95,7 @@ localmapping:
# resolution: [0.8, 0.8, 0.8]
origin: [-50., -50., -10.]
length: [100., 100., 20.]
- resolution: [0.25, 0.25, 0.25]
+ resolution: [0.2, 0.2, 0.1]
# origin: [-30., -30., -15.]
# length: [60., 60., 30.]
# resolution: [0.2, 0.2, 0.2]
@@ -197,5 +202,6 @@ terrain_estimation:
terrain_mask_layer: min_elevation_filtered_inflated_mask
overhang: 2.0
+rate: 0.2
device: cuda
viz: True
\ No newline at end of file
diff --git a/config/ros/warthog.yaml b/config/ros/warthog.yaml
deleted file mode 100644
index dbce21d..0000000
--- a/config/ros/warthog.yaml
+++ /dev/null
@@ -1,125 +0,0 @@
-models_dir: /phoenix/cmu_sara_perception_models #necessary to include this for non-ROS offline proc
-# models_dir: /home/tartandriver/tartandriver_ws/models
-
-image:
- image_topic: sensors/multisense_front/aux/image_rect_color/camera_info
- camera_info_topic: sensors/multisense_front/aux/image_rect_color
- image_compressed: False
-
-pointcloud:
- topic: registered_scan
-
-odometry:
- topic: integrated_to_init
-
-gridmap:
- topic: local_gridmap
-
-#the base link of the vehicle (e.g. base_link, vehicle, etc.)
-vehicle_frame: vehicle
-
-#camera intrinsics
-intrinsics:
- K: [1064.3520, 0., 732.8348, 0., 1064.3520, 581.5662, 0., 0., 1]
- P: [1064.3520, 0., 732.8348, 0., 1064.3520, 581.5662, 0., 0., 1]
-
-#transform from the vehicle link to the camera link
-extrinsics:
- p: [0.353, 1.025, -0.627]
- q: [-0.509, 0.512, -0.492, -0.485] #xyzw
-
-image_processing:
- -
- type: radio
- args:
- radio_type: radio_v2.5-b # radio_v2 e-radio_v2
- image_insize: [672, 496] # Original Image Size must be divisible by 16
-
- -
- type: pca
- args:
- fp: physics_atv_visual_mapping/pca/radio_v2.5b_64_gascola_20250124.pt
-
-# voxel
-localmapping:
- mapper_type: voxel
- layer_key: dino
- ema: 0.5 #higher->use recent more
- n_features: 16
- metadata:
- origin: [-25., -25., -15.]
- length: [50.001, 50.001, 30.001]
- resolution: [0.2, 0.2, 0.2]
-
-terrain_estimation:
- -
- type: elevation_stats
- args: {}
-
- -
- type: elevation_filter
- args:
- input_layer: min_elevation
- cnt_layer: num_voxels
-
- height_low_thresh: -3.0 #cells this far below their neighbors are not terrain
- height_high_thresh: 0.5 #cells this far above their neighbors are not terrain
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 0.25 #kernel radius in m
- kernel_sharpness: 0.5 #sharpness of (Gaussian) kernel
-
- -
- type: terrain_inflation
- args:
- input_layer: min_elevation_filtered
- mask_layer: min_elevation_filtered_mask
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 3.0 #kernel radius in m
- kernel_sharpness: 1.0 #sharpness of (Gaussian) kernel
-
- -
- type: mrf_terrain_estimation
- args:
- input_layer: min_elevation_filtered_inflated
- mask_layer: min_elevation_filtered_inflated_mask
-
- itrs: 5 #num updates
- alpha: 1. #weight on the measurement update
- beta: 10. #weight on the neighbor update
- lr: 0.05 #learning rate
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 0.4
- kernel_sharpness: 1.0
-
- -
- type: slope
- args:
- input_layer: terrain
- mask_layer: min_elevation_filtered_inflated_mask
-
- -
- type: terrain_diff
- args:
- terrain_layer: terrain
- max_elevation_layer: max_elevation
- max_elevation_mask_layer: num_voxels
- overhang: 2.0 #done to match irl yamaha values
- -
- type: terrain_aware_bev_feature_splat
- args:
- output_key: dino
- terrain_layer: terrain
- terrain_mask_layer: min_elevation_filtered_inflated_mask
- overhang: 1.0
-
-device: cuda
-viz: False
diff --git a/config/ros/yamaha_thermal_voxel_mapping.yaml b/config/ros/yamaha_thermal_voxel_mapping.yaml
deleted file mode 100644
index 13cb5e6..0000000
--- a/config/ros/yamaha_thermal_voxel_mapping.yaml
+++ /dev/null
@@ -1,180 +0,0 @@
-models_dir: /home/tartandriver/tartandriver_ws/models #necessary to include this for non-ROS offline proc
-
-image:
- image_topic: /multisense/left/image_rect_color
- camera_info_topic: /multisense/left/camera_info
- image_compressed: False
- folder: image_left_color
-
-pointcloud:
- topic: /superodometry/velodyne_cloud_registered
- folder: super_odometry_pc
-
-odometry:
- topic: /superodometry/integrated_to_init
- folder: odom
-
-gridmap:
- topic: local_gridmap
- folder: local_gridmap
-
-#the base link of the vehicle (e.g. base_link, vehicle, etc.)
-vehicle_frame: vehicle
-
-#camera intrinsics
-intrinsics:
- K: [455.7750, 0., 497.1180, 0., 456.3191, 251.8580, 0., 0., 1.]
- P: [455.7750, 0., 497.1180, 0., 456.3191, 251.8580, 0., 0., 1.]
-
-#transform from the vehicle link to the camera link
-extrinsics:
- p: [0.17265, -0.15227, 0.05708]
- q: [0.55940, -0.54718, 0.44603, 0.43442]
-
-image_processing:
- -
- type: dino
- args:
- dino_type: dinov2_vitb14_reg
- dino_layer: 10
- image_insize: [644, 518] #this should be the size of the input images
- # image_insize: [854, 448]
- desc_facet: value
- # -
- # type: pca
- # args:
- # fp: physics_atv_visual_mapping/pca/pca_vitb10_value.pt
-
- # -
- # type: radio
- # args:
- # radio_type: radio_v2.5-b # radio_v2 e-radio_v2
- # image_insize: [672, 352] # Original Image Size must be divisible by 16
- # # image_insize: [1344, 704]
-
- -
- type: pca
- args:
- # fp: physics_atv_visual_mapping/pca/radio_v2.5-b_64_renegade_ridge.pt
- # fp: physics_atv_visual_mapping/pca/radio_v2.5b_64_gascola_20250124.pt
- fp: physics_atv_visual_mapping/pca/vitb_reg_64_gascola_veg.pt
-
-# voxel
-localmapping:
- mapper_type: voxel
- layer_key: dino
- ema: 0.5 #higher->use recent more
- n_features: 16
- metadata:
- # origin: [-100., -100., -30.]
- # length: [200., 200., 60.]
- # resolution: [0.8, 0.8, 0.8]
- origin: [-50., -50., -10.]
- length: [100., 100., 20.]
- resolution: [0.4, 0.4, 0.1]
- # origin: [-30., -30., -15.]
- # length: [60., 60., 30.]
- # resolution: [0.2, 0.2, 0.2]
-
- raytracer:
- type: frustum
- sensor:
- type: VLP32C-front
-
- # type: VLP32C
-
- # type: generic
-
- # n_el: 72 #number of bins
- # el_range: [-25., 15.] #min/max angle
- # el_thresh: default #optional arg to filter based on remainder
-
- # n_az: 900
- # az_range: [-180, 180.]
- # az_thresh: default
-
-terrain_estimation:
- -
- type: elevation_stats
- args: {}
-
- -
- type: porosity
- args: {}
-
- -
- type: elevation_filter
- args:
- input_layer: min_elevation
- cnt_layer: num_voxels
-
- height_low_thresh: -3.0 #cells this far below their neighbors are not terrain
- height_high_thresh: 0.5 #cells this far above their neighbors are not terrain
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 2. #kernel radius in m
- kernel_sharpness: 5. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.25 #kernel radius in m
- # kernel_sharpness: 0.5 #sharpness of (Gaussian) kernel
-
- -
- type: terrain_inflation
- args:
- input_layer: min_elevation_filtered
- mask_layer: min_elevation_filtered_mask
-
- thresh: 0.05 #at least this frac of neighboring cells in the kernel must be observed
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 2. #kernel radius in m
- kernel_sharpness: 1. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.5 #kernel radius in m
- # kernel_sharpness: 1.0 #sharpness of (Gaussian) kernel
-
- -
- type: mrf_terrain_estimation
- args:
- input_layer: min_elevation_filtered_inflated
- mask_layer: min_elevation_filtered_inflated_mask
-
- itrs: 5 #num updates
- alpha: 1. #weight on the measurement update
- beta: 10. #weight on the neighbor update
- lr: 0.05 #learning rate
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 2. #kernel radius in m
- kernel_sharpness: 1. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.5
- # kernel_sharpness: 1.0
-
- -
- type: slope
- args:
- input_layer: terrain
- mask_layer: min_elevation_filtered_inflated_mask
-
- -
- type: terrain_diff
- args:
- terrain_layer: terrain
- overhang: 1.7
- -
- type: terrain_aware_bev_feature_splat
- args:
- output_key: dino
- terrain_layer: terrain
- terrain_mask_layer: min_elevation_filtered_inflated_mask
- overhang: 1.7
-
-device: cuda
-viz: True
diff --git a/config/ros/yamaha_voxel_mapping.yaml b/config/ros/yamaha_voxel_mapping.yaml
deleted file mode 100644
index 73e9876..0000000
--- a/config/ros/yamaha_voxel_mapping.yaml
+++ /dev/null
@@ -1,189 +0,0 @@
-models_dir: /home/tartandriver/tartandriver_ws/models #necessary to include this for non-ROS offline proc
-
-image:
- image_topic: /multisense/left/image_rect_color
- camera_info_topic: /multisense/left/camera_info
- image_compressed: False
- folder: image_left_color
-
-pointcloud:
- topic: /superodometry/velodyne_cloud_registered
- folder: merged_points_base
-
-odometry:
- topic: /superodometry/integrated_to_init
- folder: odom
-
-gridmap:
- topic: local_gridmap
- folder: local_gridmap
-
-#the base link of the vehicle (e.g. base_link, vehicle, etc.)
-vehicle_frame: vehicle
-
-#camera intrinsics
-intrinsics:
- K: [600., 0., 480., 0., 600., 300., 0., 0., 1.]
- P: [600., 0., 480., 0., 600., 300., 0., 0., 1.]
-
-#transform from the vehicle link to the camera link
-extrinsics:
- p: [-1.789, 1.966, -0.788]
- q: [0.805, -0.212, 0.150, 0.534] #xyzw
-
-image_processing:
- -
- type: dino
- args:
- # dino_type: radio_v2.5-l
- # dino_layers: [6, 11, 16, 21]
- # image_insize: [848, 448]
-
- # dino_type: dinov2_vitl14_reg
- # dino_layers: [5, 10, 15, 20]
- dino_type: dinov2_vitb14_reg
- dino_layers: [10]
- image_insize: [854, 448]
-
- desc_facet: value
- # -
- # type: pca
- # args:
- # fp: physics_atv_visual_mapping/pca/pca_vitb10_value.pt
-
- # -
- # type: radio
- # args:
- # radio_type: radio_v2.5-l # radio_v2 e-radio_v2
- # image_insize: [672, 352] # Original Image Size must be divisible by 16
- # # image_insize: [1344, 704]
-
- -
- type: pca
- args:
- # fp: physics_atv_visual_mapping/pca/radio_v2.5-b_64_renegade_ridge.pt
- # fp: physics_atv_visual_mapping/pca/radio_v2.5b_64_gascola_20250124.pt
- fp: physics_atv_visual_mapping/pca/vitb_reg_64_gascola_veg.pt
- # fp: physics_atv_visual_mapping/pca/dino_vitl_multi_64_gascola_veg.pt
-
-# voxel
-localmapping:
- mapper_type: voxel
- layer_key: dino
- ema: 0.5 #higher->use recent more
- n_features: 16
- metadata:
- # origin: [-100., -100., -30.]
- # length: [200., 200., 60.]
- # resolution: [0.8, 0.8, 0.8]
- origin: [-50., -50., -10.]
- length: [100., 100., 20.]
- resolution: [0.4, 0.4, 0.1]
- # origin: [-30., -30., -15.]
- # length: [60., 60., 30.]
- # resolution: [0.2, 0.2, 0.2]
-
- raytracer:
- type: frustum
- sensor:
- type: VLP32C-front
-
- # type: VLP32C
-
- # type: generic
-
- # n_el: 72 #number of bins
- # el_range: [-25., 15.] #min/max angle
- # el_thresh: default #optional arg to filter based on remainder
-
- # n_az: 900
- # az_range: [-180, 180.]
- # az_thresh: default
-
-terrain_estimation:
- -
- type: elevation_stats
- args: {}
-
- -
- type: porosity
- args: {}
-
- -
- type: elevation_filter
- args:
- input_layer: min_elevation
- cnt_layer: num_voxels
-
- height_low_thresh: -3.0 #cells this far below their neighbors are not terrain
- height_high_thresh: 0.5 #cells this far above their neighbors are not terrain
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 2. #kernel radius in m
- kernel_sharpness: 5. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.25 #kernel radius in m
- # kernel_sharpness: 0.5 #sharpness of (Gaussian) kernel
-
- -
- type: terrain_inflation
- args:
- input_layer: min_elevation_filtered
- mask_layer: min_elevation_filtered_mask
-
- thresh: 0.05 #at least this frac of neighboring cells in the kernel must be observed
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 1. #kernel radius in m
- kernel_sharpness: 2. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.5 #kernel radius in m
- # kernel_sharpness: 1.0 #sharpness of (Gaussian) kernel
-
- -
- type: mrf_terrain_estimation
- args:
- input_layer: min_elevation_filtered_inflated
- mask_layer: min_elevation_filtered_inflated_mask
-
- itrs: 5 #num updates
- alpha: 10. #weight on the measurement update
- beta: 1. #weight on the neighbor update
- lr: 0.05 #learning rate
-
- kernel_params:
- kernel_type: gaussian #one of {gaussian/box} the kernel type to use for inflation
-
- kernel_radius: 1. #kernel radius in m
- kernel_sharpness: 2. #sharpness of (Gaussian) kernel
-
- # kernel_radius: 0.5
- # kernel_sharpness: 1.0
-
- -
- type: slope
- args:
- input_layer: terrain
- mask_layer: min_elevation_filtered_inflated_mask
- radius: 1.0
- max_slope: 2.0
-
- -
- type: terrain_diff
- args:
- terrain_layer: terrain
- overhang: 2.0
- -
- type: terrain_aware_bev_feature_splat
- args:
- output_key: dino
- terrain_layer: terrain
- terrain_mask_layer: min_elevation_filtered_inflated_mask
- overhang: 2.0
-
-device: cuda
-viz: True
diff --git a/data/masks/rzr/front.png b/data/masks/rzr/front.png
new file mode 100644
index 0000000..92441d4
Binary files /dev/null and b/data/masks/rzr/front.png differ
diff --git a/data/masks/rzr/left.png b/data/masks/rzr/left.png
new file mode 100644
index 0000000..6704f29
Binary files /dev/null and b/data/masks/rzr/left.png differ
diff --git a/data/masks/rzr/right.png b/data/masks/rzr/right.png
new file mode 100644
index 0000000..f0f6de8
Binary files /dev/null and b/data/masks/rzr/right.png differ
diff --git a/launch/dino_localmapping.launch.py b/launch/dino_localmapping.launch.py
deleted file mode 100755
index 15e8154..0000000
--- a/launch/dino_localmapping.launch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import yaml
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument
-from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare the argument for the config file
- visual_mapping_config = LaunchConfiguration("visual_mapping_config")
-
- # Use PathJoinSubstitution to dynamically join the package path and the config file
- config_fp = PathJoinSubstitution(
- [
- FindPackageShare("physics_atv_visual_mapping"),
- "config",
- "ros",
- visual_mapping_config,
- ]
- )
-
- nodes = [
- # Declare the use_sim_time argument
- DeclareLaunchArgument(
- "use_sim_time",
- default_value="true",
- description="Use simulation (Gazebo) clock if true",
- ),
- # Declare the launch argument with a default value
- DeclareLaunchArgument(
- "visual_mapping_config",
- default_value="sam_pca.yaml",
- description="Config file for visual mapping",
- ),
- # Node definition
- Node(
- package="physics_atv_visual_mapping",
- executable="dino_localmapping",
- name="visual_localmapping",
- output="screen",
- parameters=[
- {"config_fp": config_fp},
- {"use_sim_time": LaunchConfiguration("use_sim_time")},
- {"models_dir": LaunchConfiguration("models_dir")},
- ],
- ),
- ]
-
- return LaunchDescription(nodes)
diff --git a/launch/vfm_voxel_mapping.launch b/launch/vfm_voxel_mapping.launch
new file mode 100644
index 0000000..c0963c2
--- /dev/null
+++ b/launch/vfm_voxel_mapping.launch
@@ -0,0 +1,10 @@
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/launch/voxel_localmapping.launch.py b/launch/voxel_localmapping.launch.py
deleted file mode 100755
index 143e56c..0000000
--- a/launch/voxel_localmapping.launch.py
+++ /dev/null
@@ -1,51 +0,0 @@
-import yaml
-
-from launch import LaunchDescription
-from launch.actions import DeclareLaunchArgument
-from launch.substitutions import LaunchConfiguration, PathJoinSubstitution
-from launch_ros.actions import Node
-from launch_ros.substitutions import FindPackageShare
-
-
-def generate_launch_description():
- # Declare the argument for the config file
- visual_mapping_config = LaunchConfiguration("visual_mapping_config")
-
- # Use PathJoinSubstitution to dynamically join the package path and the config file
- config_fp = PathJoinSubstitution(
- [
- FindPackageShare("physics_atv_visual_mapping"),
- "config",
- "ros",
- visual_mapping_config,
- ]
- )
-
- nodes = [
- # Declare the use_sim_time argument
- DeclareLaunchArgument(
- "use_sim_time",
- default_value="true",
- description="Use simulation (Gazebo) clock if true",
- ),
- # Declare the launch argument with a default value
- DeclareLaunchArgument(
- "visual_mapping_config",
- default_value="sam_pca.yaml",
- description="Config file for visual mapping",
- ),
- # Node definition
- Node(
- package="physics_atv_visual_mapping",
- executable="voxel_localmapping",
- name="visual_localmapping",
- output="screen",
- parameters=[
- {"config_fp": config_fp},
- {"use_sim_time": LaunchConfiguration("use_sim_time")},
- {"models_dir": LaunchConfiguration("models_dir")},
- ],
- ),
- ]
-
- return LaunchDescription(nodes)
diff --git a/launch/wheelie_playback.launch.py b/launch/wheelie_playback.launch.py
deleted file mode 100644
index 116d458..0000000
--- a/launch/wheelie_playback.launch.py
+++ /dev/null
@@ -1,44 +0,0 @@
-from launch import LaunchDescription
-from launch_ros.actions import Node
-from launch.actions import DeclareLaunchArgument
-from launch.substitutions import LaunchConfiguration
-
-
-def generate_launch_description():
- return LaunchDescription(
- [
- # Declare the use_sim_time argument
- DeclareLaunchArgument(
- "use_sim_time",
- default_value="true",
- description="Use simulation (Gazebo) clock if true",
- ),
- # Static Transform Publisher node
- Node(
- package="tf2_ros",
- executable="static_transform_publisher",
- name="static_transform_publisher",
- output="screen",
- arguments=[
- "0",
- "0",
- "0",
- "0",
- "0",
- "0",
- "1",
- "base_link",
- "zedx_left_camera_frame",
- ],
- parameters=[{"use_sim_time": LaunchConfiguration("use_sim_time")}],
- ),
- # RViz node
- Node(
- package="rviz2",
- executable="rviz2",
- name="rviz2",
- output="screen",
- parameters=[{"use_sim_time": LaunchConfiguration("use_sim_time")}],
- ),
- ]
- )
diff --git a/package.xml b/package.xml
index 4d95019..f19d950 100644
--- a/package.xml
+++ b/package.xml
@@ -1,20 +1,81 @@
-
-
+
physics_atv_visual_mapping
0.0.0
- TODO: Package description
- cherieh
- TODO: License declaration
+ The physics_atv_visual_mapping package
- rclpy
+
+
+
+ yamaha
- ament_copyright
- ament_flake8
- ament_pep257
- python3-pytest
+
+
+
+ TODO
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ catkin
+ geometry_msgs
+ nav_msgs
+ grid_map_msgs
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+ message_generation
+ geometry_msgs
+ nav_msgs
+ grid_map_msgs
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+ geometry_msgs
+ nav_msgs
+ grid_map_msgs
+ roscpp
+ rospy
+ sensor_msgs
+ std_msgs
+ message_runtime
+
+
- ament_python
+
+
-
+
\ No newline at end of file
diff --git a/physics_atv_visual_mapping/dino_localmapping.py b/physics_atv_visual_mapping/dino_localmapping.py
deleted file mode 100755
index c3fe6bf..0000000
--- a/physics_atv_visual_mapping/dino_localmapping.py
+++ /dev/null
@@ -1,757 +0,0 @@
-import rclpy
-from rclpy.node import Node
-import yaml
-import copy
-import numpy as np
-
-np.float = np.float64 # hack for numpify
-
-import tf2_ros
-import torch
-import cv_bridge
-import os
-
-from std_msgs.msg import Float32, Float32MultiArray, MultiArrayDimension
-from sensor_msgs.msg import PointCloud2, Image, CompressedImage
-from nav_msgs.msg import Odometry
-from grid_map_msgs.msg import GridMap
-from perception_interfaces.msg import FeatureVoxelGrid
-
-from physics_atv_visual_mapping.image_processing.image_pipeline import (
- setup_image_pipeline,
-)
-from physics_atv_visual_mapping.pointcloud_colorization.torch_color_pcl_utils import *
-
-from physics_atv_visual_mapping.localmapping.bev.bev_localmapper import BEVLocalMapper
-from physics_atv_visual_mapping.localmapping.voxel.voxel_localmapper import (
- VoxelLocalMapper,
-)
-from physics_atv_visual_mapping.localmapping.metadata import LocalMapperMetadata
-
-from physics_atv_visual_mapping.utils import *
-
-
-class DinoMappingNode(Node):
- def __init__(self):
- super().__init__("visual_mapping")
-
- self.declare_parameter("config_fp", "")
- self.declare_parameter("models_dir", "")
-
- self.get_logger().info(
- "Looking for models in {}".format(
- self.get_parameter("models_dir").get_parameter_value().string_value
- )
- )
-
- config_fp = self.get_parameter("config_fp").get_parameter_value().string_value
- config = yaml.safe_load(open(config_fp, "r"))
- config["models_dir"] = (
- self.get_parameter("models_dir").get_parameter_value().string_value
- )
-
- self.vehicle_frame = config["vehicle_frame"]
-
- self.localmap = None
- self.pcl_msg = None
- self.odom_msg = None
- self.img_msg = None
- self.odom_frame = None
-
- self.device = config["device"]
- self.base_metadata = config["localmapping"]["metadata"]
- self.localmap_ema = config["localmapping"]["ema"]
- self.layer_key = (
- config["localmapping"]["layer_key"]
- if "layer_key" in config["localmapping"].keys()
- else None
- )
- self.layer_keys = (
- self.make_layer_keys(config["localmapping"]["layer_keys"])
- if "layer_keys" in config["localmapping"].keys()
- else None
- )
- self.last_update_time = 0.0
-
- self.image_pipeline = setup_image_pipeline(config)
-
- self.setup_localmapper(config)
-
- self.tf_buffer = tf2_ros.Buffer()
- self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)
-
- self.bridge = cv_bridge.CvBridge()
-
- self.compressed_img = config["image"]["image_compressed"]
- if self.compressed_img:
- self.image_sub = self.create_subscription(
- CompressedImage, config["image"]["image_topic"], self.handle_img, 1
- )
- else:
- self.image_sub = self.create_subscription(
- Image, config["image"]["image_topic"], self.handle_img, 1
- )
-
- self.intrinsics = (
- torch.tensor(config["intrinsics"]["P"], device=config["device"])
- .reshape(3, 3)
- .float()
- )
- self.dino_intrinsics = None
-
- self.extrinsics = pose_to_htm(
- np.concatenate(
- [
- np.array(config["extrinsics"]["p"]),
- np.array(config["extrinsics"]["q"]),
- ],
- axis=-1,
- )
- )
-
- self.pcl_sub = self.create_subscription(
- PointCloud2, config["pointcloud"]["topic"], self.handle_pointcloud, 10
- )
- self.odom_sub = self.create_subscription(
- Odometry, config["odometry"]["topic"], self.handle_odom, 10
- )
-
- self.pcl_pub = self.create_publisher(PointCloud2, "/dino_pcl", 10)
- self.image_pub = self.create_publisher(Image, "/dino_image", 10)
-
- if self.mapper_type == "bev":
- self.gridmap_pub = self.create_publisher(GridMap, "/dino_gridmap", 10)
- else:
- self.voxel_pub = self.create_publisher(FeatureVoxelGrid, "/dino_voxels", 10)
- self.voxel_viz_pub = self.create_publisher(
- PointCloud2, "/dino_voxels_viz", 1
- )
-
- self.timing_pub = self.create_publisher(Float32, "/dino_proc_time", 10)
-
- self.timer = self.create_timer(0.2, self.spin)
- self.viz = config["viz"]
-
- def setup_localmapper(self, config):
- """
- check that the localmapper metadata is good
- """
- self.mapper_type = config["localmapping"]["mapper_type"]
- metadata = LocalMapperMetadata(**self.base_metadata)
-
- assert self.mapper_type in [
- "bev",
- "voxel",
- ], "need mapper type to be either ['bev', 'voxel']"
- if self.mapper_type == "bev":
- assert metadata.ndims == 2, "need 2d metadata for BEV mapping"
- assert (
- metadata.resolution[0] - metadata.resolution[1]
- ).abs() < 1e-4, "need x,y resolution to match for ROS node"
-
- self.localmapper = BEVLocalMapper(
- metadata,
- n_features=config["localmapping"]["n_features"],
- ema=config["localmapping"]["ema"],
- device=config["device"],
- )
- elif self.mapper_type == "voxel":
- assert metadata.ndims == 3, "need 3d metadata for voxel mapping"
- self.localmapper = VoxelLocalMapper(
- metadata,
- n_features=config["localmapping"]["n_features"],
- ema=config["localmapping"]["ema"],
- device=config["device"],
- )
- else:
- self.get_logger().error("unsupported mapping type!")
- exit(1)
-
- def make_layer_keys(self, layer_keys):
- out = []
- for lk in layer_keys:
- for i in range(lk["n"]):
- out.append("{}_{}".format(lk["key"], i))
- return out
-
- def handle_pointcloud(self, msg):
- self.pcl_msg = msg
-
- def handle_odom(self, msg):
- if self.odom_frame is None:
- self.odom_frame = msg.header.frame_id
- self.odom_msg = msg
-
- def handle_img(self, msg):
- self.img_msg = msg
-
- def preprocess_inputs(self):
- if self.pcl_msg is None:
- self.get_logger().warn("no pcl msg received")
- return None
-
- pcl_time = (
- self.pcl_msg.header.stamp.sec + self.pcl_msg.header.stamp.nanosec * 1e-9
- )
- if abs(pcl_time - self.last_update_time) < 1e-3:
- return None
-
- if self.odom_msg is None:
- self.get_logger().warn("no odom msg received")
- return None
-
- if self.img_msg is None:
- self.get_logger().warn("no img msg received")
- return None
-
- if not self.tf_buffer.can_transform(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- ):
- self.get_logger().warn(
- "cant tf from {} to {} at {}".format(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- self.pcl_msg.header.stamp,
- )
- )
- return None
-
- tf_vehicle_to_pcl_msg = self.tf_buffer.lookup_transform(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- )
-
- if not self.tf_buffer.can_transform(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- ):
- self.get_logger().warn(
- "cant tf from {} to {} at {}".format(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- self.pcl_msg.header.stamp,
- )
- )
- return None
-
- tf_odom_to_pcl_msg = self.tf_buffer.lookup_transform(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- )
-
- pcl = pcl_msg_to_xyz(self.pcl_msg).to(self.device)
-
- vehicle_to_pcl_htm = tf_msg_to_htm(tf_vehicle_to_pcl_msg).to(self.device)
- pcl_in_vehicle = transform_points(pcl.clone(), vehicle_to_pcl_htm)
-
- odom_to_pcl_htm = tf_msg_to_htm(tf_odom_to_pcl_msg).to(self.device)
- pcl_in_odom = transform_points(pcl.clone(), odom_to_pcl_htm)
-
- self.last_update_time = pcl_time
-
- # Camera processing
- if self.compressed_img:
- img = (
- self.bridge.compressed_imgmsg_to_cv2(
- self.img_msg, desired_encoding="rgb8"
- )
- / 255.0
- )
- else:
- img = (
- self.bridge.imgmsg_to_cv2(self.img_msg, desired_encoding="rgb8").astype(
- np.float32
- )
- / 255.0
- )
-
- img = torch.tensor(img).unsqueeze(0).permute(0, 3, 1, 2)
-
- dino_img, dino_intrinsics = self.image_pipeline.run(
- img, self.intrinsics.unsqueeze(0)
- )
- # dino_img = img.to(self.device)
- # dino_intrinsics = self.intrinsics.unsqueeze(0).to(self.device)
- dino_img = dino_img[0].permute(1, 2, 0)
- dino_intrinsics = dino_intrinsics[0]
-
- I = get_intrinsics(dino_intrinsics).to(self.device)
- E = get_extrinsics(self.extrinsics).to(self.device)
- P = obtain_projection_matrix(I, E)
-
- pixel_coordinates = get_pixel_from_3D_source(pcl_in_vehicle[:, :3], P)
- (
- lidar_points_in_frame,
- pixels_in_frame,
- ind_in_frame,
- ) = get_points_and_pixels_in_frame(
- pcl_in_vehicle[:, :3], pixel_coordinates, dino_img.shape[0], dino_img.shape[1]
- )
-
- dino_features = dino_img[pixels_in_frame[:, 1], pixels_in_frame[:, 0]]
- dino_pcl = torch.cat([pcl_in_odom[ind_in_frame][:, :3], dino_features], dim=-1)
-
- pos = torch.tensor(
- [
- self.odom_msg.pose.pose.position.x,
- self.odom_msg.pose.pose.position.y,
- self.odom_msg.pose.pose.position.z,
- ],
- device=self.device,
- )
-
- self.get_logger().info('colorized {}/{} points'.format(dino_pcl.shape[0], pcl_in_vehicle.shape[0]))
-
- return {
- "pos": pos,
- "pcl": pcl_in_odom,
- "image": img,
- "dino_image": dino_img,
- "dino_pcl": dino_pcl,
- "pixel_projection": pixel_coordinates[ind_in_frame],
- }
-
- def make_voxel_msg(self, voxel_grid):
- msg = FeatureVoxelGrid()
- msg.header.stamp = self.pcl_msg.header.stamp
- msg.header.frame_id = self.odom_frame
-
- msg.metadata.origin.x = voxel_grid.metadata.origin[0].item()
- msg.metadata.origin.y = voxel_grid.metadata.origin[1].item()
- msg.metadata.origin.z = voxel_grid.metadata.origin[2].item()
-
- msg.metadata.length.x = voxel_grid.metadata.length[0].item()
- msg.metadata.length.y = voxel_grid.metadata.length[1].item()
- msg.metadata.length.z = voxel_grid.metadata.length[2].item()
-
- msg.metadata.resolution.x = voxel_grid.metadata.resolution[0].item()
- msg.metadata.resolution.y = voxel_grid.metadata.resolution[1].item()
- msg.metadata.resolution.z = voxel_grid.metadata.resolution[2].item()
-
- msg.num_voxels = voxel_grid.features.shape[0]
- msg.num_features = voxel_grid.features.shape[1]
-
- if self.layer_keys is None:
- msg.feature_keys = [
- "{}_{}".format(self.layer_key, i) for i in range(voxel_grid.features.shape[1])
- ]
- else:
- msg.feature_keys = copy.deepcopy(self.layer_keys)
-
- msg.indices = voxel_grid.indices.tolist()
-
- feature_msg = Float32MultiArray()
- feature_msg.layout.dim.append(
- MultiArrayDimension(
- label="column_index",
- size=voxel_grid.features.shape[0],
- stride=voxel_grid.features.shape[0],
- )
- )
- feature_msg.layout.dim.append(
- MultiArrayDimension(
- label="row_index",
- size=voxel_grid.features.shape[0],
- stride=voxel_grid.features.shape[0] * voxel_grid.features.shape[1],
- )
- )
-
- feature_msg.data = voxel_grid.features.flatten().tolist()
-
- return msg
-
- def make_gridmap_msg(self, vmin=None, vmax=None):
- """
- convert dino into gridmap msg
-
- Publish all the feature channels, plus a visualization and elevation layer
-
- Note that we assume all the requisite stuff is available (pcl, img, odom) as this
- should only be called after a dino map is successfully produced
- """
- gridmap_msg = GridMap()
-
- gridmap_data = self.localmapper.bev_grid.data.cpu().numpy()
-
- # setup metadata
- print("gridmap_msg", dir(gridmap_msg.info))
- gridmap_msg.header.stamp = self.img_msg.header.stamp
- gridmap_msg.header.frame_id = self.odom_frame
-
- if self.layer_keys is None:
- gridmap_msg.layers = [
- "{}_{}".format(self.layer_key, i) for i in range(gridmap_data.shape[-1])
- ]
- else:
- gridmap_msg.layers = copy.deepcopy(self.layer_keys)
-
- gridmap_msg.info.resolution = self.localmapper.metadata.resolution.mean().item()
- gridmap_msg.info.length_x = self.localmapper.metadata.length[0].item()
- gridmap_msg.info.length_y = self.localmapper.metadata.length[1].item()
- gridmap_msg.info.pose.position.x = (
- self.localmapper.metadata.origin[0].item() + 0.5 * gridmap_msg.info.length_x
- )
- gridmap_msg.info.pose.position.y = (
- self.localmapper.metadata.origin[1].item() + 0.5 * gridmap_msg.info.length_y
- )
- gridmap_msg.info.pose.position.z = self.odom_msg.pose.pose.position.z
- gridmap_msg.info.pose.orientation.w = 1.0
- # transposed_layer_data = np.transpose(gridmap_data, (0, 2,1))
- # flipped_layer_data = np.flip(np.flip(transposed_layer_data, axis=1), axis=2)
-
- # gridmap_data has the shape (rows, cols, layers)
- # Step 1: Flip the 2D grid layers in both directions (reverse both axes)
- flipped_data = np.flip(gridmap_data, axis=(0, 1)) # Flips along both axes
-
- # Step 2: Transpose the first two dimensions (x, y) for each layer
- transposed_data = np.transpose(
- flipped_data, axes=(1, 0, 2)
- ) # Transpose rows and cols
-
- # Step 3: Flatten each 2D layer, maintaining the layers' structure (flattening across x, y)
- flattened_data = transposed_data.reshape(-1, gridmap_data.shape[-1])
- accum_time = 0
- for i in range(gridmap_data.shape[-1]):
- layer_data = gridmap_data[..., i]
- gridmap_layer_msg = Float32MultiArray()
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="column_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0],
- )
- )
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="row_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0] * layer_data.shape[1],
- )
- )
-
- # gridmap reverses the rasterization
- start_time = time.time()
- gridmap_layer_msg.data = flattened_data[:, i].tolist()
- end_time = time.time()
- accum_time += end_time - start_time
-
- # gridmap_layer_msg.data = flipped_layer_data[i].flatten().tolist()
- gridmap_msg.data.append(gridmap_layer_msg)
- self.get_logger().info("time to flatten layer {}: {}".format(i, accum_time))
- # add dummy elevation
- gridmap_msg.layers.append("elevation")
- layer_data = (
- np.zeros_like(gridmap_data[..., 0])
- + self.odom_msg.pose.pose.position.z
- - 1.73
- )
- gridmap_layer_msg = Float32MultiArray()
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="column_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0],
- )
- )
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="row_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0] * layer_data.shape[1],
- )
- )
-
- gridmap_layer_msg.data = layer_data.flatten().tolist()
- gridmap_msg.data.append(gridmap_layer_msg)
-
- # TODO: figure out how to support multiple viz output types
- gridmap_rgb = gridmap_data[..., :3]
- if vmin is None or vmax is None:
- vmin = vmin.cpu().numpy().reshape(1, 1, 3)
- vmax = vmax.cpu().numpy().reshape(1, 1, 3)
- else:
- vmin = gridmap_rgb.reshape(-1, 3).min(axis=0).reshape(1, 1, 3)
- vmax = gridmap_rgb.reshape(-1, 3).max(axis=0).reshape(1, 1, 3)
-
- gridmap_cs = ((gridmap_rgb - vmin) / (vmax - vmin)).clip(0.0, 1.0)
- gridmap_cs = (gridmap_cs * 255.0).astype(np.int32)
-
- gridmap_color = (
- gridmap_cs[..., 0] * (2 ** 16)
- + gridmap_cs[..., 1] * (2 ** 8)
- + gridmap_cs[..., 2]
- )
- gridmap_color = gridmap_color.view(dtype=np.float32)
-
- gridmap_msg.layers.append("rgb_viz")
- gridmap_layer_msg = Float32MultiArray()
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="column_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0],
- )
- )
- gridmap_layer_msg.layout.dim.append(
- MultiArrayDimension(
- label="row_index",
- size=layer_data.shape[0],
- stride=layer_data.shape[0] * layer_data.shape[1],
- )
- )
-
- gridmap_layer_msg.data = gridmap_color.T[::-1, ::-1].flatten().tolist()
- gridmap_msg.data.append(gridmap_layer_msg)
-
- return gridmap_msg
-
- def make_pcl_msg(self, pcl, vmin=None, vmax=None):
- """
- Convert dino pcl into message
- """
- self.get_logger().info("{}".format(pcl.shape))
- start_time = time.time()
- pcl_pos = pcl[:, :3].cpu().numpy()
-
- pcl_cs = pcl[:, 3:6]
- if vmin is None or vmax is None:
- vmin = pcl_cs.min(dim=0)[0].view(1, 3)
- vmax = pcl_cs.max(dim=0)[0].view(1, 3)
- else:
- vmin = vmin.view(1, 3)
- vmax = vmax.view(1, 3)
- pcl_cs = ((pcl_cs - vmin) / (vmax - vmin)).cpu().numpy()
-
- after_init_time = time.time()
-
- points = pcl_pos
- rgb_values = (pcl_cs * 255.0).astype(np.uint8)
- # Prepare the data array with XYZ and RGB
- xyzcolor = np.zeros(
- points.shape[0],
- dtype=[
- ("x", np.float32),
- ("y", np.float32),
- ("z", np.float32),
- ("rgb", np.float32),
- ],
- )
-
- # Assign XYZ values
- xyzcolor["x"] = points[:, 0]
- xyzcolor["y"] = points[:, 1]
- xyzcolor["z"] = points[:, 2]
-
- color = np.zeros(
- points.shape[0], dtype=[("r", np.uint8), ("g", np.uint8), ("b", np.uint8)]
- )
- color["r"] = rgb_values[:, 0]
- color["g"] = rgb_values[:, 1]
- color["b"] = rgb_values[:, 2]
- xyzcolor["rgb"] = ros2_numpy.point_cloud2.merge_rgb_fields(color)
-
- msg = ros2_numpy.msgify(PointCloud2, xyzcolor)
- msg.header.frame_id = self.odom_frame
- msg.header.stamp = self.pcl_msg.header.stamp
- self.get_logger().info("pcl total time: {}".format(time.time() - start_time))
-
- return msg
-
- def xyz_array_to_point_cloud_msg(
- self, points, frame, timestamp=None, rgb_values=None
- ):
- """
- Modified from: https://github.com/castacks/physics_atv_deep_stereo_vo/blob/main/src/stereo_node_multisense.py
- Please refer to this ros answer about the usage of point cloud message:
- https://answers.ros.org/question/234455/pointcloud2-and-pointfield/
- :param points:
- :param header:
- :return:
- """
- # Create the message header
- header = Header()
- header.frame_id = frame
- if timestamp is None:
- timestamp = self.get_clock().now().to_msg() # ROS2 time function
- header.stamp = timestamp
-
- msg = PointCloud2()
- msg.header = header
-
- # Determine point cloud dimensions (organized vs unorganized)
- if len(points.shape) == 3: # Organized point cloud
- msg.width = points.shape[0]
- msg.height = points.shape[1]
- else: # Unorganized point cloud
- msg.width = points.shape[0]
- msg.height = 1
-
- msg.is_bigendian = False
- msg.is_dense = False # Set to False since organized clouds are non-dense
-
- if rgb_values is None:
- # XYZ only
- msg.fields = [
- PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1),
- ]
- msg.point_step = 12 # 3 fields (x, y, z) each 4 bytes (float32)
- msg.row_step = msg.point_step * msg.width
- xyz = points.astype(np.float32)
- msg.data = xyz.tobytes() # Convert to bytes
- else:
- # XYZ and RGB
- msg.fields = [
- PointField(name="x", offset=0, datatype=PointField.FLOAT32, count=1),
- PointField(name="y", offset=4, datatype=PointField.FLOAT32, count=1),
- PointField(name="z", offset=8, datatype=PointField.FLOAT32, count=1),
- PointField(name="rgb", offset=12, datatype=PointField.UINT32, count=1),
- ]
- msg.point_step = (
- 16 # 4 fields (x, y, z, rgb) with rgb as a packed 32-bit integer
- )
- msg.row_step = msg.point_step * msg.width
-
- # Prepare the data array with XYZ and RGB
- xyzcolor = np.zeros(
- (points.shape[0],),
- dtype={
- "names": ("x", "y", "z", "rgba"),
- "formats": ("f4", "f4", "f4", "u4"),
- },
- )
-
- # Assign XYZ values
- xyzcolor["x"] = points[:, 0]
- xyzcolor["y"] = points[:, 1]
- xyzcolor["z"] = points[:, 2]
-
- # Prepare RGB values (packed into a 32-bit unsigned int)
- rgb_uint32 = np.zeros(points.shape[0], dtype=np.uint32)
- rgb_uint32 = (
- np.left_shift(rgb_values[:, 0].astype(np.uint32), 16)
- | np.left_shift(rgb_values[:, 1].astype(np.uint32), 8)
- | rgb_values[:, 2].astype(np.uint32)
- )
- xyzcolor["rgba"] = rgb_uint32
-
- msg.data = xyzcolor.tobytes() # Convert to bytes
-
- return msg
-
- def make_img_msg(self, dino_img, vmin=None, vmax=None):
- if vmin is None or vmax is None:
- viz_img = normalize_dino(dino_img[..., :3])
- else:
- vmin = vmin.view(1, 1, 3)
- vmax = vmax.view(1, 1, 3)
- viz_img = (dino_img[..., :3] - vmin) / (vmax-vmin)
- viz_img = viz_img.clip(0., 1.)
-
- viz_img = viz_img.cpu().numpy() * 255
- img_msg = self.bridge.cv2_to_imgmsg(viz_img.astype(np.uint8), "rgb8")
- img_msg.header.stamp = self.img_msg.header.stamp
- return img_msg
-
- def publish_messages(self, res):
- """
- Publish the dino pcl and dino map
- """
- if self.mapper_type == "bev":
- #use the aggregated embeddings for min/max to keep consistency across image/pcl/map
- bev_viz_data = self.localmapper.bev_grid.data[..., :3].view(-1, 3)
- vmin = bev_viz_data.min(dim=0)[0]
- vmax = bev_viz_data.max(dim=0)[0]
-
- gridmap_msg = self.make_gridmap_msg(vmin=vmin, vmax=vmax)
- self.gridmap_pub.publish(gridmap_msg)
-
- pcl_msg = self.make_pcl_msg(res["dino_pcl"], vmin=vmin, vmax=vmax)
- self.pcl_pub.publish(pcl_msg)
-
- img_msg = self.make_img_msg(res["dino_image"], vmin=vmin, vmax=vmax)
- self.image_pub.publish(img_msg)
-
- elif self.mapper_type == "voxel":
- pts = self.localmapper.voxel_grid.grid_indices_to_pts(
- self.localmapper.voxel_grid.raster_indices_to_grid_indices(
- self.localmapper.voxel_grid.indices
- )
- )
- colors = self.localmapper.voxel_grid.features[:, :3]
- vmin = colors.min(dim=0)[0]
- vmax = colors.max(dim=0)[0]
-
- voxel_viz_msg = self.make_pcl_msg(
- torch.cat([pts, colors], axis=-1), vmin=vmin, vmax=vmax
- )
- self.voxel_viz_pub.publish(voxel_viz_msg)
-
- voxel_msg = self.make_voxel_msg(self.localmapper.voxel_grid)
- self.voxel_pub.publish(voxel_msg)
-
- pcl_msg = self.make_pcl_msg(res["dino_pcl"], vmin=vmin, vmax=vmax)
- self.pcl_pub.publish(pcl_msg)
-
- img_msg = self.make_img_msg(res["dino_image"], vmin=vmin, vmax=vmax)
- self.image_pub.publish(img_msg)
-
- timing_msg = Float32()
- self.timing_pub.publish(timing_msg)
-
- def spin(self):
- self.get_logger().info("spinning...")
-
- start_time = time.time()
- res = self.preprocess_inputs()
- after_preprocess_time = time.time()
- if res:
- self.get_logger().info("updating localmap...")
-
- pts = res["dino_pcl"][:, :3]
- features = res["dino_pcl"][:, 3:]
-
- self.get_logger().info(
- "Got {} features, mapping first {}".format(
- features.shape[-1], self.localmapper.n_features
- )
- )
-
- self.localmapper.update_pose(res["pos"])
- self.localmapper.add_feature_pc(
- pts=pts, features=features[:, : self.localmapper.n_features]
- )
-
- after_update_time = time.time()
- self.publish_messages(res)
- after_publish_time = time.time()
- self.get_logger().info(
- "preprocess time: {}".format(after_preprocess_time - start_time)
- )
- self.get_logger().info(
- "update time: {}".format(after_update_time - start_time)
- )
- self.get_logger().info(
- "publish time: {}".format(after_publish_time - after_update_time)
- )
- self.get_logger().info("total time: {}".format(time.time() - start_time))
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- visual_mapping_node = DinoMappingNode()
- rclpy.spin(visual_mapping_node)
-
- visual_mapping_node.destroy_node()
- rclpy.shutdown()
-
-
-if __name__ == "__main__":
- main()
\ No newline at end of file
diff --git a/physics_atv_visual_mapping/feature_pointcloud.py b/physics_atv_visual_mapping/feature_pointcloud.py
deleted file mode 100644
index 9f22126..0000000
--- a/physics_atv_visual_mapping/feature_pointcloud.py
+++ /dev/null
@@ -1,25 +0,0 @@
-import torch
-
-class FeaturePointCloud:
- """
- Class for pointclouds with an arbitrary set of "features"
- """
- def __init__(self, pts, feats, pt_to_feat_idxs, device='cpu'):
- """
- Args:
- pts: A [N1x3] FloatTensor of spatial positions
- feats: A [N2xF] Float tensor of features, N2 <= N1
- pt_to_feat_idxs: A [N1] LongTensor of indices, suich that
- the k-th point in points should have features corresponding to the idxs[k]-th row in feats
- """
- self.pts = pts
- self.feats = feats
- self.pt_to_feat_idxs = pt_to_feat_idxs
- self.device = device
-
- def to(self, device):
- self.device = device
- self.pts = self.pts.to(device)
- self.feats = self.feats.to(device)
- self.pt_to_feat_idxs = self.pt_to_feat_idxs.to(device)
- return self
\ No newline at end of file
diff --git a/physics_atv_visual_mapping/odom_to_tf.py b/physics_atv_visual_mapping/odom_to_tf.py
deleted file mode 100644
index c5fb0ce..0000000
--- a/physics_atv_visual_mapping/odom_to_tf.py
+++ /dev/null
@@ -1,57 +0,0 @@
-import rclpy
-from rclpy.node import Node
-from nav_msgs.msg import Odometry
-from geometry_msgs.msg import TransformStamped
-from tf2_ros import TransformBroadcaster
-
-
-class OdometryToTF(Node):
- def __init__(self):
- super().__init__("odometry_to_tf")
-
- # Create a transform broadcaster to publish the tf
- self.tf_broadcaster = TransformBroadcaster(self)
-
- # Subscribe to the /odom topic
- self.odom_subscriber = self.create_subscription(
- Odometry, "/zedx/zed_node/odom", self.handle_odom, 10
- )
-
- def handle_odom(self, msg):
- # Create a new TransformStamped message to broadcast
- t = TransformStamped()
-
- # Set the header (time and frame ID)
- t.header.stamp = msg.header.stamp
- t.header.frame_id = "odom" # Parent frame (odom)
- t.child_frame_id = "base_link" # Child frame (base_link)
-
- # Use the position from the odometry message
- t.transform.translation.x = msg.pose.pose.position.x
- t.transform.translation.y = msg.pose.pose.position.y
- t.transform.translation.z = msg.pose.pose.position.z
-
- # Use the orientation from the odometry message
- t.transform.rotation = msg.pose.pose.orientation
-
- # Broadcast the transform
- self.tf_broadcaster.sendTransform(t)
-
-
-def main(args=None):
- rclpy.init(args=args)
-
- # Create the node
- node = OdometryToTF()
-
- try:
- # Keep the node running
- rclpy.spin(node)
- except KeyboardInterrupt:
- pass
- finally:
- rclpy.shutdown()
-
-
-if __name__ == "__main__":
- main()
diff --git a/physics_atv_visual_mapping/viz_gridmap.py b/physics_atv_visual_mapping/viz_gridmap.py
deleted file mode 100644
index 624c9d3..0000000
--- a/physics_atv_visual_mapping/viz_gridmap.py
+++ /dev/null
@@ -1,67 +0,0 @@
-import numpy as np
-from matplotlib import pyplot as plt
-import matplotlib.patches as patches
-
-# Variables
-gridmap_path = "/wheelsafe_ws/gridmap_data.npy"
-
-# Load gridmap
-gridmap_npy = np.load(gridmap_path) # [8, 64, 64]
-
-gridmap_npy = gridmap_npy.transpose(1, 2, 0)
-
-# Visualize
-# TODO: figure out how to support multiple viz types
-gridmap_rgb = gridmap_npy[..., :3]
-vmin = gridmap_rgb.reshape(-1, 3).min(axis=0).reshape(1, 1, 3)
-vmax = gridmap_rgb.reshape(-1, 3).max(axis=0).reshape(1, 1, 3)
-print(gridmap_rgb.dtype)
-print(vmin.dtype)
-gridmap_cs = (gridmap_rgb - vmin) / (vmax - vmin)
-gridmap_cs = (gridmap_cs * 255.0).astype(np.int32)
-
-# Make GUI to get gridmap_values at selected index
-# Create a figure and axis
-fig, ax = plt.subplots()
-
-# Display the gridmap RGB image
-img = ax.imshow(gridmap_cs)
-# Initialize a variable to store the rectangle to show index selected
-click_rect = None
-# Define a click event handler
-def onclick(event):
- global click_rect # Access the global rectangle object
- # Get the x and y pixel coordinates
- ix, iy = int(event.xdata), int(event.ydata)
-
- # Check if the click is inside the image
- if ix >= 0 and iy >= 0 and ix < gridmap_rgb.shape[1] and iy < gridmap_rgb.shape[0]:
- # Get the corresponding gridmap values at the clicked coordinates
- gridmap_values = gridmap_npy[
- iy, ix, :
- ] # Extract the values from gridmap_npy at the clicked point
-
- # Update the title with the gridmap values
- title = f"GridMap at ({ix}, {iy}): {gridmap_values}"
- ax.set_title(title)
- print(title)
-
- # Remove the previous rectangle if it exists
- if click_rect:
- click_rect.remove()
-
- # Draw a red rectangle around the clicked point
- click_rect = patches.Rectangle(
- (ix - 0.5, iy - 0.5), 1, 1, linewidth=2, edgecolor="r", facecolor="none"
- )
- ax.add_patch(click_rect)
-
- # Redraw the figure to update the rectangle and title
- fig.canvas.draw()
-
-
-# Connect the click event handler to the figure
-cid = fig.canvas.mpl_connect("button_press_event", onclick)
-
-# Show the plot
-plt.show()
diff --git a/resource/physics_atv_visual_mapping b/resource/physics_atv_visual_mapping
deleted file mode 100644
index e69de29..0000000
diff --git a/scripts/offline_processing/postprocess_tartandrive.py b/scripts/offline_processing/postprocess_tartandrive.py
index c57f06a..bcace42 100644
--- a/scripts/offline_processing/postprocess_tartandrive.py
+++ b/scripts/offline_processing/postprocess_tartandrive.py
@@ -10,7 +10,7 @@
from ros_torch_converter.datatypes.pointcloud import FeaturePointCloudTorch
-from tartandriver_utils.geometry_utils import TrajectoryInterpolator
+from physics_atv_visual_mapping.geometry_utils import TrajectoryInterpolator
from physics_atv_visual_mapping.image_processing.image_pipeline import (
setup_image_pipeline,
@@ -91,11 +91,11 @@
pcl_dir = os.path.join(args.run_dir, config['pointcloud']['folder'])
pcl_ts = np.loadtxt(os.path.join(pcl_dir, "timestamps.txt"))
- for pcl_idx in range(len(pcl_ts))[500:]:
+ for pcl_idx in range(len(pcl_ts))[1000:]:
pcl_fp = os.path.join(pcl_dir, "{:08d}.npy".format(pcl_idx))
pcl_t = pcl_ts[pcl_idx]
- pose = traj_interp(pcl_t)
+ pose = traj_interp([pcl_t])[0]
pose = pose_to_htm(pose).to(config["device"])
pcl = torch.from_numpy(np.load(pcl_fp)).float().to(config["device"])
@@ -136,8 +136,8 @@
img_t = img_ts[pcl_idx]
# pre-multiply by the tf from pc odom to img odom
- odom_pc_H = pose_to_htm(traj_interp(pcl_t)).to(config['device'])
- odom_img_H = pose_to_htm(traj_interp(img_t)).to(config['device'])
+ odom_pc_H = pose_to_htm(traj_interp([pcl_t])[0]).to(config['device'])
+ odom_img_H = pose_to_htm(traj_interp([img_t])[0]).to(config['device'])
pc_img_H = odom_img_H @ torch.linalg.inv(odom_pc_H)
E = pc_img_H @ image_extrinsics[ii]
I = feature_intrinsics[ii]
@@ -172,7 +172,7 @@
print('mapping took {:.4f}s'.format(t6-t5))
## viz code ##
- if (pcl_idx+1) % 50 == 0:
+ if (pcl_idx+1) % 250 == 0:
## viz proj ##
ni = len(images)
nax = ni+2
diff --git a/physics_atv_visual_mapping/voxel_localmapping.py b/scripts/ros/voxel_localmapping.py
similarity index 62%
rename from physics_atv_visual_mapping/voxel_localmapping.py
rename to scripts/ros/voxel_localmapping.py
index 69f04d6..ed17334 100755
--- a/physics_atv_visual_mapping/voxel_localmapping.py
+++ b/scripts/ros/voxel_localmapping.py
@@ -1,64 +1,32 @@
-import rclpy
-from rclpy.node import Node
+import rospy
import yaml
import copy
import numpy as np
np.float = np.float64 # hack for numpify
+import ros_numpy
import tf2_ros
import torch
import cv_bridge
-import os
from std_msgs.msg import Float32, Float32MultiArray, MultiArrayDimension
-from sensor_msgs.msg import PointCloud2, Image, CompressedImage
+from sensor_msgs.msg import PointCloud2, Image
from nav_msgs.msg import Odometry
from grid_map_msgs.msg import GridMap
-from perception_interfaces.msg import FeatureVoxelGrid
-from physics_atv_visual_mapping.image_processing.image_pipeline import (
- setup_image_pipeline,
-)
+from ros_torch_converter.datatypes.pointcloud import FeaturePointCloudTorch
+
+from physics_atv_visual_mapping.image_processing.image_pipeline import setup_image_pipeline
from physics_atv_visual_mapping.pointcloud_colorization.torch_color_pcl_utils import *
from physics_atv_visual_mapping.terrain_estimation.terrain_estimation_pipeline import setup_terrain_estimation_pipeline
-
-from physics_atv_visual_mapping.localmapping.bev.bev_localmapper import BEVLocalMapper
-from physics_atv_visual_mapping.localmapping.voxel.voxel_localmapper import (
- VoxelLocalMapper,
-)
+from physics_atv_visual_mapping.localmapping.voxel.voxel_localmapper import VoxelLocalMapper
from physics_atv_visual_mapping.localmapping.metadata import LocalMapperMetadata
-
from physics_atv_visual_mapping.utils import *
+import time
-
-class VoxelMappingNode(Node):
- def __init__(self):
- super().__init__("visual_mapping")
-
- self.declare_parameter("config_fp", "")
- self.declare_parameter("models_dir", "")
-
- self.get_logger().info(
- "Looking for models in {}".format(
- self.get_parameter("models_dir").get_parameter_value().string_value
- )
- )
-
- config_fp = self.get_parameter("config_fp").get_parameter_value().string_value
- config = yaml.safe_load(open(config_fp, "r"))
- config["models_dir"] = (
- self.get_parameter("models_dir").get_parameter_value().string_value
- )
-
- self.vehicle_frame = config["vehicle_frame"]
-
- self.localmap = None
- self.pcl_msg = None
- self.odom_msg = None
- self.img_msg = None
- self.odom_frame = None
-
+class VoxelMappingNode:
+ def __init__(self, config):
self.device = config["device"]
self.base_metadata = config["localmapping"]["metadata"]
self.localmap_ema = config["localmapping"]["ema"]
@@ -75,68 +43,55 @@ def __init__(self):
self.last_update_time = 0.0
self.image_pipeline = setup_image_pipeline(config)
-
self.setup_localmapper(config)
-
self.do_terrain_estimation = "terrain_estimation" in config.keys()
if self.do_terrain_estimation:
- self.get_logger().info("doing terrain estimation")
+ rospy.loginfo("doing terrain estimation")
self.terrain_estimator = setup_terrain_estimation_pipeline(config)
- self.tf_buffer = tf2_ros.Buffer()
- self.tf_listener = tf2_ros.TransformListener(self.tf_buffer, self)
+ self.vehicle_frame = config['vehicle_frame']
+ self.mapping_frame = config['mapping_frame']
+ self.tf_buffer = tf2_ros.Buffer()
+ self.tf_listener = tf2_ros.TransformListener(self.tf_buffer)
self.bridge = cv_bridge.CvBridge()
- self.compressed_img = config["image"]["image_compressed"]
- if self.compressed_img:
- self.image_sub = self.create_subscription(
- CompressedImage, config["image"]["image_topic"], self.handle_img, 1
- )
- else:
- self.image_sub = self.create_subscription(
- Image, config["image"]["image_topic"], self.handle_img, 1
- )
+ self.setup_ros_interface(config)
- self.intrinsics = (
- torch.tensor(config["intrinsics"]["P"], device=config["device"])
- .reshape(3, 3)
- .float()
- )
- self.dino_intrinsics = None
-
- self.extrinsics = pose_to_htm(
- np.concatenate(
- [
- np.array(config["extrinsics"]["p"]),
- np.array(config["extrinsics"]["q"]),
- ],
- axis=-1,
- )
- )
+ self.timer = rospy.Timer(rospy.Duration(config['rate']), self.spin)
- self.pcl_sub = self.create_subscription(
- PointCloud2, config["pointcloud"]["topic"], self.handle_pointcloud, 10
- )
- self.odom_sub = self.create_subscription(
- Odometry, config["odometry"]["topic"], self.handle_odom, 10
- )
+ def setup_ros_interface(self, config):
+ #guarantee images in same order
+ self.image_keys = []
+ self.image_data = {}
+ self.image_pubs = {}
+ self.image_subs = {}
- self.pcl_pub = self.create_publisher(PointCloud2, "/dino_pcl", 10)
- self.image_pub = self.create_publisher(Image, "/dino_image", 10)
+ self.pcl_msg = None
- self.voxel_pub = self.create_publisher(FeatureVoxelGrid, "/dino_voxels", 10)
- self.voxel_viz_pub = self.create_publisher(
- PointCloud2, "/dino_voxels_viz", 1
- )
+ for img_key, img_conf in config['images'].items():
+ self.image_data[img_key] = {}
+ self.image_keys.append(img_key)
- if self.do_terrain_estimation:
- self.gridmap_pub = self.create_publisher(GridMap, "/dino_gridmap", 10)
+ self.image_data[img_key]['intrinsics'] = get_intrinsics(np.array(img_conf["intrinsics"]["P"]).reshape(3, 3)).float().to(self.device)
+ self.image_data[img_key]['extrinsics'] = pose_to_htm(np.concatenate(
+ [
+ np.array(img_conf["extrinsics"]["p"]),
+ np.array(img_conf["extrinsics"]["q"]),
+ ], axis=-1,)).to(self.device)
+ self.image_data[img_key]['message'] = None
+
+ self.image_subs[img_key] = rospy.Subscriber(img_conf['image_topic'], Image, self.handle_img, callback_args=img_key)
+ self.image_pubs[img_key] = rospy.Publisher("feature_images/{}".format(img_key), Image)
+
+ self.pcl_sub = rospy.Subscriber(config["pointcloud"]["topic"], PointCloud2, self.handle_pointcloud)
- self.timing_pub = self.create_publisher(Float32, "/dino_proc_time", 10)
+ self.pcl_pub = rospy.Publisher("feature_pc", PointCloud2)
+ self.voxel_pub = rospy.Publisher("feature_voxels", PointCloud2)
- self.timer = self.create_timer(0.2, self.spin)
- self.viz = config["viz"]
+ # self.odom_sub = rospy.Subscriber(config["odometry"]["topic"], Odometry, self.handle_odom, 10)
+
+ # self.timing_pub = self.create_publisher(Float32, "/dino_proc_time", 10)
def setup_localmapper(self, config):
"""
@@ -162,151 +117,127 @@ def make_layer_keys(self, layer_keys):
return out
def handle_pointcloud(self, msg):
+ # rospy.loginfo('handling pointcloud')
self.pcl_msg = msg
- def handle_odom(self, msg):
- if self.odom_frame is None:
- self.odom_frame = msg.header.frame_id
- self.odom_msg = msg
+ # def handle_odom(self, msg):
+ # rospy.loginfo('handling odom')
+ # if self.odom_frame is None:
+ # self.odom_frame = msg.header.frame_id
+ # self.odom_msg = msg
- def handle_img(self, msg):
- self.img_msg = msg
+ def handle_img(self, msg, img_key):
+ # rospy.loginfo('handling img {}'.format(img_key))
+ self.image_data[img_key]['message'] = msg
def preprocess_inputs(self):
if self.pcl_msg is None:
- self.get_logger().warn("no pcl msg received")
+ rospy.logwarn("no pcl msg received")
return None
- pcl_time = (
- self.pcl_msg.header.stamp.sec + self.pcl_msg.header.stamp.nanosec * 1e-9
- )
+ pcl_time = self.pcl_msg.header.stamp.to_sec()
if abs(pcl_time - self.last_update_time) < 1e-3:
return None
- if self.odom_msg is None:
- self.get_logger().warn("no odom msg received")
- return None
-
- if self.img_msg is None:
- self.get_logger().warn("no img msg received")
- return None
-
- if not self.tf_buffer.can_transform(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- ):
- self.get_logger().warn(
- "cant tf from {} to {} at {}".format(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- self.pcl_msg.header.stamp,
- )
+ for img_key, img_data in self.image_data.items():
+ if img_data['message'] is None:
+ rospy.logwarn("no {} msg received".format(img_key))
+ return None
+
+ # need to wait for tf to be available
+ try:
+ tf_vehicle_to_pcl_msg = self.tf_buffer.lookup_transform(
+ self.vehicle_frame,
+ self.pcl_msg.header.frame_id,
+ self.pcl_msg.header.stamp,
+ timeout=rospy.Duration(0.1)
)
+ except (tf2_ros.TransformException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
+ rospy.logwarn("cant tf from {} to {} at {}".format(
+ self.vehicle_frame,
+ self.pcl_msg.header.frame_id,
+ self.pcl_msg.header.stamp,
+ ))
return None
- tf_vehicle_to_pcl_msg = self.tf_buffer.lookup_transform(
- self.vehicle_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- )
-
- if not self.tf_buffer.can_transform(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- ):
- self.get_logger().warn(
- "cant tf from {} to {} at {}".format(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- self.pcl_msg.header.stamp,
- )
+ try:
+ tf_odom_to_pcl_msg = self.tf_buffer.lookup_transform(
+ self.mapping_frame,
+ self.pcl_msg.header.frame_id,
+ self.pcl_msg.header.stamp,
+ timeout=rospy.Duration(0.1)
)
+ except (tf2_ros.TransformException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
+ rospy.logwarn("cant tf from {} to {} at {}".format(
+ self.mapping_frame,
+ self.pcl_msg.header.frame_id,
+ self.pcl_msg.header.stamp,
+ ))
return None
- tf_odom_to_pcl_msg = self.tf_buffer.lookup_transform(
- self.odom_frame,
- self.pcl_msg.header.frame_id,
- rclpy.time.Time.from_msg(self.pcl_msg.header.stamp),
- )
-
pcl = pcl_msg_to_xyz(self.pcl_msg).to(self.device)
vehicle_to_pcl_htm = tf_msg_to_htm(tf_vehicle_to_pcl_msg).to(self.device)
+
pcl_in_vehicle = transform_points(pcl.clone(), vehicle_to_pcl_htm)
odom_to_pcl_htm = tf_msg_to_htm(tf_odom_to_pcl_msg).to(self.device)
pcl_in_odom = transform_points(pcl.clone(), odom_to_pcl_htm)
+ odom_to_vehicle_htm = odom_to_pcl_htm @ torch.linalg.inv(vehicle_to_pcl_htm)
+
self.last_update_time = pcl_time
# Camera processing
- if self.compressed_img:
- img = (
- self.bridge.compressed_imgmsg_to_cv2(
- self.img_msg, desired_encoding="rgb8"
- )
- / 255.0
- )
- else:
+ images = []
+ image_intrinsics = []
+ image_extrinsics = []
+
+ for img_key in self.image_keys:
img = (
- self.bridge.imgmsg_to_cv2(self.img_msg, desired_encoding="rgb8").astype(
+ self.bridge.imgmsg_to_cv2(self.image_data[img_key]['message'], desired_encoding="rgb8").astype(
np.float32
)
/ 255.0
)
+ img = torch.tensor(img).unsqueeze(0).permute(0, 3, 1, 2).to(self.device)
- img = torch.tensor(img).unsqueeze(0).permute(0, 3, 1, 2)
+ images.append(img)
+ image_intrinsics.append(self.image_data[img_key]['intrinsics'])
+ image_extrinsics.append(self.image_data[img_key]['extrinsics'])
- dino_img, dino_intrinsics = self.image_pipeline.run(
- img, self.intrinsics.unsqueeze(0)
- )
- # dino_img = img.to(self.device)
- # dino_intrinsics = self.intrinsics.unsqueeze(0).to(self.device)
- dino_img = dino_img[0].permute(1, 2, 0)
- dino_intrinsics = dino_intrinsics[0]
-
- I = get_intrinsics(dino_intrinsics).to(self.device)
- E = get_extrinsics(self.extrinsics).to(self.device)
- P = obtain_projection_matrix(I, E)
-
- pixel_coordinates = get_pixel_from_3D_source(pcl_in_vehicle[:, :3], P)
- (
- lidar_points_in_frame,
- pixels_in_frame,
- ind_in_frame,
- ) = get_points_and_pixels_in_frame(
- pcl_in_vehicle[:, :3], pixel_coordinates, dino_img.shape[0], dino_img.shape[1]
+ images = torch.cat(images, dim=0)
+ image_intrinsics = torch.stack(image_intrinsics, dim=0)
+ image_extrinsics = torch.stack(image_extrinsics, dim=0)
+
+ feature_images, feature_intrinsics = self.image_pipeline.run(
+ images, image_intrinsics
)
- dino_features = dino_img[pixels_in_frame[:, 1], pixels_in_frame[:, 0]]
- dino_pcl = torch.cat([pcl_in_odom[ind_in_frame][:, :3], dino_features], dim=-1)
+ feature_images = feature_images.permute(0, 2, 3, 1)
+ image_Ps = get_projection_matrix(feature_intrinsics, image_extrinsics)
- pos = torch.tensor(
- [
- self.odom_msg.pose.pose.position.x,
- self.odom_msg.pose.pose.position.y,
- self.odom_msg.pose.pose.position.z,
- ],
- device=self.device,
- )
+ coords, valid_mask = get_pixel_projection(pcl_in_vehicle, image_Ps, feature_images)
+ pc_features, cnt = colorize(coords, valid_mask, feature_images)
+ pc_features = pc_features[cnt > 0]
+ feature_pcl = FeaturePointCloudTorch.from_torch(pts=pcl_in_odom, features=pc_features, mask=(cnt > 0))
+
+ pos = odom_to_vehicle_htm[:3, -1]
- self.get_logger().info('colorized {}/{} points'.format(dino_pcl.shape[0], pcl_in_vehicle.shape[0]))
+ rospy.loginfo('colorized {}/{} points'.format(feature_pcl.features.shape[0], feature_pcl.pts.shape[0]))
return {
"pos": pos,
"pcl": pcl_in_odom,
- "image": img,
- "dino_image": dino_img,
- "dino_pcl": dino_pcl,
- "pixel_projection": pixel_coordinates[ind_in_frame],
+ "images": images,
+ "feature_images": feature_images,
+ "feature_pc": feature_pcl,
}
def make_voxel_msg(self, voxel_grid):
msg = FeatureVoxelGrid()
msg.header.stamp = self.pcl_msg.header.stamp
- msg.header.frame_id = self.odom_frame
+ msg.header.frame_id = self.mapping_frame
msg.metadata.origin.x = voxel_grid.metadata.origin[0].item()
msg.metadata.origin.y = voxel_grid.metadata.origin[1].item()
@@ -429,7 +360,7 @@ def make_gridmap_msg(self, bev_grid):
# gridmap_layer_msg.data = flipped_layer_data[i].flatten().tolist()
gridmap_msg.data.append(gridmap_layer_msg)
- self.get_logger().info("time to flatten layer {}: {}".format(i, accum_time))
+ rospy.loginfo("time to flatten layer {}: {}".format(i, accum_time))
# add dummy elevation
gridmap_msg.layers.append("elevation")
layer_data = (
@@ -458,15 +389,23 @@ def make_gridmap_msg(self, bev_grid):
return gridmap_msg
+ def make_voxel_viz_msg(self, voxel_grid):
+ pts = voxel_grid.grid_indices_to_pts(voxel_grid.raster_indices_to_grid_indices(voxel_grid.raster_indices))
+ feats = voxel_grid.features
+ mask = voxel_grid.feature_mask
+
+ fpc = FeaturePointCloudTorch.from_torch(pts=pts, features=feats, mask=mask)
+
+ return self.make_pcl_msg(fpc)
+
def make_pcl_msg(self, pcl, vmin=None, vmax=None):
"""
Convert dino pcl into message
"""
- self.get_logger().info("{}".format(pcl.shape))
start_time = time.time()
- pcl_pos = pcl[:, :3].cpu().numpy()
+ pcl_pos = pcl.pts[pcl.feat_mask].cpu().numpy()
- pcl_cs = pcl[:, 3:6]
+ pcl_cs = pcl.features[:, :3]
if vmin is None or vmax is None:
vmin = pcl_cs.min(dim=0)[0].view(1, 3)
vmax = pcl_cs.max(dim=0)[0].view(1, 3)
@@ -479,34 +418,13 @@ def make_pcl_msg(self, pcl, vmin=None, vmax=None):
points = pcl_pos
rgb_values = (pcl_cs * 255.0).astype(np.uint8)
- # Prepare the data array with XYZ and RGB
- xyzcolor = np.zeros(
- points.shape[0],
- dtype=[
- ("x", np.float32),
- ("y", np.float32),
- ("z", np.float32),
- ("rgb", np.float32),
- ],
- )
-
- # Assign XYZ values
- xyzcolor["x"] = points[:, 0]
- xyzcolor["y"] = points[:, 1]
- xyzcolor["z"] = points[:, 2]
- color = np.zeros(
- points.shape[0], dtype=[("r", np.uint8), ("g", np.uint8), ("b", np.uint8)]
+ msg = self.xyz_array_to_point_cloud_msg(
+ points=pcl_pos,
+ frame=self.mapping_frame,
+ timestamp=self.pcl_msg.header.stamp,
+ rgb_values=rgb_values
)
- color["r"] = rgb_values[:, 0]
- color["g"] = rgb_values[:, 1]
- color["b"] = rgb_values[:, 2]
- xyzcolor["rgb"] = ros2_numpy.point_cloud2.merge_rgb_fields(color)
-
- msg = ros2_numpy.msgify(PointCloud2, xyzcolor)
- msg.header.frame_id = self.odom_frame
- msg.header.stamp = self.pcl_msg.header.stamp
- self.get_logger().info("pcl total time: {}".format(time.time() - start_time))
return msg
@@ -524,8 +442,6 @@ def xyz_array_to_point_cloud_msg(
# Create the message header
header = Header()
header.frame_id = frame
- if timestamp is None:
- timestamp = self.get_clock().now().to_msg() # ROS2 time function
header.stamp = timestamp
msg = PointCloud2()
@@ -593,7 +509,7 @@ def xyz_array_to_point_cloud_msg(
return msg
- def make_img_msg(self, dino_img, vmin=None, vmax=None):
+ def make_img_msg(self, dino_img, img_key, vmin=None, vmax=None):
if vmin is None or vmax is None:
viz_img = normalize_dino(dino_img[..., :3])
else:
@@ -604,7 +520,7 @@ def make_img_msg(self, dino_img, vmin=None, vmax=None):
viz_img = viz_img.cpu().numpy() * 255
img_msg = self.bridge.cv2_to_imgmsg(viz_img.astype(np.uint8), "rgb8")
- img_msg.header.stamp = self.img_msg.header.stamp
+ img_msg.header.stamp = self.image_data[img_key]['message'].header.stamp
return img_msg
def publish_messages(self, res):
@@ -653,60 +569,60 @@ def publish_messages(self, res):
timing_msg = Float32()
self.timing_pub.publish(timing_msg)
- def spin(self):
- self.get_logger().info("spinning...")
+ def spin(self, event):
+ rospy.loginfo("spinning...")
- start_time = time.time()
+ preproc_start_time = time.time()
res = self.preprocess_inputs()
- after_preprocess_time = time.time()
- if res:
- self.get_logger().info("updating localmap...")
-
- pts = res["dino_pcl"][:, :3]
- features = res["dino_pcl"][:, 3:]
-
- self.get_logger().info(
- "Got {} features, mapping first {}".format(
- features.shape[-1], self.localmapper.n_features
- )
- )
+ preproc_end_time = time.time()
- self.get_logger().info(str(res.keys()))
+ if res:
+ rospy.loginfo("updating localmap...")
+ update_start_time = time.time()
self.localmapper.update_pose(res["pos"])
self.localmapper.add_feature_pc(
- pts=pts, features=features[:, : self.localmapper.n_features]
+ pos=res['pos'], feat_pc=res["feature_pc"], do_raytrace=True
)
- self.localmapper.add_pc(res["pcl"][:, :3])
if self.do_terrain_estimation:
self.bev_grid = self.terrain_estimator.run(self.localmapper.voxel_grid)
- after_update_time = time.time()
- self.publish_messages(res)
- after_publish_time = time.time()
+ update_end_time = time.time()
+
+ pub_start_time = time.time()
+ msg = self.make_pcl_msg(res['feature_pc'])
+ self.pcl_pub.publish(msg)
+
+ msg = self.make_voxel_viz_msg(self.localmapper.voxel_grid)
+ self.voxel_pub.publish(msg)
- self.get_logger().info(
- "preprocess time: {}".format(after_preprocess_time - start_time)
+ for i, img_key in enumerate(self.image_keys):
+ feat_img = res["feature_images"][i]
+ pub = self.image_pubs[img_key]
+ msg = self.make_img_msg(feat_img, img_key)
+ pub.publish(msg)
+
+ pub_end_time = time.time()
+
+ rospy.loginfo(
+ "preprocess time: {}".format(preproc_end_time - preproc_start_time)
)
- self.get_logger().info(
- "update time: {}".format(after_update_time - start_time)
+ rospy.loginfo(
+ "update time: {}".format(update_end_time - update_start_time)
)
- self.get_logger().info(
- "publish time: {}".format(after_publish_time - after_update_time)
+ rospy.loginfo(
+ "publish time: {}".format(pub_end_time - pub_start_time)
)
- self.get_logger().info("total time: {}".format(time.time() - start_time))
-
-
-def main(args=None):
- rclpy.init(args=args)
+ rospy.loginfo("total time: {}".format(time.time() - preproc_start_time))
- visual_mapping_node = VoxelMappingNode()
- rclpy.spin(visual_mapping_node)
+if __name__ == '__main__':
+ rospy.init_node('visual_mapping')
- visual_mapping_node.destroy_node()
- rclpy.shutdown()
+ # config_fp = rospy.get_param("~config_fp")
+ config_fp = '/catkin_ws/src/vfm_voxel_mapping/visual_mapping_noetic/config/ros/rzr_voxel_mapping.yaml'
+ config = yaml.safe_load(open(config_fp, 'r'))
-if __name__ == "__main__":
- main()
\ No newline at end of file
+ visual_mapping_node = VoxelMappingNode(config)
+ rospy.spin()
\ No newline at end of file
diff --git a/setup.cfg b/setup.cfg
deleted file mode 100644
index 181b8b6..0000000
--- a/setup.cfg
+++ /dev/null
@@ -1,4 +0,0 @@
-[develop]
-script_dir=$base/lib/physics_atv_visual_mapping
-[install]
-install_scripts=$base/lib/physics_atv_visual_mapping
diff --git a/setup.py b/setup.py
index 2acb076..42d7c28 100644
--- a/setup.py
+++ b/setup.py
@@ -1,33 +1,9 @@
-from setuptools import find_packages, setup
-import os
-from glob import glob
+from distutils.core import setup
+from catkin_pkg.python_setup import generate_distutils_setup
-package_name = "physics_atv_visual_mapping"
-data_files = [
- ("share/ament_index/resource_index/packages", ["resource/" + package_name]),
- ("share/" + package_name, ["package.xml"]),
- (os.path.join("share", package_name), glob("launch/*.py")),
- (os.path.join("share", package_name, "config", "ros"), glob("config/ros/*.yaml")),
-]
-
-setup(
- name=package_name,
- version="0.0.0",
- packages=find_packages(exclude=["test"]),
- data_files=data_files,
- install_requires=["setuptools"],
- zip_safe=True,
- maintainer="cherieh",
- maintainer_email="cherieh@todo.todo",
- description="TODO: Package description",
- license="TODO: License declaration",
- tests_require=["pytest"],
- entry_points={
- "console_scripts": [
- "dino_localmapping = physics_atv_visual_mapping.dino_localmapping:main",
- "voxel_localmapping = physics_atv_visual_mapping.voxel_localmapping:main",
- "dino_cost = physics_atv_visual_mapping.dino_cost:main",
- "odom_to_tf = physics_atv_visual_mapping.odom_to_tf:main",
- ],
- },
+d = generate_distutils_setup(
+ packages=['physics_atv_visual_mapping'],
+ package_dir={'': 'src'}
)
+
+setup(**d)
\ No newline at end of file
diff --git a/physics_atv_visual_mapping/__init__.py b/src/physics_atv_visual_mapping/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/__init__.py
rename to src/physics_atv_visual_mapping/__init__.py
diff --git a/src/physics_atv_visual_mapping/geometry_utils.py b/src/physics_atv_visual_mapping/geometry_utils.py
new file mode 100644
index 0000000..7a07bee
--- /dev/null
+++ b/src/physics_atv_visual_mapping/geometry_utils.py
@@ -0,0 +1,221 @@
+#collection of trajectory/rotation math
+
+import torch
+import numpy as np
+import scipy.interpolate, scipy.spatial
+
+def quat_to_yaw(quat):
+ """
+ Convert a quaternion (as [x, y, z, w]) to yaw
+ """
+ return np.arctan2(2 * (quat[3]*quat[2] + quat[0]*quat[1]), 1 - 2 * (quat[1]**2 + quat[2]**2))
+
+def pose_to_htm(pose):
+ """convert a pose (position + quaternion) to a homogeneous transform matrix
+ """
+ p = pose[:3]
+ q = pose[3:7]
+
+ R = scipy.spatial.transform.Rotation.from_quat(q).as_matrix()
+
+ htm = np.eye(4)
+ htm[:3, :3] = R
+ htm[:3, -1] = p
+
+ return htm
+
+def htm_to_pose(htm):
+ """convert a htm (4x4) matrix to a pose (position + quaternion)
+ (quaternion will be [qx, qy, qz, qw])
+ """
+ R = htm[:3, :3]
+ p = htm[:3, -1]
+ q = scipy.spatial.transform.Rotation.from_matrix(R).as_quat()
+ pose = np.concatenate([p, q])
+ return pose
+
+def transform_points(points, htm):
+ """transform a set of points using a homogeneous transform matrix
+ Args:
+ points: [P x N] Tensor of points (assuming first three channels are [x,y,z])
+ htm: [4 x 4] transform matrix
+
+ Returns:
+ points: [P x N] Tensor of points where the first three channels have been transformed according to htm (note that this modifies points)
+ """
+ pt_pos = points[:, :3]
+ pt_pos = torch.cat([pt_pos, torch.ones_like(pt_pos[:, [0]])], dim=-1)
+ pt_tf_pos = htm.view(1, 4, 4) @ pt_pos.view(-1, 4, 1)
+ points[:, :3] = pt_tf_pos[:, :3, 0]
+ return points
+class TrajectoryInterpolator:
+ """
+ Helper class for interpolating trajectories
+ Expects either a 7-element trajectory as: [T x [x, y, z, qx, qy, qz, qw]]
+ or a 13-element trajectory as: [T x [x, y, z, qx, qy, qz, qw, vx, vy, vz, wx, wy, wz]]
+
+ functionally, works identically to the scipy interpolation object
+ """
+ def __init__(self, times, traj, tol=1e-1, interp_kwargs={}):
+ """
+ Args:
+ traj: the traj to interpolate (of shape [T x {7,13}])
+ times: the times corresponding to the traj
+ tol: the amount of allowable extrapolation
+ """
+ assert len(traj.shape) == 2, 'Expected traj of shape [T x 7/13], got {}'.format(traj.shape)
+ assert traj.shape[-1] in [7, 13], 'Expected traj of shape [T x 7/13], got {}'.format(traj.shape)
+ assert times.shape[0] == traj.shape[0], 'Got {} times, but {} steps in traj'.format(times.shape[0], traj.shape[0])
+
+ self.is_velocity = (traj.shape[-1] == 13)
+ self.tol = tol
+
+
+ #add tol
+ times = np.concatenate([np.array([times[0]-self.tol]), times, np.array([times[-1] + self.tol])])
+ traj = np.concatenate([traj[[0]], traj, traj[[-1]]], axis=0)
+
+ #edge case check
+ idxs = np.argsort(times)
+
+ #interpolate traj to get accurate times
+ self.interp_x = scipy.interpolate.interp1d(times[idxs], traj[idxs, 0], **interp_kwargs)
+ self.interp_y = scipy.interpolate.interp1d(times[idxs], traj[idxs, 1], **interp_kwargs)
+ self.interp_z = scipy.interpolate.interp1d(times[idxs], traj[idxs, 2], **interp_kwargs)
+
+ rots = scipy.spatial.transform.Rotation.from_quat(traj[:, 3:7])
+ self.interp_q = scipy.spatial.transform.Slerp(times[idxs], rots[idxs], **interp_kwargs)
+
+ if self.is_velocity:
+ self.interp_vx = scipy.interpolate.interp1d(times[idxs], traj[idxs, 7], **interp_kwargs)
+ self.interp_vy = scipy.interpolate.interp1d(times[idxs], traj[idxs, 8], **interp_kwargs)
+ self.interp_vz = scipy.interpolate.interp1d(times[idxs], traj[idxs, 9], **interp_kwargs)
+
+ self.interp_wx = scipy.interpolate.interp1d(times[idxs], traj[idxs, 10], **interp_kwargs)
+ self.interp_wy = scipy.interpolate.interp1d(times[idxs], traj[idxs, 11], **interp_kwargs)
+ self.interp_wz = scipy.interpolate.interp1d(times[idxs], traj[idxs, 12], **interp_kwargs)
+
+ def __call__(self, qtimes):
+ """
+ Interpolate the traj according to qtimes.
+ Args:
+ qtimes: the set of times to query
+ """
+ if self.is_velocity:
+ xs = self.interp_x(qtimes)
+ ys = self.interp_y(qtimes)
+ zs = self.interp_z(qtimes)
+ qs = self.interp_q(qtimes).as_quat()
+
+ vxs = self.interp_vx(qtimes)
+ vys = self.interp_vy(qtimes)
+ vzs = self.interp_vz(qtimes)
+ wxs = self.interp_wx(qtimes)
+ wys = self.interp_wy(qtimes)
+ wzs = self.interp_wz(qtimes)
+
+ traj = np.concatenate([
+ np.stack([xs, ys, zs], axis=-1),
+ qs,
+ np.stack([vxs, vys, vzs, wxs, wys, wzs], axis=-1)
+ ], axis=-1)
+ else:
+ xs = self.interp_x(qtimes)
+ ys = self.interp_y(qtimes)
+ zs = self.interp_z(qtimes)
+ qs = self.interp_q(qtimes).as_quat()
+
+ traj = np.concatenate([
+ np.stack([xs, ys, zs], axis=-1),
+ qs
+ ], axis=-1)
+
+ return traj
+
+def make_footprint(length, width, nl, nw, length_offset, width_offset, device='cpu'):
+ xs = torch.linspace(-length/2., length/2., nl, device=device) + length_offset
+ ys = torch.linspace(-width/2., width/2., nw, device=device) + width_offset
+ footprint = torch.stack(torch.meshgrid(xs, ys, indexing='ij'), dim=-1).view(-1, 2)
+ return footprint
+
+def apply_footprint(traj, footprint):
+ """
+ Given a B x K x T x 3 tensor of states (last dim is [x, y, th]),
+ return a B x K x T x F x 2 tensor of positions (F is each footprint sample)
+ """
+ tdims = traj.shape[:-1]
+ nf = footprint.shape[0]
+
+ pos = traj[..., :2]
+ th = traj[..., 2]
+
+ R = torch.stack([
+ torch.stack([th.cos(), -th.sin()], dim=-1),
+ torch.stack([th.sin(), th.cos()], dim=-1),
+ ], dim=-2) #[B x K x T x 2 x 2]
+
+ R_expand = R.view(*tdims, 1, 2, 2) #[B x K x T x F x 2 x 2]
+ footprint_expand = footprint.view(1, 1, 1, nf, 2, 1) #[B x K x T x F x 2 x 1]
+
+ footprint_rot = (R_expand @ footprint_expand).view(*tdims, nf, 2) #[B x K x T X F x 2]
+ footprint_traj = pos.view(*tdims, 1, 2) + footprint_rot
+
+ return footprint_traj
+
+if __name__ == '__main__':
+ #test trajectory interpolator by reading a rosbag
+ import rosbag
+ import matplotlib.pyplot as plt
+
+ bag_fp = '/home/striest/Desktop/datasets/yamaha_maxent_irl/big_gridmaps/rosbags_train/20220630/2022-06-30-16-13-36_0.bag'
+ odom_topic = '/odometry/filtered_odom'
+
+ bag = rosbag.Bag(bag_fp, 'r')
+ traj = []
+ timestamps = []
+
+ for topic, msg, t in bag.read_messages(topics=[odom_topic]):
+ traj.append(np.array([
+ msg.pose.pose.position.x,
+ msg.pose.pose.position.y,
+ msg.pose.pose.position.z,
+ msg.pose.pose.orientation.x,
+ msg.pose.pose.orientation.y,
+ msg.pose.pose.orientation.z,
+ msg.pose.pose.orientation.w,
+ msg.twist.twist.linear.x,
+ msg.twist.twist.linear.y,
+ msg.twist.twist.linear.z,
+ msg.twist.twist.angular.x,
+ msg.twist.twist.angular.y,
+ msg.twist.twist.angular.z,
+ ]))
+ timestamps.append(msg.header.stamp.to_sec())
+
+ #zero x, y, z.
+ traj = np.stack(traj, axis=0)
+ traj[:, :3] -= traj[[0], :3]
+ timestamps = np.array(timestamps)
+
+ skip = 20
+ subtraj = traj[::skip]
+ subtimes = timestamps[::skip]
+
+ tinterp = TrajectoryInterpolator(subtimes, subtraj)
+ qtimes = timestamps[:-2*skip]
+
+ itraj = tinterp(qtimes)
+
+ fig, axs = plt.subplots(1, 2, figsize=(18, 9))
+ axs[0].scatter(subtraj[:, 0], subtraj[:, 1], c='b', marker='.', label='knot pts')
+ axs[0].plot(itraj[:, 0], itraj[:, 1], c='r', label='interp')
+ axs[0].legend()
+
+ colors = 'rgbcmyk'
+ for i, label in enumerate(['x', 'y', 'z', 'qx', 'qy', 'qz', 'qw', 'vx', 'vy', 'vz', 'wx', 'wy', 'wz']):
+ color = colors[i % len(colors)]
+ axs[1].scatter(subtimes, subtraj[:, i], c=color, label=label, s=1.)
+ axs[1].plot(qtimes, itraj[:, i], c=color, alpha=0.5)
+ axs[1].legend()
+
+ plt.show()
\ No newline at end of file
diff --git a/physics_atv_visual_mapping/image_processing/__init__.py b/src/physics_atv_visual_mapping/image_processing/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/__init__.py
rename to src/physics_atv_visual_mapping/image_processing/__init__.py
diff --git a/physics_atv_visual_mapping/image_processing/anyloc_utils.py b/src/physics_atv_visual_mapping/image_processing/anyloc_utils.py
similarity index 99%
rename from physics_atv_visual_mapping/image_processing/anyloc_utils.py
rename to src/physics_atv_visual_mapping/image_processing/anyloc_utils.py
index 9e51884..b54894f 100644
--- a/physics_atv_visual_mapping/image_processing/anyloc_utils.py
+++ b/src/physics_atv_visual_mapping/image_processing/anyloc_utils.py
@@ -139,7 +139,7 @@ def preprocess(self, img):
)
return img
- @torch.compile
+ # @torch.compile
def __call__(self, img: torch.Tensor) -> torch.Tensor:
"""
Parameters:
diff --git a/physics_atv_visual_mapping/image_processing/image_pipeline.py b/src/physics_atv_visual_mapping/image_processing/image_pipeline.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/image_pipeline.py
rename to src/physics_atv_visual_mapping/image_processing/image_pipeline.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/__init__.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/__init__.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/__init__.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/base.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/base.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/base.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/base.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/dino.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/dino.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/dino.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/dino.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/pca.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/pca.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/pca.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/pca.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/pca_vlad.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/pca_vlad.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/pca_vlad.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/pca_vlad.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/radio.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/radio.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/radio.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/radio.py
diff --git a/physics_atv_visual_mapping/image_processing/processing_blocks/vlad.py b/src/physics_atv_visual_mapping/image_processing/processing_blocks/vlad.py
similarity index 100%
rename from physics_atv_visual_mapping/image_processing/processing_blocks/vlad.py
rename to src/physics_atv_visual_mapping/image_processing/processing_blocks/vlad.py
diff --git a/physics_atv_visual_mapping/localmapping/__init__.py b/src/physics_atv_visual_mapping/localmapping/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/__init__.py
rename to src/physics_atv_visual_mapping/localmapping/__init__.py
diff --git a/physics_atv_visual_mapping/localmapping/base.py b/src/physics_atv_visual_mapping/localmapping/base.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/base.py
rename to src/physics_atv_visual_mapping/localmapping/base.py
diff --git a/physics_atv_visual_mapping/localmapping/bev/__init__.py b/src/physics_atv_visual_mapping/localmapping/bev/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/bev/__init__.py
rename to src/physics_atv_visual_mapping/localmapping/bev/__init__.py
diff --git a/physics_atv_visual_mapping/localmapping/bev/bev_localmapper.py b/src/physics_atv_visual_mapping/localmapping/bev/bev_localmapper.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/bev/bev_localmapper.py
rename to src/physics_atv_visual_mapping/localmapping/bev/bev_localmapper.py
diff --git a/physics_atv_visual_mapping/localmapping/metadata.py b/src/physics_atv_visual_mapping/localmapping/metadata.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/metadata.py
rename to src/physics_atv_visual_mapping/localmapping/metadata.py
diff --git a/physics_atv_visual_mapping/localmapping/voxel/__init__.py b/src/physics_atv_visual_mapping/localmapping/voxel/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/voxel/__init__.py
rename to src/physics_atv_visual_mapping/localmapping/voxel/__init__.py
diff --git a/physics_atv_visual_mapping/localmapping/voxel/raytracing.py b/src/physics_atv_visual_mapping/localmapping/voxel/raytracing.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/voxel/raytracing.py
rename to src/physics_atv_visual_mapping/localmapping/voxel/raytracing.py
diff --git a/physics_atv_visual_mapping/localmapping/voxel/voxel_localmapper.py b/src/physics_atv_visual_mapping/localmapping/voxel/voxel_localmapper.py
similarity index 100%
rename from physics_atv_visual_mapping/localmapping/voxel/voxel_localmapper.py
rename to src/physics_atv_visual_mapping/localmapping/voxel/voxel_localmapper.py
diff --git a/physics_atv_visual_mapping/pointcloud_colorization/__init__.py b/src/physics_atv_visual_mapping/pointcloud_colorization/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/pointcloud_colorization/__init__.py
rename to src/physics_atv_visual_mapping/pointcloud_colorization/__init__.py
diff --git a/physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py b/src/physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py
similarity index 99%
rename from physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py
rename to src/physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py
index a082c3b..dd1f9c4 100644
--- a/physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py
+++ b/src/physics_atv_visual_mapping/pointcloud_colorization/torch_color_pcl_utils.py
@@ -1,12 +1,9 @@
import numpy as np
-import rclpy
import torch
# import matplotlib.pyplot as plt
import cv2
-from functools import reduce
import time
-import struct
import std_msgs.msg
from sensor_msgs.msg import PointCloud2, PointField, Image, CameraInfo
from std_msgs.msg import Header
diff --git a/physics_atv_visual_mapping/terrain_estimation/__init__.py b/src/physics_atv_visual_mapping/terrain_estimation/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/__init__.py
rename to src/physics_atv_visual_mapping/terrain_estimation/__init__.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/__init__.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/__init__.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/__init__.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/__init__.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/base.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/base.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/base.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/base.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/bev_feature_splat.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/bev_feature_splat.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/bev_feature_splat.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/bev_feature_splat.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_filter.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_filter.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_filter.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_filter.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_stats.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_stats.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_stats.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/elevation_stats.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/mrf_terrain_estimation.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/mrf_terrain_estimation.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/mrf_terrain_estimation.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/mrf_terrain_estimation.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/porosity.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/porosity.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/porosity.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/porosity.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/slope.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/slope.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/slope.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/slope.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_aware_bev_feature_splat.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_aware_bev_feature_splat.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_aware_bev_feature_splat.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_aware_bev_feature_splat.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_diff.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_diff.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_diff.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_diff.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_inflation.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_inflation.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_inflation.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/terrain_inflation.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/test.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/test.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/test.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/test.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/processing_blocks/utils.py b/src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/utils.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/processing_blocks/utils.py
rename to src/physics_atv_visual_mapping/terrain_estimation/processing_blocks/utils.py
diff --git a/physics_atv_visual_mapping/terrain_estimation/terrain_estimation_pipeline.py b/src/physics_atv_visual_mapping/terrain_estimation/terrain_estimation_pipeline.py
similarity index 100%
rename from physics_atv_visual_mapping/terrain_estimation/terrain_estimation_pipeline.py
rename to src/physics_atv_visual_mapping/terrain_estimation/terrain_estimation_pipeline.py
diff --git a/physics_atv_visual_mapping/utils.py b/src/physics_atv_visual_mapping/utils.py
similarity index 69%
rename from physics_atv_visual_mapping/utils.py
rename to src/physics_atv_visual_mapping/utils.py
index d86dfa4..9f9af4d 100644
--- a/physics_atv_visual_mapping/utils.py
+++ b/src/physics_atv_visual_mapping/utils.py
@@ -2,8 +2,8 @@
import numpy as np
from numpy import pi as PI
-np.float = np.float64 # hack for numpify
-import ros2_numpy
+np.float = np.float64 #hack for numpify
+import ros_numpy
from scipy.spatial.transform import Rotation
@@ -11,6 +11,35 @@
collection of common geometry operations on poses, points, etc
"""
+def pcl_msg_to_xyz(pcl_msg):
+ pcl_np = ros_numpy.numpify(pcl_msg)
+ xyz = np.stack([
+ pcl_np['x'],
+ pcl_np['y'],
+ pcl_np['z']
+ ], axis=-1)
+
+ return torch.from_numpy(xyz).float()
+
+def pcl_msg_to_xyzrgb(pcl_msg):
+ pcl_np = ros_numpy.numpify(pcl_msg)
+ xyz = np.stack([
+ pcl_np['x'],
+ pcl_np['y'],
+ pcl_np['z']
+ ], axis=-1)
+
+ colors_raw = pcl_np['rgb']
+ red = ((colors_raw & 0x00FF0000)>>16)
+ green = ((colors_raw & 0x0000FF00)>>8)
+ blue = ((colors_raw & 0x000000FF)>>0)
+ colors = np.stack([red, green, blue], axis=-1)/255.
+
+ return torch.from_numpy(np.concatenate([
+ xyz,
+ colors
+ ], axis=-1)).float()
+
DEG_2_RAD = PI/180.
RAD_2_DEG = 180./PI
@@ -44,7 +73,7 @@ def tf_msg_to_htm(tf_msg):
]
)
- R = Rotation.from_quat(q).as_matrix()
+ R = Rotation.from_quat(q).as_dcm()
htm = np.eye(4)
htm[:3, :3] = R
@@ -53,33 +82,11 @@ def tf_msg_to_htm(tf_msg):
return torch.from_numpy(htm).float()
-def pcl_msg_to_xyz(pcl_msg):
- pcl_np = ros2_numpy.numpify(pcl_msg)
- xyz = np.stack(
- [pcl_np["x"].flatten(), pcl_np["y"].flatten(), pcl_np["z"].flatten()], axis=-1
- )
-
- return torch.from_numpy(xyz).float()
-
-
-def pcl_msg_to_xyzrgb(pcl_msg):
- pcl_np = ros2_numpy.numpify(pcl_msg)
- xyz = np.stack([pcl_np["x"], pcl_np["y"], pcl_np["z"]], axis=-1)
-
- colors_raw = pcl_np["rgb"]
- red = (colors_raw & 0x00FF0000) >> 16
- green = (colors_raw & 0x0000FF00) >> 8
- blue = (colors_raw & 0x000000FF) >> 0
- colors = np.stack([red, green, blue], axis=-1) / 255.0
-
- return torch.from_numpy(np.concatenate([xyz, colors], axis=-1)).float()
-
-
def pose_to_htm(pose):
p = pose[:3]
q = pose[3:7]
- R = Rotation.from_quat(q).as_matrix()
+ R = Rotation.from_quat(q).as_dcm()
htm = np.eye(4)
htm[:3, :3] = R