diff --git a/angel_system/global_step_prediction/global_step_predictor.py b/angel_system/global_step_prediction/global_step_predictor.py index 9e1b11255..1ba1f95ba 100644 --- a/angel_system/global_step_prediction/global_step_predictor.py +++ b/angel_system/global_step_prediction/global_step_predictor.py @@ -936,7 +936,13 @@ def plot_gt_vs_predicted_plus_activations(self, step_gts, fname_suffix=None): plt.legend() if not fname_suffix: - fname_suffix = f"vid{vid_id}" + fname_suffix = "vid_unknown" + + # check for a long path in the fname_suffix + if "/" in fname_suffix: + # get just the end part of the path + fname_suffix = fname_suffix.split("/")[-1] + recipe_type = self.determine_recipe_from_gt_first_step(step_gts) title = f"plot_pred_vs_gt_{recipe_type}_{fname_suffix}.png" plt.title(title) diff --git a/ansible/roles/provision-files/vars/main.yml b/ansible/roles/provision-files/vars/main.yml index f14d133da..332aefeb6 100644 --- a/ansible/roles/provision-files/vars/main.yml +++ b/ansible/roles/provision-files/vars/main.yml @@ -71,15 +71,15 @@ girder_file_downloads: sha512: e6b382f5ba3d4d7f17d29caa3f6ea06dd5db24fee3bf59b12ff748d58b1b064c374741c805ee7c07a2a6950761a38e5398f3707ed3e83d7d53218972c57d12a2 dest: "{{ stage_dirs.object_detector }}/m2_det.pt" # Activity classifier - - file_id: 671bff2983e1acbfd87e723f - sha512: 5165219f60e3a160a8ec51f0446db4ec23ed70c399df66f482a233566e177d1f8cf044fc6ccc5227f5c748cbe6d2f17143109e791a139dfc2225d75de67829b2 + - file_id: 673f69bef4ba555b2ab8ae15 + sha512: 91d69908ef49b4a43dd344ae36a1c11eda6afb9b15dd748292435343087e08a3b1c22347efe592d7f078669d52a3dc496ca605ba4463997878151973e4885e77 dest: "{{ stage_dirs.activity_classifier }}/m2_tcn.ckpt" - - file_id: 672543fcca1110761bd32f7d - sha512: aef3ab3f8927f83b7ecad3cc28ea7b42e662d5c11740df1015542df6740db3633a7829187e9f5a35b4a66ebfeb48756c6598e4b7e6f68cc1ae90912fd71e31dd + - file_id: 673f69b3f4ba555b2ab8ae12 + sha512: 68bea2bd8748cc2ad55ca8784d45b9b0c9f21bd3cbca9d8983d158281adf15197d52a2582477ff1ac79ece0ce9187eb466056d407cb3e54894c3f9fd47dc5e41 dest: "{{ stage_dirs.activity_classifier }}/m2_config.yaml" # Global Step predictor model - - file_id: 6723f3d283e1acbfd87e7269 - sha512: fb676aae6f768498df860bbf4a1621ee8403cba96a1b9acc088f5f23671c57ca1f06f6c535fa8b0eb5074c7fed56e8ff5a7f990e2893f3759662c76137db1746 + - file_id: 673f74eff4ba555b2ab8ae3e + sha512: 437f5557ff3670c75118d7699964ece42d3480502a71888de62c53c6998b42a297fb7b21ea0bc6bfc4da59e64f9730b69d61d55b5f2c9dd73d93eeee682d60de dest: "{{ stage_dirs.task_monitor }}/global_step_predictor_act_avgs_m2.npy" # ---- M3 ---- @@ -122,15 +122,15 @@ girder_file_downloads: sha512: 7183385f8eaca85997725a107a76034de2bd4a59c1434b4bdb7c1ac8931cf4b68a53f6e736734643386364b9f0856de795a14965b6a02bc5eb5891252e6a73c9 dest: "{{ stage_dirs.object_detector }}/r18_det.pt" # Activity classifier - - file_id: 672cf299ca1110761bd32ffd - sha512: 0bfd4c5a1f254120758b9802eefef8933bfebf80837bd61445c3c26228edaefa34f0ed81178670538426983a93d60198f1be82a767e350b26d4fb3d081972b0d + - file_id: 673f6e62f4ba555b2ab8ae35 + sha512: e731dad0d0688ede4fc1b04b7c6c67478971ab97da062813ec04571daca07f413aa4775c385c3e8b28a8f5f50f8157e68547d45d033ab4bb0926935ad39e7f26 dest: "{{ stage_dirs.activity_classifier }}/r18_tcn.ckpt" - - file_id: 672cf437ca1110761bd33009 - sha512: d99c484071f22661425a9673ff351db90ce2fba40f2faa56f0b388deb2d0cdcc96931a2627e5aec45ab5a4624c9ab04da23b63364dd6eb1d5641a90701737006 + - file_id: 673f6e58f4ba555b2ab8ae32 + sha512: 
09a4c97e79c34ac95c95a4c63370b928527a98b9bab233aa12e4b14cfa95e0bb5a09bad90d19715ab656d17281ba5fbaa719647a5545bf2447ae584da5bd8d6a dest: "{{ stage_dirs.activity_classifier }}/r18_config.yaml" # Global Step predictor model - - file_id: 66464bf9687336214e7cdeae - sha512: bc7884c258cb8704372dd69a3e7d999eaf70d716d271982077c7216ef27ab01219ef1e488f6f9669e11a7e8aa6ffb9d8e07d74edc47294f90cc9e6a723f5a822 + - file_id: 673f8cbcf4ba555b2ab8ae42 + sha512: 4198d861961886b05b85dd178004ea2ca54ce4625bd9ea37a18f000f1090e8646e5838c1577090b799662e0cae779558f857b6a568c115f79bbca6d021a446f7 dest: "{{ stage_dirs.task_monitor }}/global_step_predictor_act_avgs_R18.npy" # List of git repositories to check out at a specific ref and then archive. diff --git a/ros/angel_msgs/CMakeLists.txt b/ros/angel_msgs/CMakeLists.txt index 692667a4e..eb5c103d9 100644 --- a/ros/angel_msgs/CMakeLists.txt +++ b/ros/angel_msgs/CMakeLists.txt @@ -34,6 +34,7 @@ set( message_files msg/HandJointPosesUpdate.msg msg/HeadsetAudioData.msg msg/HeadsetPoseData.msg + msg/ImageMetadata.msg msg/InterpretedAudioUserIntent.msg msg/InterpretedAudioUserEmotion.msg msg/JointKeypoints.msg diff --git a/ros/angel_msgs/msg/ImageMetadata.msg b/ros/angel_msgs/msg/ImageMetadata.msg new file mode 100644 index 000000000..12e2a940d --- /dev/null +++ b/ros/angel_msgs/msg/ImageMetadata.msg @@ -0,0 +1,13 @@ +# +# An image metadata message +# + +# Header should represent when this message was published not the image. +std_msgs/Header header + +# Timestamp of when the image was created. +builtin_interfaces/Time image_source_stamp + +# pixel size of the image +uint32 height +uint32 width diff --git a/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py b/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py index 9ec7e9a22..d5d9795d2 100644 --- a/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py +++ b/ros/angel_system_nodes/angel_system_nodes/activity_classification/activity_classifier_tcn.py @@ -29,7 +29,7 @@ FramePoses, ) from tcn_hpl.models.ptg_module import PTGLitModule -from sensor_msgs.msg import CameraInfo +from angel_msgs.msg import ImageMetadata from angel_system.activity_classification.tcn_hpl.predict import ( ResultsCollector, @@ -254,7 +254,7 @@ def __init__(self): # runtime-thread allocation. # This is intentionally before runtime-loop initialization. 
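For reference, a minimal standalone sketch (not part of this diff) of consuming the new ImageMetadata message defined above: it subscribes to a metadata topic and reports how far the publish stamp (header.stamp) trails the image source stamp (image_source_stamp), which is exactly the distinction the message comments call out. The node name is illustrative; the topic name matches the PVFramesBGR_md topic used by the tmux configs later in this diff.

```python
# Illustrative sketch only; assumes angel_msgs has been built so ImageMetadata is importable.
import rclpy
from rclpy.node import Node
from angel_msgs.msg import ImageMetadata


class ImageMetadataEcho(Node):
    def __init__(self) -> None:
        super().__init__("image_metadata_echo")
        self.create_subscription(
            ImageMetadata,
            "PVFramesBGR_md",  # assumed topic name, matching the tmux configs in this diff
            self.callback,
            1,
        )

    def callback(self, msg: ImageMetadata) -> None:
        # header.stamp = when this metadata message was published;
        # image_source_stamp = when the source image was created.
        pub_t = msg.header.stamp.sec + msg.header.stamp.nanosec * 1e-9
        src_t = msg.image_source_stamp.sec + msg.image_source_stamp.nanosec * 1e-9
        self.get_logger().info(
            f"{msg.width}x{msg.height} image, metadata lag: {pub_t - src_t:.3f} s"
        )


def main() -> None:
    rclpy.init()
    rclpy.spin(ImageMetadataEcho())
```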
self._img_ts_subscriber = self.create_subscription( - CameraInfo, + ImageMetadata, self._img_md_topic, self.img_md_callback, 1, @@ -385,12 +385,13 @@ def _thread_populate_from_coco(self, input_coco_path: Path) -> None: log.info(f"Queuing from COCO: n_dets={n_dets}, image_ts={image_ts}") self.det_callback(det_msg) # self.pose_callback(det_msg) - new_msg = CameraInfo() - new_msg.header.stamp = image_ts + new_msg = ImageMetadata() + new_msg.image_source_stamp = image_ts # set the image width and height from the image in the dataset img = dset.imgs[image_id] new_msg.width = img.get("width") new_msg.height = img.get("height") + new_msg.header.stamp = self.get_clock().now().to_msg() self.img_md_callback(new_msg) # Wait until `image_ts` was considered in the runtime loop before @@ -406,7 +407,7 @@ def _thread_populate_from_coco(self, input_coco_path: Path) -> None: log.info("Completed COCO file object yielding") self._rt_active.clear() - def img_md_callback(self, msg: CameraInfo) -> None: + def img_md_callback(self, msg: ImageMetadata) -> None: """ Capture a detection source image timestamp message. """ diff --git a/ros/angel_system_nodes/angel_system_nodes/data_publishers/hl2ss_ros_bridge.py b/ros/angel_system_nodes/angel_system_nodes/data_publishers/hl2ss_ros_bridge.py index beebf75df..ff1d2d389 100644 --- a/ros/angel_system_nodes/angel_system_nodes/data_publishers/hl2ss_ros_bridge.py +++ b/ros/angel_system_nodes/angel_system_nodes/data_publishers/hl2ss_ros_bridge.py @@ -9,7 +9,7 @@ ) import numpy as np from rclpy.node import Node -from sensor_msgs.msg import Image, CameraInfo +from sensor_msgs.msg import Image from shape_msgs.msg import ( Mesh, MeshTriangle, @@ -21,6 +21,7 @@ HeadsetAudioData, HeadsetPoseData, SpatialMesh, + ImageMetadata, ) from angel_utils import declare_and_get_parameters, RateTracker from angel_utils import make_default_main @@ -122,7 +123,7 @@ def __init__(self): Image, self._image_topic, 1 ) self.ros_frame_md_publisher = self.create_publisher( - CameraInfo, self._image_md_topic, 1 + ImageMetadata, self._image_md_topic, 1 ) self.connect_hl2ss_pv() log.info("PV client connected!") @@ -377,10 +378,11 @@ def pv_publisher(self) -> None: # Publish the image msg self.ros_frame_publisher.publish(image_msg) - new_msg = CameraInfo() - new_msg.header = image_msg.header + new_msg = ImageMetadata() + new_msg.image_source_stamp = image_msg.header.stamp new_msg.height = image_msg.height new_msg.width = image_msg.width + new_msg.header.stamp = self.get_clock().now().to_msg() self.ros_frame_md_publisher.publish(new_msg) # Publish the corresponding headset pose msg diff --git a/ros/angel_system_nodes/angel_system_nodes/data_publishers/image_metadata_relay.py b/ros/angel_system_nodes/angel_system_nodes/data_publishers/image_metadata_relay.py index 72fcec7f3..b16972cf9 100644 --- a/ros/angel_system_nodes/angel_system_nodes/data_publishers/image_metadata_relay.py +++ b/ros/angel_system_nodes/angel_system_nodes/data_publishers/image_metadata_relay.py @@ -5,10 +5,11 @@ from rclpy.callback_groups import MutuallyExclusiveCallbackGroup from rclpy.node import Node -from sensor_msgs.msg import Image, CameraInfo +from sensor_msgs.msg import Image from angel_utils import declare_and_get_parameters, RateTracker from angel_utils import make_default_main +from angel_msgs.msg import ImageMetadata PARAM_INPUT_TOPIC = "image_topic" @@ -37,22 +38,24 @@ def __init__(self): callback_group=MutuallyExclusiveCallbackGroup(), ) self._pub = self.create_publisher( - CameraInfo, + ImageMetadata, 
param_values[PARAM_OUTPUT_TOPIC], 1, callback_group=MutuallyExclusiveCallbackGroup(), ) def input_callback(self, msg: Image) -> None: - new_msg = CameraInfo() - new_msg.header = msg.header + new_msg = ImageMetadata() + new_msg.image_source_stamp = msg.header.stamp new_msg.height = msg.height new_msg.width = msg.width + new_msg.header.stamp = self.get_clock().now().to_msg() self._pub.publish(new_msg) self._rate_tracker.tick() self.get_logger().info( - f"[input_callback] CameraInfo message with TS={msg.header.stamp}, " + f"[input_callback] ImageMetadata message with TS={new_msg.header.stamp}, " + f"source TS={new_msg.image_source_stamp}, " f"image size {msg.width}x{msg.height}, " f"rate={self._rate_tracker.get_rate_avg()} Hz", ) diff --git a/ros/angel_system_nodes/angel_system_nodes/latency_tracker.py b/ros/angel_system_nodes/angel_system_nodes/latency_tracker.py index 5c515f50c..04609cf7b 100644 --- a/ros/angel_system_nodes/angel_system_nodes/latency_tracker.py +++ b/ros/angel_system_nodes/angel_system_nodes/latency_tracker.py @@ -5,7 +5,8 @@ from rclpy.node import Node from angel_system.utils.event import WaitAndClearEvent -from angel_utils.conversion import time_to_float +from angel_utils.conversion import time_to_float, time_to_int +from builtin_interfaces.msg import Time from angel_msgs.msg import ObjectDetection2dSet, HandJointPosesUpdate, ActivityDetection from angel_utils import ( @@ -14,7 +15,7 @@ make_default_main, ) from std_msgs.msg import String as ros2_string -from sensor_msgs.msg import CameraInfo +from angel_msgs.msg import ImageMetadata class LatencyTracker(Node): @@ -59,23 +60,23 @@ def __init__(self): # Single slot for latest image message to process detection over. # This should be written by the image topic subscriber callback, and # read from the runtime loop once per iteration. 
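The latency_tracker changes below replace the single latest-image slot with a dictionary mapping each image's source stamp to the time its metadata message was published, keyed by integer nanoseconds so lookups are exact. A minimal, self-contained sketch of that pattern follows; the stamp_to_ns helper is only a stand-in for what angel_utils.conversion.time_to_int is assumed to compute, and the class name is illustrative rather than anything in the node.

```python
# Illustrative sketch of the stamp-keyed lookup pattern used by latency_tracker below.
from threading import Lock
from typing import Dict, Optional

from builtin_interfaces.msg import Time


def stamp_to_ns(stamp: Time) -> int:
    """Convert a builtin_interfaces Time into integer nanoseconds (assumed time_to_int behavior)."""
    return stamp.sec * 1_000_000_000 + stamp.nanosec


class ImageTimeLookup:
    def __init__(self) -> None:
        self._lock = Lock()
        self._lookup: Dict[int, Time] = {}

    def record(self, source_stamp: Time, received_stamp: Time) -> None:
        """Remember when the metadata for a given source image arrived."""
        with self._lock:
            self._lookup[stamp_to_ns(source_stamp)] = received_stamp

    def get(self, source_stamp: Time) -> Optional[Time]:
        """Return the arrival stamp for a source image, or None if it was never seen."""
        with self._lock:
            return self._lookup.get(stamp_to_ns(source_stamp))
```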
- self._cur_image_msg_lock = Lock() self._cur_det_msg_lock = Lock() self._cur_pose_msg_lock = Lock() self._cur_act_msg_lock = Lock() + self._image_lookup_lock = Lock() self._rate_tracker = RateTracker() - self._img_time = None self._det = None self._pose = None self._act = None + self._image_lookup = {} ########################################## # Initialize ROS hooks log.info("Setting up ROS subscriptions and publishers...") self._img_ts_subscriber = self.create_subscription( - CameraInfo, + ImageMetadata, self._image_md_topic, self.img_md_callback, 1, @@ -151,31 +152,41 @@ def rt_loop(self): msg = ros2_string() - # get latency from image source stamp vs det_msg's + # get latency from image source stamp vs the time the message was created det_lat = None if self._det: with self._cur_det_msg_lock: det_msg = self._det dt_time = time_to_float(det_msg.header.stamp) - img_time = time_to_float(det_msg.source_stamp) - det_lat = dt_time - img_time + img_time = self.get_msg_time_from_source(det_msg.source_stamp) + if img_time is not None: + det_lat = dt_time - time_to_float(img_time) + pose_lat = None if self._pose: with self._cur_pose_msg_lock: pose_msg = self._pose ps_time = time_to_float(pose_msg.header.stamp) - img_time = time_to_float(pose_msg.source_stamp) - pose_lat = ps_time - img_time + img_time = self.get_msg_time_from_source(pose_msg.source_stamp) + if img_time is not None: + pose_lat = ps_time - time_to_float(img_time) + act_lat_start = None act_lat_end = None if self._act: with self._cur_act_msg_lock: act_msg = self._act act_time = time_to_float(act_msg.header.stamp) - img_time = time_to_float(act_msg.source_stamp_start_frame) - act_lat_start = act_time - img_time - img_time = time_to_float(act_msg.source_stamp_end_frame) - act_lat_end = act_time - img_time + img_time = self.get_msg_time_from_source( + act_msg.source_stamp_start_frame + ) + if img_time is not None: + act_lat_start = act_time - time_to_float(img_time) + img_time = self.get_msg_time_from_source( + act_msg.source_stamp_end_frame + ) + if img_time is not None: + act_lat_end = act_time - time_to_float(img_time) # save the info to the message data = { @@ -198,7 +209,7 @@ def rt_loop(self): self._rate_tracker.tick() log.info(f"Latency Rate: {self._rate_tracker.get_rate_avg()} Hz") - def img_md_callback(self, msg: CameraInfo) -> None: + def img_md_callback(self, msg: ImageMetadata) -> None: """ Capture a detection source image timestamp message. 
""" @@ -206,8 +217,8 @@ def img_md_callback(self, msg: CameraInfo) -> None: if self._enable_trace_logging: log.info(f"Received image with TS: {msg.header.stamp}") - with self._cur_image_msg_lock: - self._img_time = msg.header.stamp + with self._image_lookup_lock: + self._image_lookup[time_to_int(msg.image_source_stamp)] = msg.header.stamp self._rt_awake_evt.set() def det_callback(self, msg: ObjectDetection2dSet) -> None: @@ -222,6 +233,10 @@ def act_callback(self, msg: ActivityDetection) -> None: with self._cur_act_msg_lock: self._act = msg + def get_msg_time_from_source(self, source_stamp: Time) -> Time: + with self._image_lookup_lock: + return self._image_lookup.get(time_to_int(source_stamp)) + def destroy_node(self): print("Stopping runtime") self.rt_stop() diff --git a/ros/angel_utils/python/angel_utils/activity_classification.py b/ros/angel_utils/python/angel_utils/activity_classification.py index cc01e0c99..c7eecabad 100644 --- a/ros/angel_utils/python/angel_utils/activity_classification.py +++ b/ros/angel_utils/python/angel_utils/activity_classification.py @@ -15,11 +15,11 @@ # ROS Message types from builtin_interfaces.msg import Time -from sensor_msgs.msg import CameraInfo from angel_msgs.msg import ( HandJointPosesUpdate, ObjectDetection2dSet, + ImageMetadata, ) from angel_system.utils.matching import descending_match_with_tolerance @@ -44,7 +44,7 @@ class InputWindow: # Buffer for patient poses patient_joint_kps: List[Optional[HandJointPosesUpdate]] # Buffer for the image metadata - camera_info: List[Optional[CameraInfo]] + camera_info: List[Optional[ImageMetadata]] def __len__(self): return len(self.frames) @@ -147,7 +147,7 @@ class InputBuffer: ) # buffer for camera info - camera_info: Deque[CameraInfo] = field( + camera_info: Deque[ImageMetadata] = field( default_factory=deque, init=False, repr=False ) @@ -186,7 +186,7 @@ def latest_time(self) -> Time: def queue_image( self, img_mat: npt.NDArray[np.uint8], - camera_msg: CameraInfo, + img_meta_msg: ImageMetadata, image_frame_number: int, ) -> bool: """ @@ -195,7 +195,7 @@ def queue_image( not newer than the current latest frame. """ # get the timestamp from the image header - img_header_stamp = camera_msg.header.stamp + img_header_stamp = img_meta_msg.image_source_stamp # self.get_logger_fn().info(f"image header stamp: {img_header_stamp}") # self.get_logger_fn().info(f"self.frames[-1][0] header stamp: {self.frames[-1][0]}") with self.__state_lock: @@ -212,7 +212,7 @@ def queue_image( # save the image frame self.frames.append((img_header_stamp, img_mat, image_frame_number)) # save the camera info - self.camera_info.append(camera_msg) + self.camera_info.append(img_meta_msg) return True def queue_hand_pose(self, msg: HandJointPosesUpdate) -> bool: @@ -394,11 +394,7 @@ def get_window( frames=window_frames, obj_dets=window_dets, patient_joint_kps=window_joint_kps, - camera_info=[ - ci - for ci in self.camera_info - if ci.header.stamp in window_frame_times - ], + camera_info=[ci for ci in self.camera_info], ) return output diff --git a/tmux/demos/medical/Kitware-M2-bag.yml b/tmux/demos/medical/Kitware-M2-bag.yml new file mode 100644 index 000000000..d7de45bbc --- /dev/null +++ b/tmux/demos/medical/Kitware-M2-bag.yml @@ -0,0 +1,159 @@ +# +# System configuration to run the ANGEL system for the Kitware system with the +# Kitware HL2 ARUI app. +# +# This configuration is for the M2 tourniquet task. 
+# + +name: kitware-m2 +root: <%= ENV["ANGEL_WORKSPACE_DIR"] %> + +# Optional tmux socket +# socket_name: foo + +# Note that the pre and post options have been deprecated and will be replaced by +# project hooks. + +# Project hooks + +# Runs on project start, always +# on_project_start: command +on_project_start: | + export ROS_NAMESPACE=${ROS_NAMESPACE:-/kitware} + export HL2_IP=${HL2_IP:-192.168.4.2} + export CONFIG_DIR=${ANGEL_WORKSPACE_DIR}/config + export NODE_CONFIG_DIR=${ANGEL_WORKSPACE_DIR}/src/angel_system_nodes/configs + export MODEL_DIR=${ANGEL_WORKSPACE_DIR}/model_files + + # Changing the domain ID was important at KHQ to unblock perceived network + # congestion slowdowns to message sending. + export ROS_DOMAIN_ID=77 + + # Set the frame-rate to be used by multiple sources. This should be in frames + # per second (Hz). + export FRAME_RATE=15 + +# Run on project start, the first time +# on_project_first_start: command + +# Run on project start, after the first time +# on_project_restart: command + +# Run on project exit ( detaching from tmux session ) +# on_project_exit: command + +# Run on project stop +# on_project_stop: command + +# Runs in each window and pane before window/pane specific commands. Useful for setting up interpreter versions. +# pre_window: rbenv shell 2.0.0-p247 + +# Pass command line options to tmux. Useful for specifying a different tmux.conf. +# tmux_options: -f ~/.tmux.mac.conf +tmux_options: -f <%= ENV["ANGEL_WORKSPACE_DIR"] %>/tmux/tmux.conf + +windows: + - sensor_input_bag: + layout: even-vertical + panes: + # Read sensor input from bag file. + - sensor_input: echo ros2 bag play ${ANGEL_WORKSPACE_DIR}/ros_bags/... + - run_image_metadata: ros2 run angel_system_nodes image_metadata_relay --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p output_topic:=PVFramesBGR_md + - run_latency_node: ros2 run angel_system_nodes latency_tracker --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_md_topic:=PVFramesBGR_md + -p det_topic:=ObjectDetections2d + -p pose_topic:=PatientPose + -p activity_topic:=activity_topic + -p latency_topic:=latency + - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ROS_IP:=0.0.0.0 + + - pose_estimation: ros2 run angel_system_nodes pose_estimator --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p det_topic:=pose_dets + -p pose_topic:=PatientPose + -p det_net_checkpoint:=${MODEL_DIR}/pose_estimation/pose_det_model.pth + -p pose_net_checkpoint:=${MODEL_DIR}/pose_estimation/pose_model.pth + -p det_config:=${ANGEL_WORKSPACE_DIR}/python-tpl/TCN_HPL/tcn_hpl/data/utils/pose_generation/configs/medic_pose.yaml + -p pose_config:=${ANGEL_WORKSPACE_DIR}/python-tpl/TCN_HPL/tcn_hpl/data/utils/pose_generation/configs/ViTPose_base_medic_casualty_256x192.py + -p cuda_device_id:=0 + + - object_and_hand_detection: ros2 run angel_system_nodes object_and_hand_detector --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p det_topic:=ObjectDetections2d + -p object_net_checkpoint:=${MODEL_DIR}/object_detector/m2_det.pt + -p inference_img_size:=768 + -p hand_net_checkpoint:=${MODEL_DIR}/object_detector/hands_model.pt + -p cuda_device_id:=0 + + - activity_classifier: ros2 run angel_system_nodes activity_classifier_tcn --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_md_topic:=PVFramesBGR_md + -p det_topic:=ObjectDetections2d + -p pose_topic:=PatientPose + -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/m2.yaml + -p 
model_weights:=${MODEL_DIR}/activity_classifier/m2_tcn.ckpt + -p model_config:=${MODEL_DIR}/activity_classifier/m2_config.yaml + -p act_topic:=activity_topic + -p pose_repeat_rate:=7.5 + -p window_leads_with_objects:=true + -p model_device:=0 + + - task_monitor: + layout: even-vertical + panes: + - gsp: ros2 run angel_system_nodes global_step_predictor --ros-args + -r __ns:=${ROS_NAMESPACE} + -p config_file:=${CONFIG_DIR}/tasks/medical/multi-task-config-medical-m2.yaml + -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/m2.yaml + -p task_state_topic:=TaskUpdates + -p task_error_topic:=ARUISystemNotifications + -p system_command_topic:=SystemCommands + -p det_topic:=activity_topic + -p model_file:=${MODEL_DIR}/task_monitor/global_step_predictor_act_avgs_m2.npy + -p thresh_frame_count:=3 + -p deactivate_thresh_frame_count:=10 + -p threshold_multiplier_weak:=0.00 + -p threshold_frame_count_weak:=3 + -p step_mode:=granular + -p query_task_graph_topic:=query_task_graph + - echo: sleep 0.5 && ros2 topic echo --no-arr "${ROS_NAMESPACE}/TaskUpdates" + + - engineering-ui: + layout: even-vertical + panes: + - simple_2d_overlay: ros2 run angel_utils Simple2dDetectionOverlay --ros-args + -r __ns:=${ROS_NAMESPACE} + -p topic_input_images:=PVFramesBGR + -p topic_input_det_2d:=ObjectDetections2d + -p topic_input_joints:=PatientPose + -p topic_output_images:=pv_image_detections_2d + -p filter_top_k:=5 + -p max_image_history_seconds:=2.0 + -p publish_latency_seconds:=0.15 + - websocket: ros2 launch rosbridge_server rosbridge_websocket_launch.xml port:=9090 + - engineering_ui_server: node ros/angel_utils/multi_task_demo_ui/index.js + --namespace=${ROS_NAMESPACE} + --image_topic=pv_image_detections_2d/compressed + --query_task_graph_topic=query_task_graph + --task_updates_topic=TaskUpdates + --activity_detections_topic=activity_topic + --task_errors_topic=ARUISystemNotifications + + - feedback_generator: ros2 run angel_system_nodes feedback_generator --ros-args + -r __ns:=${ROS_NAMESPACE} + -p activity_detector_topic:=activity_topic + -p object_detection_topic:=ObjectDetections3d + -p task_monitor_topic:=TaskUpdates + -p arui_update_topic:=AruiUpdates + -p interp_user_intent_topic:=UserIntentPredicted + -p system_text_response_topic:=SystemTextResponse + -p system_notification_topic:=ARUISystemNotifications + -p task_error_topic:=ARUISystemNotifications diff --git a/tmux/demos/medical/Kitware-M2.yml b/tmux/demos/medical/Kitware-M2.yml index ae4287073..68740d86b 100644 --- a/tmux/demos/medical/Kitware-M2.yml +++ b/tmux/demos/medical/Kitware-M2.yml @@ -5,7 +5,7 @@ # This configuration is for the M2 tourniquet task. # -name: Kitware-M2-Tourniquet +name: kitware-m2 root: <%= ENV["ANGEL_WORKSPACE_DIR"] %> # Optional tmux socket @@ -56,8 +56,23 @@ windows: - sensor_input_bag: layout: even-vertical panes: - # Read sensor input from bag file. - - sensor_input: echo ros2 bag play ${ANGEL_WORKSPACE_DIR}/ros_bags/... 
+ # Read sensor input from the hololens + # the image size is set to 1280x720 because of the pose estimation node: + # https://github.com/PTG-Kitware/angel_system/blob/master/ros/angel_system_nodes/angel_system_nodes/pose_estimation/pose_estimator.py#L211 + - hl2ss_bridge: ros2 run angel_system_nodes hl2ss_ros_bridge --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ip_addr:=${HL2_IP} + -p image_topic:=PVFramesBGR + -p image_md_topic:=PVFramesBGR_md + -p hand_pose_topic:=HandJointPoseData + -p audio_topic:=HeadsetAudioData + -p sm_topic:=SpatialMapData + -p head_pose_topic:=HeadsetPoseData + -p pv_width:=1280 + -p pv_height:=720 + -p pv_framerate:=15 + -p sm_freq:=5 + -p rm_depth_AHAT:=disable - run_image_metadata: ros2 run angel_system_nodes image_metadata_relay --ros-args -r __ns:=${ROS_NAMESPACE} -p image_topic:=PVFramesBGR @@ -69,29 +84,9 @@ windows: -p pose_topic:=PatientPose -p activity_topic:=activity_topic -p latency_topic:=latency - -# - sensor_input_stream: -# layout: even-vertical -# panes: -# # Read sensor input from a HoloLens2 unit using HL2SS. -# - hl2ss_bridge: ros2 run angel_system_nodes hl2ss_ros_bridge --ros-args -# -r __ns:=${ROS_NAMESPACE} -# -p ip_addr:=${HL2_IP} -# -p image_topic:=PVFramesBGR -# -p image_md_topic:=PVFramesBGR_md -# -p hand_pose_topic:=disable -# -p audio_topic:=disable -# -p sm_topic:=disable -# -p head_pose_topic:=disable -# -p pv_width:=1280 -# -p pv_height:=720 -# -p pv_framerate:=${FRAME_RATE} -# -p sm_freq:=5 -# -p rm_depth_AHAT:=disable -# # Enable sending ROS2 messages to receiving agents in an HoloLens2 app. -# - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args -# -r __ns:=${ROS_NAMESPACE} -# -p ROS_IP:=0.0.0.0 + - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ROS_IP:=0.0.0.0 - pose_estimation: ros2 run angel_system_nodes pose_estimator --ros-args -r __ns:=${ROS_NAMESPACE} diff --git a/tmux/demos/medical/Kitware-R18-bag.yml b/tmux/demos/medical/Kitware-R18-bag.yml new file mode 100644 index 000000000..799198d0f --- /dev/null +++ b/tmux/demos/medical/Kitware-R18-bag.yml @@ -0,0 +1,159 @@ +# +# System configuration to run the ANGEL system for the Kitware system with the +# Kitware HL2 ARUI app. +# +# This configuration is for the R18 Chest Seal task. +# + +name: kitware-r18 +root: <%= ENV["ANGEL_WORKSPACE_DIR"] %> + +# Optional tmux socket +# socket_name: foo + +# Note that the pre and post options have been deprecated and will be replaced by +# project hooks. + +# Project hooks + +# Runs on project start, always +# on_project_start: command +on_project_start: | + export ROS_NAMESPACE=${ROS_NAMESPACE:-/kitware} + export HL2_IP=${HL2_IP:-192.168.4.2} + export CONFIG_DIR=${ANGEL_WORKSPACE_DIR}/config + export NODE_CONFIG_DIR=${ANGEL_WORKSPACE_DIR}/src/angel_system_nodes/configs + export MODEL_DIR=${ANGEL_WORKSPACE_DIR}/model_files + + # Changing the domain ID was important at KHQ to unblock perceived network + # congestion slowdowns to message sending. + export ROS_DOMAIN_ID=77 + + # Set the frame-rate to be used by multiple sources. This should be in frames + # per second (Hz). + export FRAME_RATE=15 + +# Run on project start, the first time +# on_project_first_start: command + +# Run on project start, after the first time +# on_project_restart: command + +# Run on project exit ( detaching from tmux session ) +# on_project_exit: command + +# Run on project stop +# on_project_stop: command + +# Runs in each window and pane before window/pane specific commands. 
Useful for setting up interpreter versions. +# pre_window: rbenv shell 2.0.0-p247 + +# Pass command line options to tmux. Useful for specifying a different tmux.conf. +# tmux_options: -f ~/.tmux.mac.conf +tmux_options: -f <%= ENV["ANGEL_WORKSPACE_DIR"] %>/tmux/tmux.conf + +windows: + - sensor_input_bag: + layout: even-vertical + panes: + # Read sensor input from bag file. + - sensor_input: echo ros2 bag play ${ANGEL_WORKSPACE_DIR}/ros_bags/rosbag2-bbn_lab_data-r18-positive-30424_NE_recordings-R18_5-TEST_SET/ + - run_image_metadata: ros2 run angel_system_nodes image_metadata_relay --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p output_topic:=PVFramesBGR_md + - run_latency_node: ros2 run angel_system_nodes latency_tracker --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_md_topic:=PVFramesBGR_md + -p det_topic:=ObjectDetections2d + -p pose_topic:=PatientPose + -p activity_topic:=activity_topic + -p latency_topic:=latency + - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ROS_IP:=0.0.0.0 + + - object_and_hand_detection: ros2 run angel_system_nodes object_and_hand_detector --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p det_topic:=ObjectDetections2d + -p object_net_checkpoint:=${MODEL_DIR}/object_detector/r18_det.pt + -p inference_img_size:=768 + -p hand_net_checkpoint:=${MODEL_DIR}/object_detector/hands_model.pt + -p cuda_device_id:=0 + + - pose_estimation: ros2 run angel_system_nodes pose_estimator --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_topic:=PVFramesBGR + -p det_topic:=pose_dets + -p pose_topic:=PatientPose + -p det_net_checkpoint:=${MODEL_DIR}/pose_estimation/pose_det_model.pth + -p pose_net_checkpoint:=${MODEL_DIR}/pose_estimation/pose_model.pth + -p det_config:=${ANGEL_WORKSPACE_DIR}/python-tpl/TCN_HPL/tcn_hpl/data/utils/pose_generation/configs/medic_pose.yaml + -p pose_config:=${ANGEL_WORKSPACE_DIR}/python-tpl/TCN_HPL/tcn_hpl/data/utils/pose_generation/configs/ViTPose_base_medic_casualty_256x192.py + -p cuda_device_id:=0 + + - activity_classifier: ros2 run angel_system_nodes activity_classifier_tcn --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_md_topic:=PVFramesBGR_md + -p det_topic:=ObjectDetections2d + -p pose_topic:=PatientPose + -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/r18.yaml + -p model_weights:=${MODEL_DIR}/activity_classifier/r18_tcn.ckpt + -p model_config:=${MODEL_DIR}/activity_classifier/r18_config.yaml + -p act_topic:=activity_topic + -p pose_repeat_rate:=7.5 + -p window_leads_with_objects:=true + -p model_device:=0 + + - task_monitor: + layout: even-vertical + panes: + - gsp: ros2 run angel_system_nodes global_step_predictor --ros-args + -r __ns:=${ROS_NAMESPACE} + -p config_file:=${CONFIG_DIR}/tasks/medical/multi-task-config-medical-r18.yaml + -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/r18.yaml + -p task_state_topic:=TaskUpdates + -p task_error_topic:=ARUISystemNotifications + -p system_command_topic:=SystemCommands + -p det_topic:=activity_topic + -p model_file:=${MODEL_DIR}/task_monitor/global_step_predictor_act_avgs_R18.npy + -p thresh_frame_count:=3 + -p deactivate_thresh_frame_count:=5 + -p threshold_multiplier_weak:=0.00 + -p threshold_frame_count_weak:=3 + -p step_mode:=granular + -p query_task_graph_topic:=query_task_graph + - echo: sleep 0.5 && ros2 topic echo --no-arr "${ROS_NAMESPACE}/TaskUpdates" + + - engineering-ui: + layout: even-vertical + panes: + - simple_2d_overlay: ros2 run angel_utils 
Simple2dDetectionOverlay --ros-args + -r __ns:=${ROS_NAMESPACE} + -p topic_input_images:=PVFramesBGR + -p topic_input_det_2d:=ObjectDetections2d + -p topic_input_joints:=PatientPose + -p topic_output_images:=pv_image_detections_2d + -p filter_top_k:=5 + -p max_image_history_seconds:=2.0 + -p publish_latency_seconds:=0.15 + - websocket: ros2 launch rosbridge_server rosbridge_websocket_launch.xml port:=9090 + - engineering_ui_server: node ros/angel_utils/multi_task_demo_ui/index.js + --namespace=${ROS_NAMESPACE} + --image_topic=pv_image_detections_2d/compressed + --query_task_graph_topic=query_task_graph + --task_updates_topic=TaskUpdates + --activity_detections_topic=activity_topic + --task_errors_topic=ARUISystemNotifications + + - feedback_generator: ros2 run angel_system_nodes feedback_generator --ros-args + -r __ns:=${ROS_NAMESPACE} + -p activity_detector_topic:=activity_topic + -p object_detection_topic:=ObjectDetections3d + -p task_monitor_topic:=TaskUpdates + -p arui_update_topic:=AruiUpdates + -p interp_user_intent_topic:=UserIntentPredicted + -p system_text_response_topic:=SystemTextResponse + -p system_notification_topic:=ARUISystemNotifications + -p task_error_topic:=ARUISystemNotifications diff --git a/tmux/demos/medical/Kitware-R18.yml b/tmux/demos/medical/Kitware-R18.yml index 881b16395..3e7e36d60 100644 --- a/tmux/demos/medical/Kitware-R18.yml +++ b/tmux/demos/medical/Kitware-R18.yml @@ -5,7 +5,7 @@ # This configuration is for the R18 Chest Seal task. # -name: kitware-R18-Chest-Seal +name: kitware-r18 root: <%= ENV["ANGEL_WORKSPACE_DIR"] %> # Optional tmux socket @@ -56,10 +56,23 @@ windows: - sensor_input_bag: layout: even-vertical panes: - # Read sensor input from bag file. - # May have to use: --remap /kitware/image_0:=/kitware/PVFramesBGR -# - sensor_input: echo ros2 bag play ${ANGEL_WORKSPACE_DIR}/ros_bags/rosbag2-bbn_lab_data-r18-positive-seal_videos-seal_2-TEST_SET/ - - sensor_input: echo ros2 bag play ${ANGEL_WORKSPACE_DIR}/ros_bags/rosbag2-bbn_lab_data-r18-positive-30424_NE_recordings-R18_5-TEST_SET/ + # Read sensor input from the hololens + # the image size is set to 1280x720 because of the pose estimation node: + # https://github.com/PTG-Kitware/angel_system/blob/master/ros/angel_system_nodes/angel_system_nodes/pose_estimation/pose_estimator.py#L211 + - hl2ss_bridge: ros2 run angel_system_nodes hl2ss_ros_bridge --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ip_addr:=${HL2_IP} + -p image_topic:=PVFramesBGR + -p image_md_topic:=PVFramesBGR_md + -p hand_pose_topic:=HandJointPoseData + -p audio_topic:=HeadsetAudioData + -p sm_topic:=SpatialMapData + -p head_pose_topic:=HeadsetPoseData + -p pv_width:=1280 + -p pv_height:=720 + -p pv_framerate:=15 + -p sm_freq:=5 + -p rm_depth_AHAT:=disable - run_image_metadata: ros2 run angel_system_nodes image_metadata_relay --ros-args -r __ns:=${ROS_NAMESPACE} -p image_topic:=PVFramesBGR @@ -71,49 +84,9 @@ windows: -p pose_topic:=PatientPose -p activity_topic:=activity_topic -p latency_topic:=latency - -# - sensor_input_stream: -# layout: even-vertical -# panes: -# # Read sensor input from a HoloLens2 unit using HL2SS. 
-# - sensor_input: echo ros2 run angel_system_nodes hl2ss_ros_bridge --ros-args -# -r __ns:=${ROS_NAMESPACE} -# -p ip_addr:=${HL2_IP} -# -p image_topic:=PVFramesBGR -# -p image_md_topic:=PVFramesBGR_md -# -p hand_pose_topic:=disable -# -p audio_topic:=HeadsetAudioData -# -p sm_topic:=disable -# -p head_pose_topic:=disable -# -p pv_width:=1280 -# -p pv_height:=720 -# -p pv_framerate:=${FRAME_RATE} -# -p sm_freq:=5 -# -p rm_depth_AHAT:=disable -# # Enable sending ROS2 messages to receiving agents in an HoloLens2 app. -# - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args -# -r __ns:=${ROS_NAMESPACE} -# -p ROS_IP:=0.0.0.0 -# - run_latency_node: ros2 run angel_system_nodes latency_tracker --ros-args -# -r __ns:=${ROS_NAMESPACE} -# -p image_md_topic:=PVFramesBGR_md -# -p det_topic:=ObjectDetections2d -# -p pose_topic:=PatientPose -# -p activity_topic:=activity_topic -# -p latency_topic:=latency - - - activity_classifier: ros2 run angel_system_nodes activity_classifier_tcn --ros-args - -r __ns:=${ROS_NAMESPACE} - -p image_md_topic:=PVFramesBGR_md - -p det_topic:=ObjectDetections2d - -p pose_topic:=PatientPose - -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/r18.yaml - -p model_weights:=${MODEL_DIR}/activity_classifier/r18_tcn.ckpt - -p model_config:=${MODEL_DIR}/activity_classifier/r18_config.yaml - -p act_topic:=activity_topic - -p pose_repeat_rate:=7.5 - -p window_leads_with_objects:=true - -p model_device:=0 + - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args + -r __ns:=${ROS_NAMESPACE} + -p ROS_IP:=0.0.0.0 - object_and_hand_detection: ros2 run angel_system_nodes object_and_hand_detector --ros-args -r __ns:=${ROS_NAMESPACE} @@ -135,6 +108,19 @@ windows: -p pose_config:=${ANGEL_WORKSPACE_DIR}/python-tpl/TCN_HPL/tcn_hpl/data/utils/pose_generation/configs/ViTPose_base_medic_casualty_256x192.py -p cuda_device_id:=0 + - activity_classifier: ros2 run angel_system_nodes activity_classifier_tcn --ros-args + -r __ns:=${ROS_NAMESPACE} + -p image_md_topic:=PVFramesBGR_md + -p det_topic:=ObjectDetections2d + -p pose_topic:=PatientPose + -p activity_config_file:=${CONFIG_DIR}/activity_labels/medical/r18.yaml + -p model_weights:=${MODEL_DIR}/activity_classifier/r18_tcn.ckpt + -p model_config:=${MODEL_DIR}/activity_classifier/r18_config.yaml + -p act_topic:=activity_topic + -p pose_repeat_rate:=7.5 + -p window_leads_with_objects:=true + -p model_device:=0 + - task_monitor: layout: even-vertical panes: @@ -148,9 +134,9 @@ windows: -p det_topic:=activity_topic -p model_file:=${MODEL_DIR}/task_monitor/global_step_predictor_act_avgs_R18.npy -p thresh_frame_count:=3 - -p deactivate_thresh_frame_count:=5 - -p threshold_multiplier_weak:=0.00 - -p threshold_frame_count_weak:=3 + -p deactivate_thresh_frame_count:=8 + -p threshold_multiplier_weak:=1.0 + -p threshold_frame_count_weak:=8 -p step_mode:=granular -p query_task_graph_topic:=query_task_graph - echo: sleep 0.5 && ros2 topic echo --no-arr "${ROS_NAMESPACE}/TaskUpdates" diff --git a/tmux/record/record_ros_bag.yml b/tmux/record/record_ros_bag.yml index 9c0affb7b..dc66cbca0 100644 --- a/tmux/record/record_ros_bag.yml +++ b/tmux/record/record_ros_bag.yml @@ -58,9 +58,8 @@ windows: - sensor_input: layout: even-vertical panes: - - datahub: ros2 run ros_tcp_endpoint default_server_endpoint --ros-args - -r __ns:=${ROS_NAMESPACE} - -p ROS_IP:=0.0.0.0 + # the image size is set to 1280x720 because of the pose estimation node: + # 
https://github.com/PTG-Kitware/angel_system/blob/master/ros/angel_system_nodes/angel_system_nodes/pose_estimation/pose_estimator.py#L211 - hl2ss_bridge: ros2 run angel_system_nodes hl2ss_ros_bridge --ros-args -r __ns:=${ROS_NAMESPACE} -p ip_addr:=${HL2_IP}
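Finally, an illustrative consumer (not part of this diff) for the latency topic that the latency_tracker node publishes and that these tmux configs wire up via latency_topic:=latency. The message type is std_msgs/String per the node's imports; treating its contents as JSON is an assumption based on the dict assembled in rt_loop, so the parse is guarded.

```python
# Illustrative sketch only; node name is hypothetical and the JSON payload is an assumption.
import json

import rclpy
from rclpy.node import Node
from std_msgs.msg import String


class LatencyEcho(Node):
    def __init__(self) -> None:
        super().__init__("latency_echo")
        self.create_subscription(String, "latency", self.callback, 1)

    def callback(self, msg: String) -> None:
        try:
            data = json.loads(msg.data)
        except json.JSONDecodeError:
            # Payload format is assumed; fall back to raw logging if it is not JSON.
            self.get_logger().warning(f"Non-JSON latency payload: {msg.data!r}")
            return
        self.get_logger().info(f"latencies: {data}")


def main() -> None:
    rclpy.init()
    rclpy.spin(LatencyEcho())
```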