diff --git a/example_scripts/record_watch_only.py b/example_scripts/watch_only_record.py
similarity index 100%
rename from example_scripts/record_watch_only.py
rename to example_scripts/watch_only_record.py
diff --git a/example_scripts/stream_watch_only.py b/example_scripts/watch_only_stream.py
similarity index 100%
rename from example_scripts/stream_watch_only.py
rename to example_scripts/watch_only_stream.py
diff --git a/example_scripts/watch_phone_pocket_stream.py b/example_scripts/watch_phone_pocket_stream.py
new file mode 100644
index 0000000..78c8ed9
--- /dev/null
+++ b/example_scripts/watch_phone_pocket_stream.py
@@ -0,0 +1,69 @@
+import argparse
+import atexit
+import logging
+import queue
+import signal
+import threading
+
+from wear_mocap_ape import config
+from wear_mocap_ape.data_types import messaging
+from wear_mocap_ape.stream.listener.imu import ImuListener
+from wear_mocap_ape.stream.publisher.kalman_pocket_phone_udp import KalmanPhonePocket
+
+if __name__ == "__main__":
+    logging.basicConfig(level=logging.INFO)
+    # Instantiate the parser
+    parser = argparse.ArgumentParser(description='')
+
+    # Required IP argument
+    parser.add_argument('ip', type=str, help=f'put your local IP here.')
+    parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories')
+    parser.add_argument('stream_mc', nargs='?', type=bool, default=True, help=f'whether you want to stream the full pose ensemble')
+    args = parser.parse_args()
+
+    ip_arg = args.ip
+    smooth_arg = args.smooth
+    stream_mc_arg = args.stream_mc
+
+    # data for left-hand mode
+    left_q = queue.Queue()
+
+    # listen for imu data from phone and watch
+    imu_l = ImuListener(
+        ip=ip_arg,
+        msg_size=messaging.watch_phone_imu_msg_len,
+        port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT,
+        tag="LISTEN IMU LEFT"
+    )
+    l_thread = threading.Thread(
+        target=imu_l.listen,
+        args=(left_q,)
+    )
+
+    # process into arm pose and body orientation
+    kpp = KalmanPhonePocket(ip=ip_arg,
+                            smooth=smooth_arg,
+                            num_ensemble=48,
+                            port=config.PORT_PUB_LEFT_ARM,
+                            window_size=10,
+                            stream_mc=stream_mc_arg,
+                            model_name="SW-model-sept-4")
+    p_thread = threading.Thread(
+        target=kpp.stream_wearable_devices,
+        args=(left_q, True,)
+    )
+
+    l_thread.start()
+    p_thread.start()
+
+
+    def terminate_all(*args):
+        imu_l.terminate()
+        kpp.terminate()
+
+
+    # make sure all handlers exit on termination
+    atexit.register(terminate_all)
+    signal.signal(signal.SIGTERM, terminate_all)
+    signal.signal(signal.SIGINT, terminate_all)
diff --git a/example_scripts/record_watch_phone.py b/example_scripts/watch_phone_uarm_record.py
similarity index 100%
rename from example_scripts/record_watch_phone.py
rename to example_scripts/watch_phone_uarm_record.py
diff --git a/example_scripts/stream_watch_phone.py b/example_scripts/watch_phone_uarm_stream.py
similarity index 99%
rename from example_scripts/stream_watch_phone.py
rename to example_scripts/watch_phone_uarm_stream.py
index 906b015..7f2d076 100644
--- a/example_scripts/stream_watch_phone.py
+++ b/example_scripts/watch_phone_uarm_stream.py
@@ -91,6 +91,7 @@ def terminate_all(*args):
     imu_r.terminate()
     wp2ul.terminate()
 
+
 # make sure all handler exit on termination
 atexit.register(terminate_all)
 signal.signal(signal.SIGTERM, terminate_all)
diff --git a/experimental_scripts/voice_transcription.py b/experimental_scripts/voice_transcribe.py
similarity index 100%
rename from experimental_scripts/voice_transcription.py
rename to experimental_scripts/voice_transcribe.py
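A note on the new watch_phone_pocket_stream.py above: argparse's type=bool does not parse strings — any non-empty value, including "False", is truthy, so stream_mc cannot actually be switched off from the command line this way. A minimal sketch of a safer converter (str2bool is our illustration, not part of the repo):

    import argparse

    def str2bool(v: str) -> bool:
        # argparse hands over the raw string; map common spellings explicitly
        if v.lower() in ("yes", "true", "t", "1"):
            return True
        if v.lower() in ("no", "false", "f", "0"):
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean, got {v!r}")

    parser = argparse.ArgumentParser()
    parser.add_argument('stream_mc', nargs='?', type=str2bool, default=True)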
diff --git a/setup.cfg b/setup.cfg
index d169270..5917482 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,6 +1,6 @@
 [metadata]
 name = wear_mocap_ape
-version = 1.0.2
+version = 1.1.0
 author = Fabian Weigend
 author_email = fweigend@asu.edu
 description =
@@ -25,6 +25,8 @@ install_requires =
     pandas
     matplotlib
     pynput
+    einops
+    bayesian_torch
 packages = find:
 include_package_data = True
diff --git a/src/wear_mocap_ape/config.py b/src/wear_mocap_ape/config.py
index 7a3b86d..17af206 100644
--- a/src/wear_mocap_ape/config.py
+++ b/src/wear_mocap_ape/config.py
@@ -1,10 +1,10 @@
-import os
+from pathlib import Path
 
-proj_path = os.path.dirname(os.path.abspath(__file__))
+proj_path = Path(__file__).parent.absolute()
 
 PATHS = {
-    "deploy": f"{proj_path}/data_deploy/",
-    "skeleton": f"{proj_path}/data_deploy/"
+    "deploy": proj_path / "data_deploy",
+    "skeleton": proj_path / "data_deploy"
 }
 
 # ports for publishing to other services
diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl
new file mode 100644
index 0000000..a135e39
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl differ
diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl
new file mode 100644
index 0000000..a5063f0
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl differ
diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl
new file mode 100644
index 0000000..372f79f
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl differ
diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl
new file mode 100644
index 0000000..6247b29
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl differ
diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl
new file mode 100644
index 0000000..1c35a1a
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl differ
diff --git a/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4 b/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4
new file mode 100644
index 0000000..67db508
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4 differ
diff --git a/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt
new file mode 100644
index 0000000..87d2541
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt differ
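With config.PATHS now holding pathlib.Path objects instead of formatted strings, consumers compose paths with the / operator; the Kalman publisher further down does exactly this when loading its checkpoint. A minimal usage sketch:

    from wear_mocap_ape import config

    # Path objects compose with "/" and are accepted by open(), torch.load(), etc.
    model_path = config.PATHS["deploy"] / "kalman" / "SW-model-sept-4"
    print(model_path.exists())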
"hidden_layer_count": 2, + "hidden_layer_size": 128, + "epochs": 200, + "batch_size": 64, + "learning_rate": 0.001, + "dropout": 0.4, + "sequence_len": 5, + "normalize": true, + "seq_overlap": true, + "create_norm_stats": true, + "hash": "04d2d059ee8980e13e5e16fe048b6bd0f8265c9a", + "y_targets_n": "ORI_CAL_LARM_UARM_HIPS", + "x_inputs_n": "WATCH_PH_HIP", + "y_targets_v": [ + "gt_larm_6drr_cal_1", + "gt_larm_6drr_cal_2", + "gt_larm_6drr_cal_3", + "gt_larm_6drr_cal_4", + "gt_larm_6drr_cal_5", + "gt_larm_6drr_cal_6", + "gt_uarm_6drr_cal_1", + "gt_uarm_6drr_cal_2", + "gt_uarm_6drr_cal_3", + "gt_uarm_6drr_cal_4", + "gt_uarm_6drr_cal_5", + "gt_uarm_6drr_cal_6", + "gt_hips_yrot_cal_sin", + "gt_hips_yrot_cal_cos" + ], + "x_inputs_v": [ + "sw_dt", + "sw_gyro_x", + "sw_gyro_y", + "sw_gyro_z", + "sw_lvel_x", + "sw_lvel_y", + "sw_lvel_z", + "sw_lacc_x", + "sw_lacc_y", + "sw_lacc_z", + "sw_grav_x", + "sw_grav_y", + "sw_grav_z", + "sw_6drr_cal_1", + "sw_6drr_cal_2", + "sw_6drr_cal_3", + "sw_6drr_cal_4", + "sw_6drr_cal_5", + "sw_6drr_cal_6", + "sw_pres_cal", + "ph_hips_yrot_cal_sin", + "ph_hips_yrot_cal_cos" + ], + "Loss/train": 0.13687036271612407, + "Loss/test": 0.28687360928033256, + "Loss/b_train": 0.1611349323569118, + "Loss/b_test": 0.2804641976381203 +} \ No newline at end of file diff --git a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt new file mode 100644 index 0000000..f7122d2 Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt differ diff --git a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json new file mode 100644 index 0000000..1e50e4d --- /dev/null +++ b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json @@ -0,0 +1,50 @@ +{ + "model": "DropoutFF", + "hidden_layer_count": 4, + "hidden_layer_size": 128, + "epochs": 200, + "batch_size": 64, + "learning_rate": 0.0001, + "dropout": 0.2, + "sequence_len": 5, + "normalize": true, + "seq_overlap": true, + "create_norm_stats": true, + "hash": "2b4e48b366d717b035751c40f977d9ae6c26d6b2", + "y_targets_n": "POS_RH_LARM_HAND", + "x_inputs_n": "WATCH_ONLY", + "y_targets_v": [ + "gt_hand_orig_rua_x", + "gt_hand_orig_rua_y", + "gt_hand_orig_rua_z", + "gt_larm_orig_rua_x", + "gt_larm_orig_rua_y", + "gt_larm_orig_rua_z" + ], + "x_inputs_v": [ + "sw_dt", + "sw_gyro_x", + "sw_gyro_y", + "sw_gyro_z", + "sw_lvel_x", + "sw_lvel_y", + "sw_lvel_z", + "sw_lacc_x", + "sw_lacc_y", + "sw_lacc_z", + "sw_grav_x", + "sw_grav_y", + "sw_grav_z", + "sw_6drr_cal_1", + "sw_6drr_cal_2", + "sw_6drr_cal_3", + "sw_6drr_cal_4", + "sw_6drr_cal_5", + "sw_6drr_cal_6", + "sw_pres_cal" + ], + "Loss/train": 0.24004665516297385, + "Loss/test": 0.377081323361239, + "Loss/b_train": 0.2682442721176126, + "Loss/b_test": 0.3734141369143522 +} \ No newline at end of file diff --git a/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/__init__.py b/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/__init__.py deleted file mode 100644 index e69de29..0000000 diff --git a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py b/src/wear_mocap_ape/data_deploy/nn/deploy_models.py index 25fd9f9..9196a34 100644 --- a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py +++ 
diff --git a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py b/src/wear_mocap_ape/data_deploy/nn/deploy_models.py
index 25fd9f9..9196a34 100644
--- a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py
+++ b/src/wear_mocap_ape/data_deploy/nn/deploy_models.py
@@ -1,5 +1,11 @@
 from enum import Enum
 
 
+class FF(Enum):
+    WATCH_ONLY = "2b4e48b366d717b035751c40f977d9ae6c26d6b2"
+
+
 class LSTM(Enum):
     WATCH_ONLY = "2c700a1ca1af084eedbae5bdd86a5194e42ded4d"
+    IMU_EXMP = "04d2d059ee8980e13e5e16fe048b6bd0f8265c9a"
+    WATCH_PHONE_POCKET = "ec70862ec17828320ca64a738b2a90cf5dcadced"
diff --git a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt
new file mode 100644
index 0000000..bb5cd75
Binary files /dev/null and b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt differ
diff --git a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json
new file mode 100644
index 0000000..030e46e
--- /dev/null
+++ b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json
@@ -0,0 +1,60 @@
+{
+    "model": "DropoutLSTM",
+    "hidden_layer_count": 2,
+    "hidden_layer_size": 128,
+    "epochs": 200,
+    "batch_size": 64,
+    "learning_rate": 0.001,
+    "dropout": 0.4,
+    "sequence_len": 5,
+    "normalize": true,
+    "seq_overlap": true,
+    "create_norm_stats": true,
+    "hash": "ec70862ec17828320ca64a738b2a90cf5dcadced",
+    "y_targets_n": "ORI_CAL_LARM_UARM_HIPS",
+    "x_inputs_n": "WATCH_PH_HIP",
+    "y_targets_v": [
+        "gt_larm_6drr_cal_1",
+        "gt_larm_6drr_cal_2",
+        "gt_larm_6drr_cal_3",
+        "gt_larm_6drr_cal_4",
+        "gt_larm_6drr_cal_5",
+        "gt_larm_6drr_cal_6",
+        "gt_uarm_6drr_cal_1",
+        "gt_uarm_6drr_cal_2",
+        "gt_uarm_6drr_cal_3",
+        "gt_uarm_6drr_cal_4",
+        "gt_uarm_6drr_cal_5",
+        "gt_uarm_6drr_cal_6",
+        "gt_hips_yrot_cal_sin",
+        "gt_hips_yrot_cal_cos"
+    ],
+    "x_inputs_v": [
+        "sw_dt",
+        "sw_gyro_x",
+        "sw_gyro_y",
+        "sw_gyro_z",
+        "sw_lvel_x",
+        "sw_lvel_y",
+        "sw_lvel_z",
+        "sw_lacc_x",
+        "sw_lacc_y",
+        "sw_lacc_z",
+        "sw_grav_x",
+        "sw_grav_y",
+        "sw_grav_z",
+        "sw_6drr_cal_1",
+        "sw_6drr_cal_2",
+        "sw_6drr_cal_3",
+        "sw_6drr_cal_4",
+        "sw_6drr_cal_5",
+        "sw_6drr_cal_6",
+        "sw_pres_cal",
+        "ph_hips_yrot_cal_sin",
+        "ph_hips_yrot_cal_cos"
+    ],
+    "Loss/train": 0.19960083461272307,
+    "Loss/test": 0.2753936388019971,
+    "Loss/b_train": 0.19960083461272307,
+    "Loss/b_test": 0.2753936388019971
+}
diff --git a/src/wear_mocap_ape/data_types/bone_map.py b/src/wear_mocap_ape/data_types/bone_map.py
index 893e460..c581766 100644
--- a/src/wear_mocap_ape/data_types/bone_map.py
+++ b/src/wear_mocap_ape/data_types/bone_map.py
@@ -39,7 +39,7 @@ class BoneMap:
     """
 
     # available without initialization
-    DEFAULT_LARM_LEN = 0.22
+    DEFAULT_LARM_LEN = 0.22  # 0.22
     DEFAULT_UARM_LEN = 0.30
     # default left shoulder origin relative to hip
     DEFAULT_L_SHOU_ORIG_RH = np.array([-0.1704612, 0.4309841, -0.00670862])
@@ -143,4 +143,3 @@ def left_upper_arm_origin_g(self):
     @property
     def hip_origin_g(self):
         return self.__bonemap[1].default_pos  # hips
-
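The enums above map a readable name to the hash directory holding the corresponding checkpoint, so call sites never hard-code hash strings. A usage sketch combining them with the loader shown later in models.py:

    from wear_mocap_ape.data_deploy.nn import deploy_models
    from wear_mocap_ape.estimate import models

    # resolve the pocket-phone LSTM by name rather than by raw hash
    model_hash = deploy_models.LSTM.WATCH_PHONE_POCKET.value
    nn_model = models.load_deployed_model_from_hash(hash_str=model_hash)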
"sw_forward_z": 26, + + # pressure at chest height + "sw_init_pres": 27 } watch_only_imu_msg_len = len(WATCH_ONLY_IMU_LOOKUP) * 4 diff --git a/src/wear_mocap_ape/data_types/voice_commands.py b/src/wear_mocap_ape/data_types/voice_commands.py index f59459e..7fdd431 100644 --- a/src/wear_mocap_ape/data_types/voice_commands.py +++ b/src/wear_mocap_ape/data_types/voice_commands.py @@ -5,16 +5,24 @@ """ KEY_PHRASES = { # dual manipulation task - "blue sky": 0, # left - "red apple": 1, # right - - # ROS experiment commands + # "blue": 0, # left + # "red": 1, # right + # + # # ROS experiment commands "stop": 3, # stop current task - "move": 4, # carry on with original task - "continue": 4, # carry on with original task + # "move": 4, # carry on with original task + # "continue": 4, # same as "move" "follow": 5, # move EE to hand "open": 6, # open the gripper + "close": 7, + + # "handover": 6, # same as "open" + # "over": 6, + # "give": 6, + # "cube": 6, + "home": 10, + "record": 11 - # simple test - "test": 99 + # # simple test + # "test": 99 } diff --git a/src/wear_mocap_ape/estimate/estimate_joints.py b/src/wear_mocap_ape/estimate/estimate_joints.py index a7442e6..640d76a 100644 --- a/src/wear_mocap_ape/estimate/estimate_joints.py +++ b/src/wear_mocap_ape/estimate/estimate_joints.py @@ -5,8 +5,11 @@ FUNCTION_LOOKUP = { NNS_TARGETS.ORI_CALIB_UARM_LARM: lambda a, b: uarm_larm_6drr_to_origins(a, b), - NNS_TARGETS.CONST_XYZ: lambda a, b: hand_larm_xyz_to_origins(a, b), - NNS_TARGETS.ORI_CALIB_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b) + NNS_TARGETS.POS_RH_LARM_HAND: lambda a, b: hand_larm_xyz_to_origins(a, b), + NNS_TARGETS.ORI_RH_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b), + NNS_TARGETS.POS_RH_LARM_HAND_HIPS: lambda a, b: larm_hand_hip_pos_rua_to_origins_g(a, b), + # NNS_TARGETS.KALMAN_PHONE_POCKET: lambda a, b: larm_uarm_hip_6dof_rh_to_origins_g(a, b), + NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS: lambda a, b: larm_uarm_hip_6dof_cal_to_origins_cal(a, b) } @@ -14,6 +17,99 @@ def arm_pose_from_nn_targets(preds: np.array, body_measurements: np.array, y_tar return FUNCTION_LOOKUP[y_targets](preds, body_measurements) +def larm_hand_hip_pos_rua_to_origins_g(preds: np.array, body_measure: np.array): + """ + :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] + :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] + :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] + """ + # split combined pred rows back into separate arrays + p_hand_orig_rua, p_larm_orig_rua, hips_sin, hips_cos = preds[:, :3], preds[:, 3:6], preds[:, 6], preds[:, 7] + uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] + + # transform to quats + larm_quat_rh = ts.quat_a_to_b(larm_vecs, p_hand_orig_rua - p_larm_orig_rua) + uarm_quat_rh = ts.quat_a_to_b(uarm_vecs, p_larm_orig_rua) + hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) + + uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) + larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) + + p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips + + # transform to global positions + p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua + p_hand_orig_g = p_uarm_orig_g + p_hand_orig_rua + return np.hstack([ + p_hand_orig_g, + p_larm_orig_g, + p_uarm_orig_g, + larm_quat_g, + uarm_quat_g, + hips_quat_g + ]) + + +def larm_uarm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array): + """ + :param preds: 
diff --git a/src/wear_mocap_ape/estimate/estimate_joints.py b/src/wear_mocap_ape/estimate/estimate_joints.py
index a7442e6..640d76a 100644
--- a/src/wear_mocap_ape/estimate/estimate_joints.py
+++ b/src/wear_mocap_ape/estimate/estimate_joints.py
@@ -5,8 +5,11 @@
 FUNCTION_LOOKUP = {
     NNS_TARGETS.ORI_CALIB_UARM_LARM: lambda a, b: uarm_larm_6drr_to_origins(a, b),
-    NNS_TARGETS.CONST_XYZ: lambda a, b: hand_larm_xyz_to_origins(a, b),
-    NNS_TARGETS.ORI_CALIB_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b)
+    NNS_TARGETS.POS_RH_LARM_HAND: lambda a, b: hand_larm_xyz_to_origins(a, b),
+    NNS_TARGETS.ORI_RH_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b),
+    NNS_TARGETS.POS_RH_LARM_HAND_HIPS: lambda a, b: larm_hand_hip_pos_rua_to_origins_g(a, b),
+    # NNS_TARGETS.KALMAN_PHONE_POCKET: lambda a, b: larm_uarm_hip_6dof_rh_to_origins_g(a, b),
+    NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS: lambda a, b: larm_uarm_hip_6dof_cal_to_origins_cal(a, b)
 }
 
@@ -14,6 +17,99 @@ def arm_pose_from_nn_targets(preds: np.array, body_measurements: np.array, y_tar
     return FUNCTION_LOOKUP[y_targets](preds, body_measurements)
 
 
+def larm_hand_hip_pos_rua_to_origins_g(preds: np.array, body_measure: np.array):
+    """
+    :param preds: [hand_orig_rua, larm_orig_rua, hips_sin, hips_cos]
+    :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh]
+    :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g]
+    """
+    # split combined pred rows back into separate arrays
+    p_hand_orig_rua, p_larm_orig_rua, hips_sin, hips_cos = preds[:, :3], preds[:, 3:6], preds[:, 6], preds[:, 7]
+    uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:]
+
+    # transform to quats
+    larm_quat_rh = ts.quat_a_to_b(larm_vecs, p_hand_orig_rua - p_larm_orig_rua)
+    uarm_quat_rh = ts.quat_a_to_b(uarm_vecs, p_larm_orig_rua)
+    hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos)
+
+    uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh)
+    larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh)
+
+    p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh)  # relative to hips
+
+    # transform to global positions
+    p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua
+    p_hand_orig_g = p_uarm_orig_g + p_hand_orig_rua
+    return np.hstack([
+        p_hand_orig_g,
+        p_larm_orig_g,
+        p_uarm_orig_g,
+        larm_quat_g,
+        uarm_quat_g,
+        hips_quat_g
+    ])
+
+
+def larm_uarm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array):
+    """
+    :param preds: [larm_6drr, uarm_6drr, hips_sin, hips_cos]
+    :param body_measure: [larm_vec, uarm_vec, uarm_orig_rh]
+    :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g]
+    """
+    # split combined pred rows back into separate arrays
+    larm_6drr, uarm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13]
+    larm_vecs, uarm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:]
+
+    # transform to quats
+    uarm_quat_rh = ts.six_drr_1x6_to_quat(uarm_6drr)
+    larm_quat_rh = ts.six_drr_1x6_to_quat(larm_6drr)
+    hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos)
+
+    uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh)
+    larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh)
+
+    # estimate origins in respective reference frame
+    p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh)  # relative to hips
+    p_larm_orig_g = ts.quat_rotate_vector(uarm_quat_g, uarm_vecs) + p_uarm_orig_g
+    p_hand_orig_g = ts.quat_rotate_vector(larm_quat_g, larm_vecs) + p_larm_orig_g
+
+    # assemble the output rows
+    return np.hstack([
+        p_hand_orig_g,
+        p_larm_orig_g,
+        p_uarm_orig_g,
+        larm_quat_g,
+        uarm_quat_g,
+        hips_quat_g
+    ])
+
+
+def larm_uarm_hip_6dof_cal_to_origins_cal(preds: np.array, body_measure: np.array):
+    # split combined pred rows back into separate arrays
+    larm_6drr, uarm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13]
+    larm_vecs, uarm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:]
+
+    # transform to quats
+    uarm_quat_cal = ts.six_drr_1x6_to_quat(uarm_6drr)
+    larm_quat_cal = ts.six_drr_1x6_to_quat(larm_6drr)
+    hips_quat_cal = ts.hips_sin_cos_to_quat(hips_sin, hips_cos)
+
+    # estimate origins in respective reference frame
+    p_uarm_orig_cal = ts.quat_rotate_vector(hips_quat_cal, uarm_orig_rh)
+    p_larm_orig_cal = ts.quat_rotate_vector(uarm_quat_cal, uarm_vecs) + p_uarm_orig_cal
+    p_hand_orig_cal = ts.quat_rotate_vector(larm_quat_cal, larm_vecs) + p_larm_orig_cal
+
+    # assemble the output rows
+    return np.hstack([
+        p_hand_orig_cal,
+        p_larm_orig_cal,
+        p_uarm_orig_cal,
+        larm_quat_cal,
+        uarm_quat_cal,
+        hips_quat_cal
+    ])
+
+
 def uarm_larm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array):
     """
     :param preds: [uarm_6drr, larm_6drr, hips_sin_cos]
@@ -55,7 +151,7 @@ def uarm_larm_6drr_to_origins(preds: np.array, body_measure: np.array):
     takes predictions from a model that has 6dof for lower and upper arm
     as well as upper arm radius as its outputs
     :param preds: [uarm_6drr, larm_6drr]
-    :param body_measure: [uarm_vec, larm_vec]
+    :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh]
     :return: [hand_orig, larm_orig, larm_quat_rh, uarm_quat_rh]
     """
@@ -65,7 +161,40 @@
     # get the default arm vectors from the row-by-row body measurements data
     # lengths may vary if data is shuffled and from different participants
     uarm_vecs = body_measure[:, :3]
-    larm_vecs = body_measure[:, 3:]
+    larm_vecs = body_measure[:, 3:6]
+    uarm_orig = body_measure[:, 6:]
+
+    # transform 6dof rotation representations back into quaternions
+    uarm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(uarm_6drr)
+    p_uarm_quat_rh = ts.rot_mat_1x9_to_quat(uarm_rot_mat)
+
+    larm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(larm_6drr)
+    p_larm_quat_rh = ts.rot_mat_1x9_to_quat(larm_rot_mat)
+
+    # get the transition from upper arm origin to lower arm origin
+    p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, uarm_vecs) + uarm_orig
+
+    # get transitions from lower arm origin to hand
+    # RE stands for relative to elbow (lower arm origin)
+    rotated_lower_arms_re = ts.quat_rotate_vector(p_larm_quat_rh, larm_vecs)
+    p_hand_orig_rh = rotated_lower_arms_re + p_larm_orig_rh
+
+    return np.hstack([
+        p_hand_orig_rh,
+        p_larm_orig_rh,
+        p_larm_quat_rh,
+        p_uarm_quat_rh
+    ])
+
+
+def larm_uarm_6drr_to_origins(preds: np.array, body_measure: np.array):
+    # split combined pred rows back into separate arrays
+    larm_6drr, uarm_6drr = preds[:, :6], preds[:, 6:]
+
+    # get the default arm vectors from the row-by-row body measurements data
+    # lengths may vary if data is shuffled and from different participants
+    larm_vecs = body_measure[:, :3]
+    uarm_vecs = body_measure[:, 3:]
 
     # transform 6dof rotation representations back into quaternions
     uarm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(uarm_6drr)
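All of these *_to_origins variants share one forward-kinematics chain: rotate the shoulder offset by the hips rotation to place the upper-arm origin, then rotate each bone vector by its predicted quaternion and accumulate down the chain. A condensed numpy sketch with single (non-batched) quaternions; chain_origins is our illustration, not a repo function:

    import numpy as np
    from wear_mocap_ape.data_types.bone_map import BoneMap
    from wear_mocap_ape.utility import transformations as ts

    def chain_origins(hips_quat, uarm_quat, larm_quat):
        # the shoulder sits at a fixed offset from the hips; each bone hangs off the previous joint
        uarm_orig = ts.quat_rotate_vector(hips_quat, BoneMap.DEFAULT_L_SHOU_ORIG_RH)
        larm_orig = uarm_orig + ts.quat_rotate_vector(uarm_quat, np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0]))
        hand_orig = larm_orig + ts.quat_rotate_vector(larm_quat, np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0]))
        return uarm_orig, larm_orig, hand_orig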
diff --git a/src/wear_mocap_ape/estimate/kalman_models.py b/src/wear_mocap_ape/estimate/kalman_models.py
new file mode 100644
index 0000000..589ebc8
--- /dev/null
+++ b/src/wear_mocap_ape/estimate/kalman_models.py
@@ -0,0 +1,215 @@
+from bayesian_torch.layers.flipout_layers.linear_flipout import LinearFlipout
+from torch.distributions.multivariate_normal import MultivariateNormal
+from einops import rearrange, repeat
+import torch
+import numpy as np
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class utils:
+    def __init__(self, num_ensemble, dim_x, dim_z):
+        self.num_ensemble = num_ensemble
+        self.dim_x = dim_x
+        self.dim_z = dim_z
+
+    def multivariate_normal_sampler(self, mean, cov, k):
+        sampler = MultivariateNormal(mean, cov)
+        return sampler.sample((k,))
+
+    def format_state(self, state):
+        state = repeat(state, "k dim -> n k dim", n=self.num_ensemble)
+        state = rearrange(state, "n k dim -> (n k) dim")
+        cov = torch.eye(self.dim_x) * 0.1
+        init_dist = self.multivariate_normal_sampler(
+            torch.zeros(self.dim_x), cov, self.num_ensemble
+        )
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        init_dist = init_dist.to(device)
+        state = state + init_dist
+        state = state.to(dtype=torch.float32)
+        return state
+
+
+class Seq_MLP_process_model(nn.Module):
+    def __init__(self, num_ensemble, dim_x, win_size, dim_model, num_heads):
+        super(Seq_MLP_process_model, self).__init__()
+        self.num_ensemble = num_ensemble
+        self.dim_x = dim_x
+        self.dim_model = dim_model
+        self.num_heads = num_heads
+        self.win_size = win_size
+
+        self.bayes1 = LinearFlipout(in_features=self.dim_x * win_size, out_features=256)
+        self.bayes3 = LinearFlipout(in_features=256, out_features=512)
+        self.bayes_m2 = torch.nn.Linear(512, self.dim_x)
+
+    def forward(self, input):
+        batch_size = input.shape[0]
+        input = rearrange(input, "n en k dim -> (n en) (k dim)")
+        # branch of the state
+        x, _ = self.bayes1(input)
+        x = F.leaky_relu(x)
+        x, _ = self.bayes3(x)
+        x = F.leaky_relu(x)
+        x = self.bayes_m2(x)
+        output = rearrange(x, "(bs en) dim -> bs en dim", en=self.num_ensemble)
+        return output
+
+
+class NewObservationNoise(nn.Module):
+    def __init__(self, dim_z, r_diag):
+        """
+        The observation noise model learns the observation noise covariance matrix
+        R from the learned observation. The Kalman filter requires an explicit matrix
+        for R, therefore we construct the diagonal of R to model the noise here.
+        input -> [batch_size, 1, encoding/dim_z]
+        output -> [batch_size, dim_z, dim_z]
+        """
+        super(NewObservationNoise, self).__init__()
+        self.dim_z = dim_z
+        self.r_diag = r_diag
+
+        self.fc1 = nn.Linear(self.dim_z, 32)
+        self.fc2 = nn.Linear(32, self.dim_z)
+
+    def forward(self, inputs):
+        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        batch_size = inputs.shape[0]
+        constant = np.ones(self.dim_z) * 1e-3
+        init = np.sqrt(np.square(self.r_diag) - constant)
+        diag = self.fc1(inputs)
+        diag = F.relu(diag)
+        diag = self.fc2(diag)
+        diag = torch.square(diag + torch.Tensor(constant).to(device)) + torch.Tensor(
+            init
+        ).to(device)
+        diag = rearrange(diag, "bs k dim -> (bs k) dim")
+        R = torch.diag_embed(diag)
+        return R
+
+
+class SeqSensorModel(nn.Module):
+    """
+    The sensor model takes the current raw sensor (usually high-dimensional images)
+    and maps the raw sensor to a low-dimensional encoding. Many advanced model
+    architectures can be explored here, i.e., Vision Transformer, FlowNet,
+    RAFT, the ResNet families, etc.
+
+    input -> [batch_size, 1, win, raw_input]
+    output -> [batch_size, num_ensemble, dim_z]
+    """
+
+    def __init__(self, num_ensemble, dim_z, win_size, input_size_1):
+        super(SeqSensorModel, self).__init__()
+        self.dim_z = dim_z
+        self.num_ensemble = num_ensemble
+
+        self.fc2 = nn.Linear(input_size_1 * win_size, 256)
+        self.fc3 = LinearFlipout(256, 256)
+        self.fc5 = LinearFlipout(256, 64)
+        self.fc6 = LinearFlipout(64, self.dim_z)
+
+    def forward(self, x):
+        batch_size = x.shape[0]
+        x = rearrange(x, "bs k en dim -> bs (k en dim)")
+        x = repeat(x, "bs dim -> bs k dim", k=self.num_ensemble)
+        x = rearrange(x, "bs k dim -> (bs k) dim")
+
+        x = self.fc2(x)
+        x = F.leaky_relu(x)
+        x, _ = self.fc3(x)
+        x = F.leaky_relu(x)
+        x, _ = self.fc5(x)
+        x = F.leaky_relu(x)
+        encoding = x
+        obs, _ = self.fc6(x)
+        obs = rearrange(
+            obs, "(bs k) dim -> bs k dim", bs=batch_size, k=self.num_ensemble
+        )
+        obs_z = torch.mean(obs, axis=1)
+        obs_z = rearrange(obs_z, "bs (k dim) -> bs k dim", k=1)
+        encoding = rearrange(
+            encoding, "(bs k) dim -> bs k dim", bs=batch_size, k=self.num_ensemble
+        )
+        encoding = torch.mean(encoding, axis=1)
+        encoding = rearrange(encoding, "(bs k) dim -> bs k dim", bs=batch_size, k=1)
+        return obs, obs_z, encoding
+
+
+class new_smartwatch_model(nn.Module):
+    def __init__(self, num_ensemble, win_size, dim_x, dim_z, input_size_1):
+        super(new_smartwatch_model, self).__init__()
+        self.num_ensemble = num_ensemble
+        self.dim_x = dim_x
+        self.dim_z = dim_z
+        self.win_size = win_size
+        self.r_diag = np.ones((self.dim_z)).astype(np.float32) * 0.05
+        self.r_diag = self.r_diag.astype(np.float32)
+
+        # instantiate model
+        self.process_model = Seq_MLP_process_model(
+            self.num_ensemble, self.dim_x, self.win_size, 256, 8
+        )
+        self.sensor_model = SeqSensorModel(
+            self.num_ensemble, self.dim_z, win_size, input_size_1
+        )
+        self.observation_noise = NewObservationNoise(self.dim_z, self.r_diag)
+
+    def forward(self, inputs, states):
+        # decompose inputs and states
+        batch_size = inputs[0].shape[0]
+        raw_obs = inputs
+        state_old = states
+
+        ##### prediction step #####
+        state_pred = self.process_model(state_old)
+        m_A = torch.mean(state_pred, axis=1)  # m_A -> [bs, dim_x]
+
+        # zero mean
+        mean_A = repeat(m_A, "bs dim -> bs k dim", k=self.num_ensemble)
+        A = state_pred - mean_A
+        A = rearrange(A, "bs k dim -> bs dim k")
+
+        ##### update step #####
+
+        # since observation model is identity function
+        H_X = state_pred
+        mean = torch.mean(H_X, axis=1)
+        H_X_mean = rearrange(mean, "bs (k dim) -> bs k dim", k=1)
+        m = repeat(mean, "bs dim -> bs k dim", k=self.num_ensemble)
+        H_A = H_X - m
+        # transpose operation
+        H_XT = rearrange(H_X, "bs k dim -> bs dim k")
+        H_AT = rearrange(H_A, "bs k dim -> bs dim k")
+
+        # get learned observation
+        ensemble_z, z, encoding = self.sensor_model(raw_obs)
+
+        # measurement update
+        y = rearrange(ensemble_z, "bs k dim -> bs dim k")
+        R = self.observation_noise(z)
+
+        innovation = (1 / (self.num_ensemble - 1)) * torch.matmul(H_AT, H_A) + R
+        inv_innovation = torch.linalg.inv(innovation)
+        K = (1 / (self.num_ensemble - 1)) * torch.matmul(
+            torch.matmul(A, H_A), inv_innovation
+        )
+
+        gain = rearrange(torch.matmul(K, y - H_XT), "bs dim k -> bs k dim")
+        state_new = state_pred + gain
+
+        # gather output
+        m_state_new = torch.mean(state_new, axis=1)
+        m_state_new = rearrange(m_state_new, "bs (k dim) -> bs k dim", k=1)
+        m_state_pred = rearrange(m_A, "bs (k dim) -> bs k dim", k=1)
+        output = (
+            state_new.to(dtype=torch.float32),
+            m_state_new.to(dtype=torch.float32),
+            m_state_pred.to(dtype=torch.float32),
+            z.to(dtype=torch.float32),
+            ensemble_z.to(dtype=torch.float32),
+        )
+        return output
\ No newline at end of file
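For orientation: the forward pass above is an ensemble Kalman filter update with an identity observation model, where the learned sensor model supplies the observation and NewObservationNoise supplies R. Stripped of the einops bookkeeping, the update amounts to the sketch below (our condensation for illustration, not repo code):

    import torch

    def enkf_update(state_pred, z, R):
        # state_pred: [bs, N, dim] ensemble, z: [bs, dim, 1] observation, R: [bs, dim, dim]
        N = state_pred.shape[1]
        A = (state_pred - state_pred.mean(dim=1, keepdim=True)).transpose(1, 2)  # anomalies [bs, dim, N]
        S = A @ A.transpose(1, 2) / (N - 1) + R                       # innovation covariance (H = I)
        K = (A @ A.transpose(1, 2) / (N - 1)) @ torch.linalg.inv(S)   # Kalman gain
        gain = K @ (z - state_pred.transpose(1, 2))                   # per-member correction
        return state_pred + gain.transpose(1, 2)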
diff --git a/src/wear_mocap_ape/estimate/models.py b/src/wear_mocap_ape/estimate/models.py
index c4c76a0..dc296d1 100644
--- a/src/wear_mocap_ape/estimate/models.py
+++ b/src/wear_mocap_ape/estimate/models.py
@@ -107,6 +107,47 @@ def monte_carlo_predictions(self,
         return self(rep_data, hs)
 
 
+class ImuPoseLSTM(torch.nn.Module):
+    def __init__(self,
+                 input_size,
+                 hidden_layer_size,
+                 hidden_layer_count,
+                 output_size,
+                 dropout=0.2):
+        """
+        We kept the unused parameters to be compatible with our regular training code
+        """
+        super().__init__()
+        self.input_layer = torch.nn.Linear(input_size, 256)
+        self.lstm = torch.nn.LSTM(256,
+                                  hidden_size=256,
+                                  num_layers=2,
+                                  batch_first=True,
+                                  bidirectional=False)
+        self._activation_function = F.relu
+        self.output_layer = torch.nn.Linear(256, output_size)
+        self.output_size = output_size
+        self.input_size = input_size
+
+    def forward(self, x, hs=None):
+        """
+        :param x: expects data as a 3D tensor [batch_size, sequence, input]
+        :param hs: Defaults to zeros if (h_0, c_0) is not provided
+        :return:
+        """
+        x = self._activation_function(self.input_layer(x))
+        a, hs_n = self.lstm(x, hs)
+        return self.output_layer(a)
+
+    def monte_carlo_predictions(self, n_samples: int, x: torch.tensor):
+        """
+        This model does not have a dropout layer, therefore MC predictions are
+        the same as regular forward passes. We kept this function such that it
+        is compatible with our evaluation code
+        """
+        return self(x, None)
+
+
 class DropoutFF2D(torch.nn.Module):
 
     def __init__(self,
@@ -252,6 +293,8 @@ def load_deployed_model_from_hash(hash_str: str):
         params["model"] = DropoutLSTM
     elif params["model"] == "DropoutFF":
         params["model"] = DropoutFF
+    elif params["model"] == "ImuPoseLSTM":
+        params["model"] = ImuPoseLSTM
     else:
         raise UserWarning(f"{params['model']} not handled")
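The Dropout* models obtain uncertainty estimates by keeping dropout active at inference and running n_samples stochastic passes (MC dropout); ImuPoseLSTM has no dropout layer, so its monte_carlo_predictions deliberately collapses to a single deterministic pass. A hedged sketch of the MC-dropout pattern the callers rely on (not the repo's exact implementation):

    import torch

    def mc_passes(model: torch.nn.Module, x: torch.Tensor, n_samples: int) -> torch.Tensor:
        model.train()  # keep dropout layers sampling at inference time
        with torch.no_grad():
            preds = torch.stack([model(x) for _ in range(n_samples)])
        return preds  # [n_samples, ...]; the spread approximates predictive uncertainty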
diff --git a/src/wear_mocap_ape/estimate/phone_pocket_free_hips_udp.py b/src/wear_mocap_ape/estimate/phone_pocket_free_hips_udp.py
index 91749a0..ed331c6 100644
--- a/src/wear_mocap_ape/estimate/phone_pocket_free_hips_udp.py
+++ b/src/wear_mocap_ape/estimate/phone_pocket_free_hips_udp.py
@@ -1,15 +1,21 @@
 import logging
 import socket
 import struct
+import threading
 import time
 from datetime import datetime
 import queue
+from pathlib import Path
 
 import numpy as np
+import pandas as pd
 import torch
 
+from wear_mocap_ape import config
+from wear_mocap_ape.data_deploy.nn import deploy_models
 from wear_mocap_ape.data_types.bone_map import BoneMap
 from wear_mocap_ape.estimate import models, estimate_joints
+from wear_mocap_ape.stream.listener.imu import ImuListener
 from wear_mocap_ape.utility import transformations as ts, data_stats
 from wear_mocap_ape.data_types import messaging
 from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS
@@ -20,17 +26,21 @@ def __init__(self,
                  ip: str,
                  port: int,
                  model_hash: str,
-                 smooth: int = 5,
+                 smooth: int = 1,
                  stream_monte_carlo=True,
                  monte_carlo_samples=25,
                  bonemap: BoneMap = None,
                  tag: str = "PUB FREE HIPS"):
 
+        # smooth must not be smaller than 1
+        smooth = max(1, smooth)
+
         self.__tag = tag
         self.__port = port
         self.__ip = ip
         self.__stream_mc = stream_monte_carlo
         self.__mc_samples = monte_carlo_samples
+        self.__active = False
 
         # average over multiple time steps
         self.__smooth = smooth
@@ -45,25 +55,21 @@ def __init__(self,
         self.__prev_t = datetime.now()
         self.__row_hist = []
 
+        self.last_msg = np.zeros(25)
+
         # use arm length measurements for predictions
         if bonemap is None:
             # default values
             self.__larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0])
             self.__uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0])
-            self.__shou_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH
-
+            self.__uarm_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH
         else:
             self.__larm_vec = bonemap.left_lower_arm_vec
             self.__uarm_vec = bonemap.left_upper_arm_vec
-            self.__shou_orig = bonemap.left_upper_arm_origin_rh
+            self.__uarm_orig = bonemap.left_upper_arm_origin_rh
 
         # for quicker access we store a matrix with relevant body measurements for quick multiplication
-        self.__body_measurements = np.repeat(
-            np.r_[self.__uarm_vec, self.__larm_vec, self.__shou_orig][np.newaxis, :],
-            self.__mc_samples * self.__smooth,
-            axis=0
-        )
-
+        self.__body_measurements = np.r_[self.__larm_vec, self.__uarm_vec, self.__uarm_orig][np.newaxis, :]
         self.__udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
 
         # load the trained network
@@ -87,36 +93,42 @@ def __init__(self,
         self.__xx_m, self.__xx_s = 0., 1.
         self.__yy_m, self.__yy_s = 0., 1.
 
-    def calibrate_imus_with_offset(self, row: np.array):
+    def terminate(self):
+        self.__active = False
+
+    @property
+    def sequence_len(self):
+        return self.__sequence_len
 
+    def parse_row_to_xx(self, row):
         # get relevant entries from the row
-        sw_fwd = np.array([
+        sw_fwd = np.c_[
             row[self.__slp["sw_forward_w"]], row[self.__slp["sw_forward_x"]],
             row[self.__slp["sw_forward_y"]], row[self.__slp["sw_forward_z"]]
-        ])
+        ]
 
-        sw_rot = np.array([
+        sw_rot = np.c_[
             row[self.__slp["sw_rotvec_w"]], row[self.__slp["sw_rotvec_x"]],
             row[self.__slp["sw_rotvec_y"]], row[self.__slp["sw_rotvec_z"]]
-        ])
+        ]
 
-        ph_fwd = np.array([
+        ph_fwd = np.c_[
            row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]],
            row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]]
-        ])
+        ]
 
-        ph_rot = np.array([
+        ph_rot = np.c_[
            row[self.__slp["ph_rotvec_w"]], row[self.__slp["ph_rotvec_x"]],
            row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]]
-        ])
+        ]
 
         # estimate north quat to align Z-axis of global and android coord system
         r = ts.android_quat_to_global_no_north(sw_fwd)
-        y_rot = ts.reduce_global_quat_to_y_rot(r)
+        y_rot = ts.reduce_global_quat_to_y_rot(r)[0]
         quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0]))
-
         # calibrate watch
         sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north)
+        sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g)
 
         # the device orientations if the calib position with left arm forward is perfect
         hips_dst_g = np.array([1, 0, 0, 0])
@@ -125,101 +137,29 @@
         ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), hips_dst_g)
         ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g)
-        return sw_cal_g, ph_cal_g
-
-    def row_to_pose(self, row):
-
-        # second-wise updates to determine message frequency
-        now = datetime.now()
-        if (now - self.__start).seconds >= 5:
-            self.__start = now
-            logging.info(f"[{self.__tag}] {self.__dat / 5} Hz")
-            self.__dat = 0
-        delta_t = now - self.__prev_t
-        delta_t = delta_t.microseconds * 0.000001
-        self.__prev_t = now
-
-        sw_cal_g, ph_cal_g = self.calibrate_imus_with_offset(row)
-
         # hip y rotation from phone
         hips_y_rot = ts.reduce_global_quat_to_y_rot(ph_cal_g)
-        hips_quat_g = ts.euler_to_quat(np.array([0, hips_y_rot, 0]))
         hips_yrot_cal_sin = np.sin(hips_y_rot)
         hips_yrot_cal_cos = np.cos(hips_y_rot)
 
-        # relative smartwatch orientation
-        sw_cal_rh = ts.hamilton_product(ts.quat_invert(hips_quat_g), sw_cal_g)
-        sw_rot_6drr = ts.quat_to_6drr_1x6(sw_cal_rh)
-
         # pressure - calibrated initial pressure = relative pressure
         r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]]
 
         # assemble the entire input vector of one time step
-        xx = np.hstack([
-            delta_t,
+        return np.c_[
+            row[self.__slp["sw_dt"]],
             row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]],
             row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]],
             row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]],
            row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]],
-            sw_rot_6drr,
-            r_pres
-        ])
-
-        if self.__normalize:
-            # normalize measurements with pre-calculated mean and std
-            xx = (xx - self.__xx_m) / self.__xx_s
-
-        # sequences are used for recurrent nets. Stack time steps along 2nd axis
-        self.__row_hist.append(xx)
-        if len(self.__row_hist) < self.__sequence_len:
-            return None
-
-        while len(self.__row_hist) > self.__sequence_len:
-            del self.__row_hist[0]
-        xx = np.vstack(self.__row_hist)
-
-        # finally, cast to a torch tensor with batch size 1
-        xx = torch.tensor(xx[None, :, :])
-        with torch.no_grad():
-            # make mote carlo predictions if the model makes use of dropout
-            t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples)
-
-            # if on GPU copy the tensor to host memory first
-            if self.__device.type == 'cuda':
-                t_preds = t_preds.cpu()
-            t_preds = t_preds.numpy()
-
-        # we are only interested in the last prediction of the sequence
-        t_preds = t_preds[:, -1, :]
-
-        if self.__normalize:
-            # transform predictions back from normalized labels
-            t_preds = t_preds * self.__yy_s + self.__yy_m
-
-        # store t_preds in history if smoothing is required
-        if self.__smooth > 1:
-            self.__smooth_hist.append(t_preds)
-            if len(self.__smooth_hist) < self.__smooth:
-                return None
-
-            while len(self.__smooth_hist) > self.__smooth:
-                del self.__smooth_hist[0]
-
-            t_preds = np.vstack(self.__smooth_hist)
-
-        t_preds = np.c_[
-            t_preds,
-            np.repeat(hips_yrot_cal_sin, t_preds.shape[0]),
-            np.repeat(hips_yrot_cal_cos, t_preds.shape[0])
+            sw_6drr_cal,
+            r_pres,
+            hips_yrot_cal_sin,
+            hips_yrot_cal_cos
         ]
 
-        # finally, estimate hand and lower arm origins from prediction data
-        est = estimate_joints.arm_pose_from_nn_targets(
-            preds=t_preds,
-            body_measurements=self.__body_measurements,
-            y_targets=NNS_TARGETS.ORI_CALIB_UARM_LARM_HIPS
-        )
-
+    def msg_from_est(self, est):
         # estimate mean of rotations if we got multiple MC predictions
         if est.shape[0] > 1:
             # Calculate the mean of all predictions mean
@@ -228,97 +168,299 @@
             p_hips_quat_g = ts.average_quaternions(est[:, 17:])
             p_larm_quat_g = ts.average_quaternions(est[:, 9:13])
             p_uarm_quat_g = ts.average_quaternions(est[:, 13:17])
 
             # get the transition from upper arm origin to lower arm origin
-            p_uarm_orig_rh = ts.quat_rotate_vector(p_hips_quat_g, self.__shou_orig)
-            p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_g, self.__uarm_vec) + p_uarm_orig_rh
-            p_hand_orig_rh = ts.quat_rotate_vector(p_larm_quat_g, self.__larm_vec) + p_larm_orig_rh
-
+            p_uarm_orig_g = ts.quat_rotate_vector(p_hips_quat_g, self.__uarm_orig)
+            p_larm_orig_g = ts.quat_rotate_vector(p_uarm_quat_g, self.__uarm_vec) + p_uarm_orig_g
+            p_hand_orig_g = ts.quat_rotate_vector(p_larm_quat_g, self.__larm_vec) + p_larm_orig_g
         else:
-            p_hand_orig_rh = est[0, 0:3]
-            p_larm_orig_rh = est[0, 3:6]
-            p_uarm_orig_rh = est[0, 6:9]
+            p_hand_orig_g = est[0, 0:3]
+            p_larm_orig_g = est[0, 3:6]
+            p_uarm_orig_g = est[0, 6:9]
             p_larm_quat_g = est[0, 9:13]
             p_uarm_quat_g = est[0, 13:17]
             p_hips_quat_g = est[0, 17:]
 
         # this is the list for the actual joint positions and rotations
-        msg = [
-            # hand
-            p_larm_quat_g[0],
-            p_larm_quat_g[1],
-            p_larm_quat_g[2],
-            p_larm_quat_g[3],
-
-            p_hand_orig_rh[0],
-            p_hand_orig_rh[1],
-            p_hand_orig_rh[2],
-
-            # larm
-            p_larm_quat_g[0],
-            p_larm_quat_g[1],
-            p_larm_quat_g[2],
-            p_larm_quat_g[3],
-
-            p_larm_orig_rh[0],
-            p_larm_orig_rh[1],
-            p_larm_orig_rh[2],
-
-            # uarm
-            p_uarm_quat_g[0],
-            p_uarm_quat_g[1],
-            p_uarm_quat_g[2],
-            p_uarm_quat_g[3],
-
-            p_uarm_orig_rh[0],
-            p_uarm_orig_rh[1],
-            p_uarm_orig_rh[2],
-
-            # hips
-            p_hips_quat_g[0],
-            p_hips_quat_g[1],
-            p_hips_quat_g[2],
-            p_hips_quat_g[3]
-        ]
+        msg = np.hstack([
+            p_larm_quat_g,  # hand rot
+            p_hand_orig_g,  # hand orig
+            p_larm_quat_g,  # larm rot
+            p_larm_orig_g,  # larm orig
+            p_uarm_quat_g,  # uarm rot
+            p_uarm_orig_g,  # uarm orig
+            p_hips_quat_g   # hips rot
+        ])
 
         # now we attach the monte carlo predictions for XYZ positions
         if self.__stream_mc:
             if est.shape[0] > 1:
-                for e_row in est:
-                    msg += list(e_row[:9])
+                msg = np.hstack([msg, est[:, :9].flatten()])
 
-        return msg
+        return np.array(msg)
     def stream_loop(self, sensor_q: queue):
-        logging.info("[{}] starting Unity stream loop".format(self.__tag))
-
         # used to estimate delta time and processing speed in Hz
         self.__start = datetime.now()
         self.__dat = 0
         self.__prev_t = datetime.now()
-
+        row_hist = []
         # this loops while the socket is listening and/or receiving data
-        while True:
+        self.__active = True
+        while self.__active:
 
             # get the most recent smartwatch data row from the queue
-            row = sensor_q.get()
+            row_hist.append(self.parse_row_to_xx(sensor_q.get()))
+            # add rows until the queue is nearly empty
             while sensor_q.qsize() > 5:
-                row = sensor_q.get()
+                # add row to history and immediately continue with the next
+                row_hist.append(self.parse_row_to_xx(sensor_q.get()))
+
+            # if not enough data is available yet, simply repeat the input as a first estimate
+            while len(row_hist) < self.__sequence_len:
+                row_hist.append(row_hist[-1].copy())
 
-            # process received data
-            if row is not None:
+            # if the history is too long, delete old values
+            while len(row_hist) > self.__sequence_len:
+                del row_hist[0]
 
-                # get message as numpy array
-                np_msg = self.row_to_pose(row)
-                # can return None if not enough historical data for smoothing is available
-                if np_msg is None:
-                    continue
+            # get message as numpy array
+            est = self.make_prediction_from_row_hist(row_hist)
+
+            # estimate mean of rotations if we got multiple MC predictions
+            if est.shape[0] > 1:
+                # Calculate the mean of all predictions mean
+                p_hips_quat_g = ts.average_quaternions(est[:, 17:])
+                p_larm_quat_g = ts.average_quaternions(est[:, 9:13])
+                p_uarm_quat_g = ts.average_quaternions(est[:, 13:17])
+
+                # get the transition from upper arm origin to lower arm origin
+                p_uarm_orig_g = ts.quat_rotate_vector(p_hips_quat_g, self.__uarm_orig)
+                p_larm_orig_g = ts.quat_rotate_vector(p_uarm_quat_g, self.__uarm_vec) + p_uarm_orig_g
+                p_hand_orig_g = ts.quat_rotate_vector(p_larm_quat_g, self.__larm_vec) + p_larm_orig_g
+            else:
+                p_hand_orig_g = est[0, 0:3]
+                p_larm_orig_g = est[0, 3:6]
+                p_uarm_orig_g = est[0, 6:9]
+                p_larm_quat_g = est[0, 9:13]
+                p_uarm_quat_g = est[0, 13:17]
+                p_hips_quat_g = est[0, 17:]
+
+            # this is the list for the actual joint positions and rotations
+            msg = np.hstack([
+                p_larm_quat_g,  # hand rot
+                p_hand_orig_g,  # hand orig
+                p_larm_quat_g,  # larm rot
+                p_larm_orig_g,  # larm orig
+                p_uarm_quat_g,  # uarm rot
+                p_uarm_orig_g,  # uarm orig
+                p_hips_quat_g   # hips rot
+            ])
+
+            # now we attach the monte carlo predictions for XYZ positions
+            if self.__stream_mc:
+                if est.shape[0] > 1:
+                    msg = np.hstack([msg, est[:, :9].flatten()])
+
+            np_msg = np.array(msg)
+            self.last_msg = np_msg
+
+            # log the message frequency every five seconds
+            now = datetime.now()
+            if (now - self.__start).seconds >= 5:
+                self.__start = now
+                logging.info(f"[{self.__tag}] {self.__dat / 5} Hz")
+                self.__dat = 0
+
+            # send message to Unity
+            self.send_np_msg(msg=np_msg)
+            self.__dat += 1
+            time.sleep(0.01)
+
+    def errors_from_file(self, file_p: Path, publish: bool = False):
+        logging.info(f"[{self.__tag}] processing from file")
+        hand_l_err, hand_r_err = [], []
+        elbow_l_err, elbow_r_err = [], []
+        hip_rot_errors = []
+
+        row_hist = []
+        dat = pd.read_csv(file_p)
+        for i, row in dat.iterrows():
+            ## PREDICTIONS
+            xx = row[self.__x_inputs.value].to_numpy()
+            row_hist.append(xx)
+
+            # if not enough data is available yet, simply repeat the input as a first estimate
+            while len(row_hist) < self.__sequence_len:
+                row_hist.append(xx)
+
+            # if the history is too long, delete old values
+            while len(row_hist) > self.__sequence_len:
+                del row_hist[0]
+
+            # get message as numpy array
+            est = self.make_prediction_from_row_hist(row_hist)
+
+            # estimate mean of rotations if we got multiple MC predictions
+            if est.shape[0] > 1:
+                # Calculate the mean of all predictions mean
+                p_larm_quat_rh = ts.average_quaternions(est[:, 9:13])
+                p_uarm_quat_rh = ts.average_quaternions(est[:, 13:17])
+                p_hips_quat_rh = ts.average_quaternions(est[:, 17:])
+
+                # get the transition from upper arm origin to lower arm origin
+                p_uarm_orig_rh = ts.quat_rotate_vector(p_hips_quat_rh, self.__uarm_orig)
+                p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, self.__uarm_vec) + p_uarm_orig_rh
+                p_hand_orig_rh = ts.quat_rotate_vector(p_larm_quat_rh, self.__larm_vec) + p_larm_orig_rh
+            else:
+                p_hand_orig_rh = est[0, 0:3]
+                p_larm_orig_rh = est[0, 3:6]
+                p_uarm_orig_rh = est[0, 6:9]
+                p_larm_quat_rh = est[0, 9:13]
+                p_uarm_quat_rh = est[0, 13:17]
+                p_hips_quat_rh = est[0, 17:]
+
+            # publish the estimation for our unity visualization
+            if publish:
+                msg = np.hstack([
+                    p_larm_quat_rh,
+                    p_hand_orig_rh,
+                    p_larm_quat_rh,
+                    p_larm_orig_rh,
+                    p_uarm_quat_rh,
+                    p_uarm_orig_rh,
+                    p_hips_quat_rh
+                ])
+                if est.shape[0] > 1:
+                    msg = np.hstack([msg, est[:, :9].flatten()])
+
+                # log the message frequency every five seconds
+                now = datetime.now()
+                if (now - self.__start).seconds >= 5:
+                    self.__start = now
+                    logging.info(f"[{self.__tag}] {self.__dat / 5} Hz")
+                    self.__dat = 0
 
                 # send message to Unity
-                self.send_np_msg(msg=np_msg)
+                self.send_np_msg(msg=msg)
                 self.__dat += 1
-                time.sleep(0.01)
+
+            # GROUND TRUTH
+            yy = row[self.__y_targets.value].to_numpy()[np.newaxis, :]
+            est = estimate_joints.arm_pose_from_nn_targets(
+                preds=yy,
+                body_measurements=self.__body_measurements,
+                y_targets=self.__y_targets
+            )
+            gt_hand_orig_rh = est[0, 0:3]
+            gt_larm_orig_rh = est[0, 3:6]
+            gt_larm_quat_g = est[0, 9:13]
+            gt_uarm_quat_g = est[0, 13:17]
+            gt_hips_quat_g = est[0, 17:]
+
+            hand_l_err.append(np.linalg.norm(gt_hand_orig_rh - p_hand_orig_rh) * 100)
+            hre = ts.quat_to_euler(gt_larm_quat_g) - ts.quat_to_euler(p_larm_quat_rh)
+            hree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(hre), np.cos(hre)))))
+            hand_r_err.append(hree)
+
+            elbow_l_err.append(np.linalg.norm(gt_larm_orig_rh - p_larm_orig_rh) * 100)
+            ere = ts.quat_to_euler(gt_uarm_quat_g) - ts.quat_to_euler(p_uarm_quat_rh)
+            eree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(ere), np.cos(ere)))))
+            elbow_r_err.append(eree)
+
+            ydiff = ts.quat_to_euler(gt_hips_quat_g)[1] - ts.quat_to_euler(p_hips_quat_rh)[1]
+            err = np.abs(np.degrees(np.arctan2(np.sin(ydiff), np.cos(ydiff))))
+            hip_rot_errors.append(err)
+
+            if (int(i) + 1) % 100 == 0:
+                logging.info(f"[{self.__tag}] processed {i} rows")
+
+        return hand_l_err, hand_r_err, elbow_l_err, elbow_r_err, hip_rot_errors
 
     def send_np_msg(self, msg: np.array) -> int:
         # craft UDP message and send
         msg = struct.pack('f' * len(msg), *msg)
         return self.__udp_socket.sendto(msg, (self.__ip, self.__port))
+
+    def make_prediction_from_row_hist(self, row_hist):
+        # stack rows to one big array
+        seq = np.vstack(row_hist.copy())
+
+        if self.__normalize:
+            # normalize measurements with pre-calculated mean and std
+            xx = (seq - self.__xx_m) / self.__xx_s
+        else:
+            xx = seq
+
+        # finally, cast to a torch tensor with batch size 1
+        xx = torch.tensor(xx[None, :, :])
+        with torch.no_grad():
+            # make Monte Carlo predictions if the model makes use of dropout
+            t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples)
+
+            # if on GPU copy the tensor to host memory first
+            if self.__device.type == 'cuda':
+                t_preds = t_preds.cpu()
+            t_preds = t_preds.numpy()
+
+        # we are only interested in the last prediction of the sequence
+        t_preds = t_preds[:, -1, :]
+
+        if self.__normalize:
+            # transform predictions back from normalized labels
+            t_preds = t_preds * self.__yy_s + self.__yy_m
+
+        est = estimate_joints.arm_pose_from_nn_targets(
+            preds=t_preds,
+            body_measurements=self.__body_measurements,
+            y_targets=self.__y_targets
+        )
+
+        # store t_preds in history if smoothing is required
+        if self.__smooth > 1:
+            self.__smooth_hist.append(est)
+            while len(self.__smooth_hist) < self.__smooth:
+                self.__smooth_hist.append(est)
+            while len(self.__smooth_hist) > self.__smooth:
+                del self.__smooth_hist[0]
+            est = np.vstack(self.__smooth_hist)
+
+        return est
+
+
+def run_free_hips_pocket_udp(ip: str,
+                             model_hash: str = deploy_models.LSTM.WATCH_PHONE_POCKET.value,
+                             stream_monte_carlo: bool = False,
+                             smooth: int = 10) -> FreeHipsPocketUDP:
+    """
+    convenience function to create a default running instance
+    """
+
+    # data for left-hand mode
+    left_q = queue.Queue()
+
+    # listen for imu data from phone and watch
+    imu_l = ImuListener(
+        ip=ip,
+        msg_size=messaging.watch_phone_imu_msg_len,
+        port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT,
+        tag="LISTEN IMU LEFT"
+    )
+    l_thread = threading.Thread(
+        target=imu_l.listen,
+        args=(left_q,)
+    )
+    l_thread.start()
+
+    # process into arm pose and body orientation
+    fhp = FreeHipsPocketUDP(ip=ip,
+                            model_hash=model_hash,
+                            smooth=smooth,
+                            port=config.PORT_PUB_LEFT_ARM,
+                            monte_carlo_samples=25,
+                            stream_monte_carlo=stream_monte_carlo)
+    p_thread = threading.Thread(
+        target=fhp.stream_loop,
+        args=(left_q,)
+    )
+    p_thread.start()
+
+    return fhp
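The run_free_hips_pocket_udp helper wires the IMU listener and the publisher together on background threads and returns the publisher handle. A usage sketch (the IP is a placeholder); note that the listener is not returned, so it cannot be stopped through this handle:

    import time
    from wear_mocap_ape.estimate.phone_pocket_free_hips_udp import run_free_hips_pocket_udp

    fhp = run_free_hips_pocket_udp(ip="192.168.1.100", stream_monte_carlo=False, smooth=10)
    time.sleep(60)   # stream arm poses for a minute
    fhp.terminate()  # ends the publisher's stream loop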
diff --git a/src/wear_mocap_ape/estimate/watch_only.py b/src/wear_mocap_ape/estimate/watch_only.py
index d94c808..1f3f1a5 100644
--- a/src/wear_mocap_ape/estimate/watch_only.py
+++ b/src/wear_mocap_ape/estimate/watch_only.py
@@ -38,14 +38,17 @@ def __init__(self,
         self.__mc_samples = monte_carlo_samples
         self.__last_msg = None
 
-        # use arm length measurements for predictions
+        # use body measurements for transitions
         if bonemap is None:
-            # default values
             self.__larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0])
             self.__uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0])
+            self.__uarm_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH
         else:
-            self.__larm_vec = bonemap.left_lower_arm_vec
-            self.__uarm_vec = bonemap.left_upper_arm_vec
+            # get values from bone map
+            self.__larm_vec = np.array([-bonemap.left_lower_arm_length, 0, 0])
+            self.__uarm_vec = np.array([-bonemap.left_upper_arm_length, 0, 0])
+            self.__uarm_orig = bonemap.left_upper_arm_origin_rh
+
+        self.__hips_quat = np.array([1, 0, 0, 0])
 
         # load the trained network
         torch.set_default_dtype(torch.float64)
@@ -93,11 +96,7 @@ def processing_loop(self, sensor_q: queue):
         slp = messaging.WATCH_ONLY_IMU_LOOKUP
 
         # for quicker access we store a matrix with relevant body measurements for quick multiplication
-        body_measurements = np.repeat(
-            np.r_[self.__uarm_vec, self.__larm_vec][np.newaxis, :],
-            self.__mc_samples * self.__smooth,
-            axis=0
-        )
+        body_measurements = np.r_[self.__uarm_vec, self.__larm_vec, self.__uarm_orig][np.newaxis, :]
 
         # this loops while the socket is listening and/or receiving data
         while self.__active:
@@ -203,58 +202,33 @@ def processing_loop(self, sensor_q: queue):
                 pred_larm_rot_rh = ts.average_quaternions(est[:, 6:10])
                 pred_uarm_rot_rh = ts.average_quaternions(est[:, 10:])
 
-                # use body measurements for transitions
-                uarm_vec = body_measurements[0, :3]
-                larm_vec = body_measurements[0, 3:]
 
                 # get the transition from upper arm origin to lower arm origin
-                pred_larm_origin_rua = ts.quat_rotate_vector(pred_uarm_rot_rh, uarm_vec)
+                pred_larm_origin_rh = ts.quat_rotate_vector(pred_uarm_rot_rh, self.__uarm_vec) + self.__uarm_orig
 
                 # get transitions from lower arm origin to hand
-                rotated_lower_arms_re = ts.quat_rotate_vector(pred_larm_rot_rh, larm_vec)
-                pred_hand_origin_rua = rotated_lower_arms_re + pred_larm_origin_rua
+                rotated_lower_arms_re = ts.quat_rotate_vector(pred_larm_rot_rh, self.__larm_vec)
+                pred_hand_origin_rh = rotated_lower_arms_re + pred_larm_origin_rh
             else:
-                pred_hand_origin_rua = est[0, :3]
-                pred_larm_origin_rua = est[0, 3:6]
+                pred_hand_origin_rh = est[0, :3]
+                pred_larm_origin_rh = est[0, 3:6]
                 pred_larm_rot_rh = est[0, 6:10]
                 pred_uarm_rot_rh = est[0, 10:]
 
-            # # hand from watch
-            # fwd_to_left = np.array([0.7071068, 0., 0.7071068, 0.])  # a 90deg y rotation
-            # hand_rot_r = transformations.watch_left_to_global(sw_quat_cal)
-            # hand_rot_r = transformations.hamilton_product(hand_rot_r, fwd_to_left)
-            hand_rot_r = pred_larm_rot_rh
-
             # this is the list for the actual joint positions and rotations
-            basic_value_list = [
-                # we pass a hand orientation too, for future work
-                # currently, hand rotation is smartwatch rotation
-                hand_rot_r[0],
-                hand_rot_r[1],
-                hand_rot_r[2],
-                hand_rot_r[3],
-
-                pred_hand_origin_rua[0],
-                pred_hand_origin_rua[1],
-                pred_hand_origin_rua[2],
-
-                pred_larm_rot_rh[0],
-                pred_larm_rot_rh[1],
-                pred_larm_rot_rh[2],
-                pred_larm_rot_rh[3],
-
-                pred_larm_origin_rua[0],
-                pred_larm_origin_rua[1],
-                pred_larm_origin_rua[2],
-
-                pred_uarm_rot_rh[0],
-                pred_uarm_rot_rh[1],
-                pred_uarm_rot_rh[2],
-                pred_uarm_rot_rh[3]
-            ]
+            basic_value_list = np.hstack([
+                pred_larm_rot_rh,  # hand quat
+                pred_hand_origin_rh,  # hand pos
+                pred_larm_rot_rh,  # larm quat
+                pred_larm_origin_rh,
+                pred_uarm_rot_rh,  # uarm quat
+                self.__uarm_orig,
+                self.__hips_quat
+            ])
 
             # now we attach the monte carlo predictions for XYZ positions
             if self.__stream_mc:
                 if est.shape[0] > 1:
                     for e_row in est:
-                        basic_value_list += list(e_row[:6])
+                        basic_value_list = np.hstack([basic_value_list, e_row[:6]])
 
             # store as last msg for getter
             self.__last_msg = basic_value_list.copy()
@@ -264,5 +238,5 @@ def processing_loop(self, sensor_q: queue):
             dat += 1
 
     @abstractmethod
-    def process_msg(self, msg: list):
+    def process_msg(self, msg: np.array):
        return
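With the hips quaternion appended, the watch-only, watch-phone, and pocket publishers now all emit the same 25-float pose message (matching last_msg = np.zeros(25) in the Kalman publisher further down), optionally followed by flattened Monte-Carlo XYZ rows. For reference, the offsets as we read them from the hstack calls (this constant is our annotation, not a repo definition):

    # 25-float pose message layout (quaternions as w,x,y,z; origins as x,y,z)
    POSE_MSG_LAYOUT = {
        "hand_quat": slice(0, 4),
        "hand_orig": slice(4, 7),
        "larm_quat": slice(7, 11),
        "larm_orig": slice(11, 14),
        "uarm_quat": slice(14, 18),
        "uarm_orig": slice(18, 21),
        "hips_quat": slice(21, 25),
    }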
diff --git a/src/wear_mocap_ape/estimate/watch_phone.py b/src/wear_mocap_ape/estimate/watch_phone.py
index 5c6b514..da06628 100644
--- a/src/wear_mocap_ape/estimate/watch_phone.py
+++ b/src/wear_mocap_ape/estimate/watch_phone.py
@@ -1,4 +1,5 @@
 import logging
+import time
 from abc import abstractmethod
 from datetime import datetime
 import queue
@@ -31,15 +32,19 @@ def __init__(self,
         if bonemap is None:
             self.__larm_vec = np.array([BoneMap.DEFAULT_LARM_LEN, 0, 0])
             self.__uarm_vec = np.array([BoneMap.DEFAULT_UARM_LEN, 0, 0])
+            self.__uarm_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH
         else:
             # get values from bone map
             self.__larm_vec = np.array([bonemap.left_lower_arm_length, 0, 0])
             self.__uarm_vec = np.array([bonemap.left_upper_arm_length, 0, 0])
+            self.__uarm_orig = bonemap.left_upper_arm_origin_rh
+
+        self.__hips_quat = np.array([1, 0, 0, 0])
 
         # in left hand mode, the arm is stretched along the negative X-axis in T pose
         if left_hand_mode:
             self.__larm_vec[0] = -self.__larm_vec[0]
             self.__uarm_vec[0] = -self.__uarm_vec[0]
+            self.__uarm_orig = self.__uarm_orig * np.array([-1, 1, 1])  # invert x
 
     def calibrate_imus_with_offset(self, row: np.array):
@@ -119,11 +124,13 @@ def row_to_arm_pose(self, row):
         hand_origin_rua = larm_rotated + larm_origin_rua
         # this is the list for the actual joint positions and rotations
         return np.hstack([
-            avg_larm_rot_r,  # in this case, hand rotation is larm rotation
-            hand_origin_rua,
-            avg_larm_rot_r,
-            larm_origin_rua,
-            avg_uarm_rot_r
+            avg_larm_rot_r,  # hand quat
+            hand_origin_rua,  # hand orig
+            avg_larm_rot_r,  # larm quat
+            larm_origin_rua,  # larm orig
+            avg_uarm_rot_r,  # uarm quat
+            self.__uarm_orig,
+            self.__hips_quat
         ])
 
     def terminate(self):
@@ -164,6 +171,7 @@ def stream_loop(self, sensor_q: queue):
 
             # send message to Unity
             self.process_msg(msg=np_msg)
+            time.sleep(0.01)
             dat += 1
 
     @abstractmethod
diff --git a/src/wear_mocap_ape/record/watch_only_rec.py b/src/wear_mocap_ape/record/watch_only_rec.py
index 0772340..46c7700 100644
--- a/src/wear_mocap_ape/record/watch_only_rec.py
+++ b/src/wear_mocap_ape/record/watch_only_rec.py
@@ -2,6 +2,8 @@
 import time
 from pathlib import Path
 
+import numpy as np
+
 from wear_mocap_ape.data_deploy.nn import deploy_models
 from wear_mocap_ape.data_types.bone_map import BoneMap
 from wear_mocap_ape.estimate.watch_only import WatchOnly
@@ -51,11 +53,12 @@ def __init__(self,
         with open(self.__file, 'w') as fd:
             fd.write(",".join(header) + "\n")
 
-    def process_msg(self, msg: list):
+    def process_msg(self, msg: np.array):
         """
         The parent class calls this method whenever a new arm pose estimation is finished
         """
+        msg = msg.tolist()
         with open(self.__file, 'a') as fd:
             msg.insert(0, datetime.datetime.now())
             msg = [str(x) for x in msg]
diff --git a/src/wear_mocap_ape/stream/listener/audio.py b/src/wear_mocap_ape/stream/listener/audio.py
index 504db15..d5a4219 100755
--- a/src/wear_mocap_ape/stream/listener/audio.py
+++ b/src/wear_mocap_ape/stream/listener/audio.py
@@ -10,14 +10,16 @@
 import wear_mocap_ape.config as config
 import wear_mocap_ape.utility as utility
+from wear_mocap_ape import data_types
+from wear_mocap_ape.data_types import voice_commands


 class AudioListener:
     def __init__(self, ip: str, port: int = config.PORT_LISTEN_WATCH_PHONE_AUDIO, tag: str = "AUDIO"):
         # Audio recording parameters
         # See https://github.com/googleapis/python-speech/blob/main/samples/microphone/transcribe_streaming_infinite.py
-        self.__sample_rate = 16000
-        self.__chunk_size = 1600  # int(16000 / 10) # 100ms
+        self.__sample_rate = 32000
+        self.__chunk_size = 800  # int(32000 / 40) # 25ms
         self.__language_code = "en-US"  # a BCP-47 language tag
         self.__ip = ip
         self.__port = port  # the dual Port
@@ -93,7 +95,7 @@ def __stream_mic(self, q_out: queue):
             # this function waits
             try:
                 data, _ = s.recvfrom(self.__chunk_size)
-            except TimeoutError:
+            except socket.timeout:
                 logging.info(f"[{self.__tag}] timed out")
                 continue
@@ -173,7 +175,7 @@ def __transcribe(self, q_in: queue, q_out: queue):
                 phrase = str(transcript).lower()
                 logging.info("[THREAD TRANSCRIBE] {}".format(phrase))

-                for k, v in utility.voice_commands.KEY_PHRASES.items():
+                for k, v in voice_commands.KEY_PHRASES.items():
                     if k in phrase:
                         q_out.put(v)
                         logging.info("[THREAD TRANSCRIBE] sent {} queue size {}".format(k, q_out.qsize()))
@@ -183,5 +185,5 @@ def __transcribe(self, q_in: queue, q_out: queue):

 if __name__ == "__main__":
     logging.basicConfig(level=logging.INFO)
-    wp_audio = AudioListener(port=config.PORT_LISTEN_WATCH_AUDIO)
+    wp_audio = AudioListener(ip="10.218.100.139", port=config.PORT_LISTEN_WATCH_PHONE_AUDIO)
     wp_audio.play_stream_loop()
diff --git a/src/wear_mocap_ape/stream/listener/imu.py b/src/wear_mocap_ape/stream/listener/imu.py
index 2dc7025..152c9af 100755
--- a/src/wear_mocap_ape/stream/listener/imu.py
+++ b/src/wear_mocap_ape/stream/listener/imu.py
@@ -46,7 +46,7 @@ def listen(self, q: queue):
         # receive and queue the data
         try:
             data, _ = s.recvfrom(self.__msg_size)
-        except TimeoutError:
+        except socket.timeout:
             logging.info(f"[{self.__tag}] timed out")
             continue
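Both listeners now catch socket.timeout instead of TimeoutError. This matters on Python versions before 3.10, where socket.timeout is a subclass of OSError rather than an alias of TimeoutError, so the old handler never fired and a timeout crashed the loop. A condensed sketch of the recv-with-timeout pattern both hunks use (port and message size are placeholders):

import logging
import socket

def recv_loop(ip: str = "0.0.0.0", port: int = 65000, msg_size: int = 1024):
    # bind a UDP socket and yield datagrams, logging instead of crashing on timeouts
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.bind((ip, port))
    s.settimeout(5)
    while True:
        try:
            data, _ = s.recvfrom(msg_size)
        except socket.timeout:  # only an alias of TimeoutError from Python 3.10 on
            logging.info("timed out")
            continue
        yield data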
diff --git a/src/wear_mocap_ape/stream/publisher/kalman_pocket_phone_udp.py b/src/wear_mocap_ape/stream/publisher/kalman_pocket_phone_udp.py
new file mode 100644
index 0000000..813b124
--- /dev/null
+++ b/src/wear_mocap_ape/stream/publisher/kalman_pocket_phone_udp.py
@@ -0,0 +1,532 @@
+import logging
+import socket
+import struct
+import threading
+from datetime import datetime
+from pathlib import Path
+
+import pandas as pd
+import torch
+import queue
+import numpy as np
+
+from einops import rearrange
+
+from wear_mocap_ape.estimate import kalman_models
+from wear_mocap_ape import config
+from wear_mocap_ape.estimate import estimate_joints
+from wear_mocap_ape.data_types.bone_map import BoneMap
+from wear_mocap_ape.stream.listener.imu import ImuListener
+from wear_mocap_ape.utility import transformations as ts, data_stats
+from wear_mocap_ape.data_types import messaging
+from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS
+
+
+class KalmanPhonePocket:
+    def __init__(self,
+                 ip,
+                 port,
+                 smooth: int = 1,
+                 num_ensemble: int = 32,
+                 model_name="SW-model",
+                 window_size: int = 10,
+                 stream_mc: bool = True,
+                 tag: str = "KALMAN POCKET PHONE"):
+
+        self.__y_targets = NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS
+        self.x_inputs = NNS_INPUTS.WATCH_PH_HIP
+        self.__prev_time = datetime.now()
+
+        self.__tag = tag
+        self.__active = False
+        self.__smooth = smooth
+        self.__smooth_hist = []
+        self.__stream_mc = stream_mc
+
+        self.__row_hist = []
+
+        self.__uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0])
+        self.__larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0])
+        self.__shou_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH
+
+        self.__port = port
+        self.__ip = ip
+        self.__udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.__udp_socket.settimeout(5)
+        self.last_msg = None
+        self.__normalize = True
+
+        self.batch_size = 1
+        self.dim_x = 14
+        self.dim_z = 14
+        self.dim_a = ""
+        self.dim_gt = 14
+        self.sensor_len = 1
+        self.channel_img_1 = ""
+        self.channel_img_2 = ""
+        self.input_size_1 = 22
+        self.input_size_2 = ""
+        self.input_size_3 = ""
+        self.num_ensemble = num_ensemble
+        self.win_size = window_size
+        self.mode = "Test"
+
+        self.model = kalman_models.new_smartwatch_model(
+            self.num_ensemble,
+            self.win_size,
+            self.dim_x,
+            self.dim_z,
+            self.input_size_1
+        )
+
+        self.utils_ = kalman_models.utils(self.num_ensemble, self.dim_x, self.dim_z)
+
+        # Check model type
+        if not isinstance(self.model, torch.nn.Module):
+            raise TypeError("model must be an instance of nn.Module")
+        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        if torch.cuda.is_available():
+            self.model.cuda()
+
+        # load normalized data stats if required
+        stats = data_stats.get_norm_stats(
+            x_inputs=self.x_inputs,
+            y_targets=self.__y_targets
+        )
+
+        # data is normalized and has to be transformed with pre-calculated mean and std
+        self.__xx_m, self.__xx_s = stats["xx_m"], stats["xx_s"]
+        self.__yy_m, self.__yy_s = stats["yy_m"], stats["yy_s"]
+
+        # Load the pretrained model
+        if torch.cuda.is_available():
+            checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_name)
+            self.model.load_state_dict(checkpoint["model"])
+        else:
+            checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_name,
+                                    map_location=torch.device("cpu"))
+            self.model.load_state_dict(checkpoint["model"])
+        self.model.eval()
+
+        # INIT MODEL
+        # store relevant body measurements in one matrix for quick multiplication
+        self.__body_measurements = np.r_[self.__larm_vec, self.__uarm_vec, self.__shou_orig][np.newaxis, :]
+        self.__init_step = 0
+
+        self.__larm_vert = np.array([
+            [-BoneMap.DEFAULT_LARM_LEN, 0.03, 0.03],
+            [-BoneMap.DEFAULT_LARM_LEN, 0.03, -0.03],
+            [-BoneMap.DEFAULT_LARM_LEN, -0.03, -0.03],
+            [-BoneMap.DEFAULT_LARM_LEN, -0.03, 0.03],
+            [0, 0.03, 0.03],
+            [0, 0.03, -0.03],
+            [0, -0.03, -0.03],
+            [0, -0.03, 0.03],
+        ])
+        self.__uarm_vert = np.array([
+            [-BoneMap.DEFAULT_UARM_LEN, 0.05, 0.05],
+            [-BoneMap.DEFAULT_UARM_LEN, 0.05, -0.05],
+            [-BoneMap.DEFAULT_UARM_LEN, -0.05, -0.05],
+            [-BoneMap.DEFAULT_UARM_LEN, -0.05, 0.05],
+            [0, 0.05, 0.05],
+            [0, 0.05, -0.05],
+            [0, -0.05, -0.05],
+            [0, -0.05, 0.05],
+        ])
+
+    def is_active(self):
+        return self.__active
+
+    def terminate(self):
+        self.__active = False
+
+    def get_y_targets(self):
+        return self.__y_targets
+
+    def get_body_measurements(self):
+        return self.__body_measurements
+
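+    # offline evaluation: replays a recorded CSV, compares predictions against the
+    # ground-truth targets, and returns per-frame hand/elbow position errors
+    # (presumably cm, given the * 100) plus summed joint-angle and hip y-rotation
+    # errors in degrees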
+    def errors_from_file(self, file_p: Path, publish: bool = False):
+        logging.info(f"[{self.__tag}] processing from file")
+        # init some dummy input
+        input_state = np.zeros((self.batch_size, self.num_ensemble, self.win_size, self.dim_x))
+        input_state = torch.tensor(input_state, dtype=torch.float32)
+        input_state = input_state.to(self.device)
+
+        hand_l_err, hand_r_err = [], []
+        elbow_l_err, elbow_r_err = [], []
+        hip_rot_errors = []
+
+        dat = pd.read_csv(file_p)
+        for i, row in dat.iterrows():
+            ## PREDICTIONS
+            xx = row[self.x_inputs.value].to_numpy()
+            t_pred, input_state = self.make_prediction(xx, input_state)
+
+            est = estimate_joints.arm_pose_from_nn_targets(
+                preds=t_pred,
+                body_measurements=self.__body_measurements,
+                y_targets=self.__y_targets
+            )
+            # estimate mean of rotations if we got multiple MC predictions
+            if est.shape[0] > 1:
+                # calculate the mean of all predictions
+                p_larm_quat_rh = ts.average_quaternions(est[:, 9:13])
+                p_uarm_quat_rh = ts.average_quaternions(est[:, 13:17])
+                p_hips_quat_rh = ts.average_quaternions(est[:, 17:])
+
+                # get the transition from upper arm origin to lower arm origin
+                p_uarm_orig_rh = ts.quat_rotate_vector(p_hips_quat_rh, self.__shou_orig)
+                p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, self.__uarm_vec) + p_uarm_orig_rh
+                p_hand_orig_rh = ts.quat_rotate_vector(p_larm_quat_rh, self.__larm_vec) + p_larm_orig_rh
+            else:
+                p_hand_orig_rh = est[0, 0:3]
+                p_larm_orig_rh = est[0, 3:6]
+                p_uarm_orig_rh = est[0, 6:9]
+                p_larm_quat_rh = est[0, 9:13]
+                p_uarm_quat_rh = est[0, 13:17]
+                p_hips_quat_rh = est[0, 17:]
+
+            # publish the estimation for our unity visualization
+            if publish:
+                msg = np.hstack([
+                    p_larm_quat_rh,
+                    p_hand_orig_rh,
+                    p_larm_quat_rh,
+                    p_larm_orig_rh,
+                    p_uarm_quat_rh,
+                    p_uarm_orig_rh,
+                    p_hips_quat_rh
+                ])
+                if est.shape[0] > 1:
+                    for e_row in est:
+                        msg = np.hstack([msg, e_row[:9]])
+                self.send_np_msg(msg)
+
+            # GROUND TRUTH
+            yy = row[self.__y_targets.value].to_numpy()[np.newaxis, :]
+            est = estimate_joints.arm_pose_from_nn_targets(
+                preds=yy,
+                body_measurements=self.__body_measurements,
+                y_targets=self.__y_targets
+            )
+            gt_hand_orig_rh = est[0, 0:3]
+            gt_larm_orig_rh = est[0, 3:6]
+            gt_uarm_orig_rh = est[0, 6:9]
+            gt_larm_quat_g = est[0, 9:13]
+            gt_uarm_quat_g = est[0, 13:17]
+            gt_hips_quat_g = est[0, 17:]
+
+            # p_larm_vrtx = ts.quat_rotate_vector(p_larm_quat_rh, self.__larm_vert) + p_larm_orig_rh
+            # p_uarm_vrtx = ts.quat_rotate_vector(p_uarm_quat_rh, self.__uarm_vert) + p_uarm_orig_rh
+            #
+            # gt_larm_vrtx = ts.quat_rotate_vector(gt_larm_quat_g, self.__larm_vert) + gt_larm_orig_rh
+            # gt_uarm_vrtx = ts.quat_rotate_vector(gt_uarm_quat_g, self.__uarm_vert) + gt_uarm_orig_rh
+            #
+            # le = gt_larm_vrtx - p_larm_vrtx
+            # ue = gt_uarm_vrtx - p_uarm_vrtx
+            # ae = np.vstack([le, ue])
+            # me = np.linalg.norm(ae, axis=1)
+            #
+            # mpjve.append(np.mean(me) * 100)
+
+            # hand_l_err.append(np.linalg.norm(gt_hand_orig_rh - p_hand_orig_rh) * 100)
+            # q = ts.hamilton_product(gt_larm_quat_g, ts.quat_invert(p_larm_quat_rh))
+            # e = ts.quat_to_euler(q)
+            # hand_r_err.append(np.sum(np.abs(np.degrees(e))))
+            #
+            # elbow_l_err.append(np.linalg.norm(gt_larm_orig_rh - p_larm_orig_rh) * 100)
+            # eq = ts.hamilton_product(gt_uarm_quat_g, ts.quat_invert(p_uarm_quat_rh))
+            # ee = ts.quat_to_euler(eq)
+            # elbow_r_err.append(np.sum(np.abs(np.degrees(ee))))
+            #
+            # ydiff = ts.hamilton_product(gt_hips_quat_g, ts.quat_invert(p_hips_quat_rh))
+            # ydiffe = ts.quat_to_euler(ydiff)[1]
+            # err = np.abs(np.degrees(np.arctan2(np.sin(ydiffe), np.cos(ydiffe))))
+            # hip_rot_errors.append(err)
+
+            hand_l_err.append(np.linalg.norm(gt_hand_orig_rh - p_hand_orig_rh) * 100)
+            hre = ts.quat_to_euler(gt_larm_quat_g) - ts.quat_to_euler(p_larm_quat_rh)
+            hree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(hre), np.cos(hre)))))
+            hand_r_err.append(hree)
+
+            elbow_l_err.append(np.linalg.norm(gt_larm_orig_rh - p_larm_orig_rh) * 100)
+            ere = ts.quat_to_euler(gt_uarm_quat_g) - ts.quat_to_euler(p_uarm_quat_rh)
+            eree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(ere), np.cos(ere)))))
+            elbow_r_err.append(eree)
+
+            ydiff = ts.quat_to_euler(gt_hips_quat_g)[1] - ts.quat_to_euler(p_hips_quat_rh)[1]
+            err = np.abs(np.degrees(np.arctan2(np.sin(ydiff), np.cos(ydiff))))
+            hip_rot_errors.append(err)
+
+            if (int(i) + 1) % 100 == 0:
+                logging.info(f"[{self.__tag}] processed {i} rows")
+
+        return hand_l_err, hand_r_err, elbow_l_err, elbow_r_err, hip_rot_errors
+
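+    # live mode: drains the IMU queue to the freshest row, calibrates watch and
+    # phone orientations against the estimated north, assembles the 22-float input
+    # vector (matching input_size_1), and publishes the estimated pose via UDP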
+    def stream_wearable_devices(self, sensor_q: queue = None, send_msg: bool = True):
+        logging.info(f"[{self.__tag}] wearable streaming loop")
+        self.__init_step = 0
+
+        # init some dummy input
+        # input -> [batch_size, ensemble, timestep, dim_x]
+        input_state = np.zeros((self.batch_size, self.num_ensemble, self.win_size, self.dim_x))
+        input_state = torch.tensor(input_state, dtype=torch.float32)
+        input_state = input_state.to(self.device)
+
+        # used to estimate delta time and processing speed in Hz
+        start = datetime.now()
+        self.__prev_time = start
+        dat = 0
+
+        # this loops while the socket is listening and/or receiving data
+        self.__active = True
+
+        # dataset = pickle.load(open(dojo_config.PATHS["deploy"] / "test_dataset.pkl", "rb"))
+        # simple lookup for values of interest
+        slp = messaging.WATCH_PHONE_IMU_LOOKUP
+
+        logging.info("loaded")
+        while self.__active:
+
+            # processing speed output
+            now = datetime.now()
+            if (now - start).seconds >= 5:
+                start = now
+                logging.info(f"[{self.__tag}] {dat / 5} Hz")
+                dat = 0
+
+            delta_t = now - self.__prev_time
+            delta_t = delta_t.microseconds * 0.000001
+            self.__prev_time = now
+
+            # get the most recent smartwatch data row from the queue
+            row = sensor_q.get()
+            while sensor_q.qsize() > 5:
+                row = sensor_q.get()
+
+            # process the data
+            # pressure - calibrated initial pressure = relative pressure
+            r_pres = row[slp["sw_pres"]] - row[slp["sw_init_pres"]]
+            # calibrate smartwatch rotation
+            sw_rot = np.array([
+                row[slp["sw_rotvec_w"]],
+                row[slp["sw_rotvec_x"]],
+                row[slp["sw_rotvec_y"]],
+                row[slp["sw_rotvec_z"]]
+            ])
+            sw_fwd = np.array([
+                row[slp["sw_forward_w"]],
+                row[slp["sw_forward_x"]],
+                row[slp["sw_forward_y"]],
+                row[slp["sw_forward_z"]]
+            ])
+
+            # estimate north quat to align Z-axis of global and android coord system
+            r = ts.android_quat_to_global_no_north(sw_fwd)
+            y_rot = ts.reduce_global_quat_to_y_rot(r)
+            quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0]))
+            # calibrate watch
+            sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north)
+            sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g)
+
+            ph_fwd = np.array([
+                row[slp["ph_forward_w"]], row[slp["ph_forward_x"]],
+                row[slp["ph_forward_y"]], row[slp["ph_forward_z"]]
+            ])
+            ph_rot = np.array([
+                row[slp["ph_rotvec_w"]], row[slp["ph_rotvec_x"]],
+                row[slp["ph_rotvec_y"]], row[slp["ph_rotvec_z"]]
+            ])
+            # the device orientations assuming the calibration position (left arm forward) is perfect
+            hips_dst_g = np.array([1, 0, 0, 0])
+            ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north)
+            ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north)
+            ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), hips_dst_g)
+            ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g)
+
+            # hip y rotation from phone
+            hips_y_rot = ts.reduce_global_quat_to_y_rot(ph_cal_g)
+            hips_yrot_cal_sin = np.sin(hips_y_rot)
+            hips_yrot_cal_cos = np.cos(hips_y_rot)
+
+            # assemble the entire input vector of one time step
+            xx = np.hstack([
+                delta_t,
+                row[slp["sw_gyro_x"]], row[slp["sw_gyro_y"]], row[slp["sw_gyro_z"]],
+                row[slp["sw_lvel_x"]], row[slp["sw_lvel_y"]], row[slp["sw_lvel_z"]],
+                row[slp["sw_lacc_x"]], row[slp["sw_lacc_y"]], row[slp["sw_lacc_z"]],
+                row[slp["sw_grav_x"]], row[slp["sw_grav_y"]], row[slp["sw_grav_z"]],
+                sw_6drr_cal,
+                r_pres,
+                hips_yrot_cal_sin,
+                hips_yrot_cal_cos
+            ])
+
+            # make_prediction normalizes the measurements with the pre-calculated
+            # mean and std and returns the predicted positions etc.
+            t_pred, input_state = self.make_prediction(xx, input_state)
+
+            est = estimate_joints.arm_pose_from_nn_targets(
+                preds=t_pred,
+                body_measurements=self.__body_measurements,
+                y_targets=self.__y_targets
+            )
+
+            # estimate mean of rotations if we got multiple MC predictions
+            if est.shape[0] > 1:
+                # calculate the mean of all predictions
+                p_hips_quat_g = ts.average_quaternions(est[:, 17:])
+                p_larm_quat_g = ts.average_quaternions(est[:, 9:13])
+                p_uarm_quat_g = ts.average_quaternions(est[:, 13:17])
+
+                # get the transition from upper arm origin to lower arm origin
+                p_uarm_orig_g = ts.quat_rotate_vector(p_hips_quat_g, self.__shou_orig)
+                p_larm_orig_g = ts.quat_rotate_vector(p_uarm_quat_g, self.__uarm_vec) + p_uarm_orig_g
+                p_hand_orig_g = ts.quat_rotate_vector(p_larm_quat_g, self.__larm_vec) + p_larm_orig_g
+            else:
+                p_hand_orig_g = est[0, 0:3]
+                p_larm_orig_g = est[0, 3:6]
+                p_uarm_orig_g = est[0, 6:9]
+                p_larm_quat_g = est[0, 9:13]
+                p_uarm_quat_g = est[0, 13:17]
+                p_hips_quat_g = est[0, 17:]
+
+            # this is the list for the actual joint positions and rotations
+            msg = np.hstack([
+                p_larm_quat_g,  # hand rot
+                p_hand_orig_g,  # hand orig
+                p_larm_quat_g,  # larm rot
+                p_larm_orig_g,  # larm orig
+                p_uarm_quat_g,  # uarm rot
+                p_uarm_orig_g,  # uarm orig
+                p_hips_quat_g  # hips rot
+            ])
+            # store as last msg for getter
+            self.last_msg = msg.copy()
+
+            if self.__stream_mc:
+                msg = list(msg)
+                # now we attach the monte carlo predictions for XYZ positions
+                if est.shape[0] > 1:
+                    for e_row in est:
+                        msg += list(e_row[:6])
+
+            if send_msg:
+                self.send_np_msg(msg)
+            dat += 1
+
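+    # packs the message as raw float32 values ('f' * len) so the receiving end,
+    # e.g. the Unity visualization, can decode it without a length prefix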
+    def send_np_msg(self, msg: np.array) -> int:
+        # craft UDP message and send
+        msg = struct.pack('f' * len(msg), *msg)
+        try:
+            return self.__udp_socket.sendto(msg, (self.__ip, self.__port))
+        except socket.timeout:
+            logging.info(f"[{self.__tag}] timed out")
+            return -1
+
+    def make_prediction(self, xx, input_state):
+
+        if self.__normalize:
+            xx = (xx - self.__xx_m) / self.__xx_s
+
+        self.__row_hist.append(xx)
+        # if not enough data is available yet, simply repeat the input as a first estimate
+        while len(self.__row_hist) < self.win_size:
+            self.__row_hist.append(xx)
+        # if the history is too long, delete old values
+        while len(self.__row_hist) > self.win_size:
+            del self.__row_hist[0]
+
+        xx_seq = np.vstack(self.__row_hist)  # seq x feat
+        xx_seq = rearrange(xx_seq, "(bs seq) (en feat) -> bs seq en feat", bs=1, en=1)
+        xx_seq = torch.tensor(xx_seq, dtype=torch.float32).to(self.device)
+
+        with torch.no_grad():
+            output = self.model(xx_seq, input_state)
+
+        # not enough history yet. Make sensor model predictions until we have a time-window worth of data
+        if self.__init_step <= self.win_size:
+            # there will be no prediction in this time window
+            pred_ = output[3]
+            pred_ = rearrange(pred_, "bs en dim -> (bs en) dim")
+            pred_ = self.utils_.format_state(pred_)
+            pred_ = rearrange(
+                pred_, "(bs en) (k dim) -> bs en k dim", bs=self.batch_size, k=1
+            )
+            input_state = torch.cat(
+                (input_state[:, :, 1:, :], pred_), axis=2
+            )
+            input_state = input_state.to(self.device)
+            self.__init_step += 1
+            # if on GPU copy the tensor to host memory first
+            if self.device.type == 'cuda':
+                smp = output[3].cpu().detach().numpy()[0]
+            else:
+                smp = output[3].detach().numpy()[0]
+            return smp, input_state  # return only sensor model prediction
+
+        ensemble = output[0]  # -> output ensemble
+        learned_obs = output[3]  # -> learned observations
+        ensemble_ = rearrange(ensemble, "bs (en k) dim -> bs en k dim", k=1)
+        input_state = torch.cat(
+            (input_state[:, :, 1:, :], ensemble_), axis=2
+        )
+        input_state = input_state.to(self.device)
+
+        # if on GPU copy the tensor to host memory first
+        if self.device.type == 'cuda':
+            ensemble = ensemble.cpu()
+            # learned_obs = learned_obs.cpu()
+
+        # get the output
+        t_preds = ensemble.detach().numpy()[0]
+        # t_preds = learned_obs.detach().numpy()[0]
+
+        if self.__normalize:
+            t_preds = t_preds * self.__yy_s + self.__yy_m
+
+        # store t_preds in history if smoothing is required
+        if self.__smooth > 1:
+            self.__smooth_hist.append(t_preds)
+            while len(self.__smooth_hist) < self.__smooth:
+                self.__smooth_hist.append(t_preds)
+            while len(self.__smooth_hist) > self.__smooth:
+                del self.__smooth_hist[0]
+            t_preds = np.vstack(self.__smooth_hist)
+
+        return t_preds, input_state  # return entire ensemble
+
+
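+# convenience wrapper: wires an ImuListener thread to a KalmanPhonePocket publisher
+# thread and returns the publisher, so callers can poll last_msg or call terminate()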
l_thread = threading.Thread( + target=imu_l.listen, + args=(left_q,) + ) + l_thread.start() + + kpp = KalmanPhonePocket(ip=ip, + smooth=smooth, + num_ensemble=ensembles, + port=config.PORT_PUB_LEFT_ARM, + window_size=5, + stream_mc=False, # False + model_name="SW-model") + p_thread = threading.Thread( + target=kpp.stream_wearable_devices, + args=(left_q, publish_to_unity,) + ) + p_thread.start() + + return kpp diff --git a/src/wear_mocap_ape/utility/names.py b/src/wear_mocap_ape/utility/names.py index fc3f51e..702dff5 100644 --- a/src/wear_mocap_ape/utility/names.py +++ b/src/wear_mocap_ape/utility/names.py @@ -8,18 +8,29 @@ class NNS_TARGETS(Enum): "gt_larm_6drr_rh_1", "gt_larm_6drr_rh_2", "gt_larm_6drr_rh_3", # larm 6drr "gt_larm_6drr_rh_4", "gt_larm_6drr_rh_5", "gt_larm_6drr_rh_6" ] - CONST_XYZ = [ + POS_RH_LARM_HAND = [ "gt_hand_orig_rua_x", "gt_hand_orig_rua_y", "gt_hand_orig_rua_z", "gt_larm_orig_rua_x", "gt_larm_orig_rua_y", "gt_larm_orig_rua_z" ] - ORI_CALIB_UARM_LARM_HIPS = [ + POS_RH_LARM_HAND_HIPS = [ + "gt_hand_orig_rua_x", "gt_hand_orig_rua_y", "gt_hand_orig_rua_z", + "gt_larm_orig_rua_x", "gt_larm_orig_rua_y", "gt_larm_orig_rua_z", + "gt_hips_yrot_cal_sin", "gt_hips_yrot_cal_cos" # hips calibrated y rot + ] + ORI_RH_UARM_LARM_HIPS = [ "gt_uarm_6drr_rh_1", "gt_uarm_6drr_rh_2", "gt_uarm_6drr_rh_3", # uarm 6drr "gt_uarm_6drr_rh_4", "gt_uarm_6drr_rh_5", "gt_uarm_6drr_rh_6", "gt_larm_6drr_rh_1", "gt_larm_6drr_rh_2", "gt_larm_6drr_rh_3", # larm 6drr "gt_larm_6drr_rh_4", "gt_larm_6drr_rh_5", "gt_larm_6drr_rh_6", "gt_hips_yrot_cal_sin", "gt_hips_yrot_cal_cos" # hips calibrated y rot ] - + ORI_CAL_LARM_UARM_HIPS = [ + "gt_larm_6drr_cal_1", "gt_larm_6drr_cal_2", "gt_larm_6drr_cal_3", # larm 6drr + "gt_larm_6drr_cal_4", "gt_larm_6drr_cal_5", "gt_larm_6drr_cal_6", + "gt_uarm_6drr_cal_1", "gt_uarm_6drr_cal_2", "gt_uarm_6drr_cal_3", # uarm 6drr + "gt_uarm_6drr_cal_4", "gt_uarm_6drr_cal_5", "gt_uarm_6drr_cal_6", + "gt_hips_yrot_cal_sin", "gt_hips_yrot_cal_cos" # hips calibrated y rot + ] class NNS_INPUTS(Enum): WATCH_ONLY = [ @@ -32,7 +43,7 @@ class NNS_INPUTS(Enum): "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6", "sw_pres_cal" ] - WATCH_PHONE_CALIB = [ + WATCH_PH_HIP = [ "sw_dt", "sw_gyro_x", "sw_gyro_y", "sw_gyro_z", "sw_lvel_x", "sw_lvel_y", "sw_lvel_z", @@ -41,14 +52,11 @@ class NNS_INPUTS(Enum): "sw_6drr_cal_1", "sw_6drr_cal_2", "sw_6drr_cal_3", "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6", "sw_pres_cal", - "ph_gyro_x", "ph_gyro_y", "ph_gyro_z", - "ph_lvel_x", "ph_lvel_y", "ph_lvel_z", - "ph_lacc_x", "ph_lacc_y", "ph_lacc_z", - "ph_grav_x", "ph_grav_y", "ph_grav_z", - "ph_6drr_cal_1", "ph_6drr_cal_2", "ph_6drr_cal_3", - "ph_6drr_cal_4", "ph_6drr_cal_5", "ph_6drr_cal_6" + "ph_hips_yrot_cal_sin", + "ph_hips_yrot_cal_cos" + ] - WATCH_PHONE_CALIB_REC_HIP = [ + WATCH_PHONE_CALIB = [ "sw_dt", "sw_gyro_x", "sw_gyro_y", "sw_gyro_z", "sw_lvel_x", "sw_lvel_y", "sw_lvel_z", @@ -62,7 +70,5 @@ class NNS_INPUTS(Enum): "ph_lacc_x", "ph_lacc_y", "ph_lacc_z", "ph_grav_x", "ph_grav_y", "ph_grav_z", "ph_6drr_cal_1", "ph_6drr_cal_2", "ph_6drr_cal_3", - "ph_6drr_cal_4", "ph_6drr_cal_5", "ph_6drr_cal_6", - "gt_hips_yrot_cal_sin_tm1", - "gt_hips_yrot_cal_cos_tm1" + "ph_6drr_cal_4", "ph_6drr_cal_5", "ph_6drr_cal_6" ]