diff --git a/MANIFEST.in b/MANIFEST.in index 8f2f065..5c78a0a 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,2 +1,2 @@ -global-include *.json *.xml *.pt *.pkl +global-include *.json *.xml *.pt *.pkl SW-v3.8-model-436400 diff --git a/example_scripts/watch_only_record.py b/example_scripts/record/watch_only_record.py similarity index 100% rename from example_scripts/watch_only_record.py rename to example_scripts/record/watch_only_record.py diff --git a/example_scripts/watch_phone_uarm_record.py b/example_scripts/record/watch_phone_uarm_record.py similarity index 100% rename from example_scripts/watch_phone_uarm_record.py rename to example_scripts/record/watch_phone_uarm_record.py diff --git a/example_scripts/stream/watch_only.py b/example_scripts/stream/watch_only.py new file mode 100644 index 0000000..9048580 --- /dev/null +++ b/example_scripts/stream/watch_only.py @@ -0,0 +1,67 @@ +import atexit +import logging +import queue +import signal +import threading +import wear_mocap_ape.config as config + +from wear_mocap_ape.stream.publisher.watch_only_udp import WatchOnlyNnUDP +from wear_mocap_ape.stream.listener.imu import ImuListener +from wear_mocap_ape.data_types import messaging + +import argparse + + +def run_watch_only_nn_udp(ip, smooth, stream_mc): + # listener and publisher run in separate threads. Listener fills the queue, publisher empties it + q = queue.Queue() + + # the listener fills the queue with received and parsed smartwatch data + imu_l = ImuListener( + ip=ip, + msg_size=messaging.watch_only_imu_msg_len, + port=config.PORT_LISTEN_WATCH_IMU_LEFT # the smartwatch app sends on this PORT + ) + imu_w_l = threading.Thread( + target=imu_l.listen, + args=(q,) + ) + + # the publisher estimates arm poses from queued data and publishes them via UDP to given IP and PORT + joints_p = WatchOnlyNnUDP( + ip=ip, + port=config.PORT_PUB_LEFT_ARM, + smooth=smooth, + stream_mc=stream_mc + ) + udp_publisher = threading.Thread( + target=joints_p.processing_loop, + args=(q,) + ) + + imu_w_l.start() + udp_publisher.start() + + def terminate_all(*args): + imu_l.terminate() + joints_p.terminate() + + # make sure all handlers exit on termination + atexit.register(terminate_all) + signal.signal(signal.SIGTERM, terminate_all) + signal.signal(signal.SIGINT, terminate_all) + return joints_p + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + # Instantiate the parser + parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') + + # Required IP argument + parser.add_argument('ip', type=str, + help=f'put your local IP here. 
' + f'The script will publish arm ' + f'pose data on PORT {config.PORT_PUB_LEFT_ARM}') + args = parser.parse_args() + + run_watch_only_nn_udp(ip=args.ip, smooth=5, stream_mc=True) diff --git a/example_scripts/watch_phone_pocket_stream.py b/example_scripts/stream/watch_phone_pocket_kalman.py similarity index 63% rename from example_scripts/watch_phone_pocket_stream.py rename to example_scripts/stream/watch_phone_pocket_kalman.py index f9f72ca..85d1366 100644 --- a/example_scripts/watch_phone_pocket_stream.py +++ b/example_scripts/stream/watch_phone_pocket_kalman.py @@ -8,33 +8,18 @@ from wear_mocap_ape import config from wear_mocap_ape.data_types import messaging from wear_mocap_ape.stream.listener.imu import ImuListener -from wear_mocap_ape.stream.publisher.watch_phone_pocket_udp import WatchPhonePocketUDP +from wear_mocap_ape.stream.publisher.watch_phone_pocket_kalman_udp import WatchPhonePocketKalmanUDP -if __name__ == "__main__": - logging.basicConfig(level=logging.INFO) - # Instantiate the parser - parser = argparse.ArgumentParser(description='') - - # Required IP argument - parser.add_argument('ip', type=str, help=f'put your local IP here.') - parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories') - parser.add_argument('stream_mc', nargs='?', type=bool, default=True, - help=f'whether you want to stream the full pose ensemble') - args = parser.parse_args() - - ip_arg = args.ip - smooth_arg = args.smooth - stream_mc_arg = args.stream_mc +def run_watch_phone_pocket_kalman(ip: str, smooth: int, stream_mc: bool) -> WatchPhonePocketKalmanUDP: # data for left-hand mode left_q = queue.Queue() # listen for imu data from phone and watch imu_l = ImuListener( - ip=ip_arg, + ip=ip, msg_size=messaging.watch_phone_imu_msg_len, - port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT, - tag="LISTEN IMU LEFT" + port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT ) l_thread = threading.Thread( target=imu_l.listen, @@ -42,28 +27,42 @@ ) # process into arm pose and body orientation - kpp = WatchPhonePocketUDP(ip=ip_arg, - smooth=smooth_arg, - num_ensemble=48, - port=config.PORT_PUB_LEFT_ARM, - window_size=10, - stream_mc=stream_mc_arg, - model_name="SW-model-sept-4") + kpp = WatchPhonePocketKalmanUDP(ip=ip, + smooth=smooth, + num_ensemble=48, + port=config.PORT_PUB_LEFT_ARM, + window_size=10, + stream_mc=stream_mc, + model_hash="SW-model-1211") p_thread = threading.Thread( - target=kpp.stream_wearable_devices, - args=(left_q, True,) + target=kpp.processing_loop, + args=(left_q,) ) l_thread.start() p_thread.start() - def terminate_all(*args): imu_l.terminate() kpp.terminate() - # make sure all handler exit on termination atexit.register(terminate_all) signal.signal(signal.SIGTERM, terminate_all) signal.signal(signal.SIGINT, terminate_all) + return kpp + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + # Instantiate the parser + parser = argparse.ArgumentParser(description='') + + # Required IP argument + parser.add_argument('ip', type=str, help=f'put your local IP here.') + parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories') + parser.add_argument('stream_mc', nargs='?', type=bool, default=True, + help=f'whether you want to stream the full pose ensemble') + args = parser.parse_args() + + run_watch_phone_pocket_kalman(ip=args.ip, smooth=args.smooth, stream_mc=args.stream_mc) diff --git a/example_scripts/stream/watch_phone_pocket_nn.py b/example_scripts/stream/watch_phone_pocket_nn.py new file mode 100644 
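Note that the rewritten Kalman script above now wraps its setup in run_watch_phone_pocket_kalman and returns the publisher, so it can be reused from other code instead of only via the CLI. A minimal sketch of such a programmatic call, assuming the module is importable as example_scripts.stream.watch_phone_pocket_kalman (the import path is not confirmed by this diff); the terminate() call and the returned object are taken from the function above:

import time
from example_scripts.stream.watch_phone_pocket_kalman import run_watch_phone_pocket_kalman

# starts the listener and publisher threads and returns the WatchPhonePocketKalmanUDP publisher
kpp = run_watch_phone_pocket_kalman(ip="192.168.1.42", smooth=5, stream_mc=True)
time.sleep(60)   # stream poses for one minute
kpp.terminate()  # ends the publisher's processing loop; the registered signal/atexit handlers stop the listener on exit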
index 0000000..59e3ad5 --- /dev/null +++ b/example_scripts/stream/watch_phone_pocket_nn.py @@ -0,0 +1,77 @@ +import argparse +import atexit +import logging +import queue +import signal +import threading + +from wear_mocap_ape import config +from wear_mocap_ape.data_deploy.nn import deploy_models +from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.stream.listener.imu import ImuListener +from wear_mocap_ape.stream.publisher.watch_phone_pocket_nn_udp import WatchPhonePocketNnUDP + + +def run_watch_phone_pocket_nn_udp(ip: str, smooth: int, stream_mc: bool) -> WatchPhonePocketNnUDP: + # data for left-hand mode + q = queue.Queue() + + # listen for imu data from phone and watch + imu_listener = ImuListener( + ip=ip, + msg_size=messaging.watch_phone_imu_msg_len, + port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT + ) + imu_thread = threading.Thread( + target=imu_listener.listen, + args=(q,) + ) + + # process into arm pose and body orientation + estimator = WatchPhonePocketNnUDP(ip=ip, + port=config.PORT_PUB_LEFT_ARM, + smooth=smooth, + model_hash=deploy_models.LSTM.WATCH_PHONE_POCKET.value, + stream_mc=stream_mc, + mc_samples=60) + udp_thread = threading.Thread( + target=estimator.processing_loop, + args=(q,) + ) + + imu_thread.start() + udp_thread.start() + + def terminate_all(*args): + imu_listener.terminate() + estimator.terminate() + + # make sure all handler exit on termination + atexit.register(terminate_all) + signal.signal(signal.SIGTERM, terminate_all) + signal.signal(signal.SIGINT, terminate_all) + + return estimator + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + + # Parse arguments + parser = argparse.ArgumentParser(description='') + + # Required IP argument + parser.add_argument('ip', type=str, help=f'put your local IP here.') + parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories') + parser.add_argument('--stream_mc', action='store_true') + parser.add_argument('--no-stream_mc', dest='stream_mc', action='store_false') + parser.set_defaults(stream_mc=True) + + args = parser.parse_args() + + ip_arg = args.ip + smooth_arg = args.smooth + stream_mc_arg = args.stream_mc + + # run the predictions + run_watch_phone_pocket_nn_udp(ip_arg, smooth_arg, stream_mc_arg) diff --git a/example_scripts/stream/watch_phone_uarm.py b/example_scripts/stream/watch_phone_uarm.py new file mode 100644 index 0000000..465cc78 --- /dev/null +++ b/example_scripts/stream/watch_phone_uarm.py @@ -0,0 +1,70 @@ +import argparse +import atexit +import logging +import queue +import signal +import threading + +import wear_mocap_ape.config as config +from wear_mocap_ape.stream.listener.imu import ImuListener +from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.stream.publisher.watch_phone_uarm_udp import WatchPhoneUarmUDP + + +def run_watch_phone_uarm_udp(ip, smooth): + # data processing happens in independent threads. + # We exchange data via queues. 
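The watch_phone_uarm script being added here follows the same producer/consumer shape as every stream script in this change: an ImuListener thread fills a queue with parsed sensor rows while a publisher thread drains it in its processing_loop, as the hunk lines that follow show. A self-contained sketch of that pattern with dummy stand-ins for the listener and publisher:

import queue
import threading
import time


def listen(q: queue.Queue):
    # stand-in for ImuListener.listen: parse UDP packets and enqueue sensor rows
    for _ in range(3):
        q.put([0.0] * 22)
        time.sleep(0.01)


def processing_loop(q: queue.Queue):
    # stand-in for a publisher's processing_loop: drain rows and publish pose estimates
    for _ in range(3):
        row = q.get()
        print(f"estimating pose from row with {len(row)} values")


q = queue.Queue()
l_thread = threading.Thread(target=listen, args=(q,))
p_thread = threading.Thread(target=processing_loop, args=(q,))
l_thread.start()
p_thread.start()
l_thread.join()
p_thread.join()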
+ left_q = queue.Queue() # data for left-hand mode + + # left listener + imu_l = ImuListener( + ip=ip, + msg_size=messaging.watch_phone_imu_msg_len, + port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT, + ) + l_thread = threading.Thread( + target=imu_l.listen, + args=(left_q,) + ) + + # left publisher + wp2ul = WatchPhoneUarmUDP( + ip=ip, + port=config.PORT_PUB_LEFT_ARM, + smooth=smooth + ) + ul_thread = threading.Thread( + target=wp2ul.processing_loop, + args=(left_q,) + ) + l_thread.start() + ul_thread.start() + + def terminate_all(*args): + imu_l.terminate() + wp2ul.terminate() + + # make sure all handler exit on termination + atexit.register(terminate_all) + signal.signal(signal.SIGTERM, terminate_all) + signal.signal(signal.SIGINT, terminate_all) + + return wp2ul + + +if __name__ == "__main__": + # enable basic logging + logging.basicConfig(level=logging.INFO) + + # Instantiate the parser + parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') + + # Required IP argument + parser.add_argument('ip', type=str, + help=f'put your local IP here. ' + f'The script will publish arm ' + f'pose data on PORT {config.PORT_PUB_LEFT_ARM} (left hand)') + parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories') + args = parser.parse_args() + + run_watch_phone_uarm_udp(ip=args.ip, smooth=args.smooth) diff --git a/example_scripts/stream/watch_phone_uarm_nn.py b/example_scripts/stream/watch_phone_uarm_nn.py new file mode 100644 index 0000000..d73748d --- /dev/null +++ b/example_scripts/stream/watch_phone_uarm_nn.py @@ -0,0 +1,71 @@ +import argparse +import atexit +import logging +import queue +import signal +import threading + +import wear_mocap_ape.config as config +from wear_mocap_ape.stream.listener.imu import ImuListener +from wear_mocap_ape.stream.publisher.watch_phone_uarm_nn_udp import WatchPhoneUarmNnUDP +from wear_mocap_ape.data_types import messaging + + +def run_watch_phone_uarm_nn_udp(ip, smooth, stream_mc): + # data processing happens in independent threads. + # We exchange data via queues. + left_q = queue.Queue() # data for left-hand mode + + # left listener + imu_l = ImuListener( + ip=ip, + msg_size=messaging.watch_phone_imu_msg_len, + port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT, + ) + l_thread = threading.Thread( + target=imu_l.listen, + args=(left_q,) + ) + + # left publisher + wp2ul = WatchPhoneUarmNnUDP( + ip=ip, + port=config.PORT_PUB_LEFT_ARM, + smooth=smooth, + stream_mc=stream_mc, + ) + ul_thread = threading.Thread( + target=wp2ul.processing_loop, + args=(left_q,) + ) + l_thread.start() + ul_thread.start() + + def terminate_all(*args): + imu_l.terminate() + wp2ul.terminate() + + # make sure all handler exit on termination + atexit.register(terminate_all) + signal.signal(signal.SIGTERM, terminate_all) + signal.signal(signal.SIGINT, terminate_all) + + return wp2ul + + +if __name__ == "__main__": + # enable basic logging + logging.basicConfig(level=logging.INFO) + + # Instantiate the parser + parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') + + # Required IP argument + parser.add_argument('ip', type=str, + help=f'put your local IP here. 
' + f'The script will publish arm ' + f'pose data on PORT {config.PORT_PUB_LEFT_ARM} (left hand)') + parser.add_argument('smooth', nargs='?', type=int, default=5, help=f'smooth predicted trajectories') + args = parser.parse_args() + + run_watch_phone_uarm_nn_udp(ip=args.ip, smooth=args.smooth, stream_mc=False) diff --git a/example_scripts/watch_only_stream.py b/example_scripts/watch_only_stream.py deleted file mode 100644 index ee4c2d8..0000000 --- a/example_scripts/watch_only_stream.py +++ /dev/null @@ -1,62 +0,0 @@ -import atexit -import logging -import queue -import signal -import threading -import wear_mocap_ape.config as config - -from wear_mocap_ape.stream.publisher.watch_only_udp import WatchOnlyUDP -from wear_mocap_ape.stream.listener.imu import ImuListener -from wear_mocap_ape.data_types import messaging - -import argparse - -# Instantiate the parser -parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') - -# Required IP argument -parser.add_argument('ip', type=str, - help=f'put your local IP here. ' - f'The script will publish arm ' - f'pose data on PORT {config.PORT_PUB_LEFT_ARM}') -args = parser.parse_args() - -logging.basicConfig(level=logging.INFO) - -# listener and publisher run in separate threads. Listener fills the queue, publisher empties it -q = queue.Queue() - -# the listener fills the que with received and parsed smartwatch data -imu_l = ImuListener( - ip=args.ip, - msg_size=messaging.watch_only_imu_msg_len, - port=config.PORT_LISTEN_WATCH_IMU_LEFT # the smartwatch app sends on this PORT -) -imu_w_l = threading.Thread( - target=imu_l.listen, - args=(q,) -) - -# the publisher estimates arm poses from queued data and publishes them via UDP to given IP and PORT -joints_p = WatchOnlyUDP( - ip=args.ip, - port=config.PORT_PUB_LEFT_ARM -) -udp_publisher = threading.Thread( - target=joints_p.processing_loop, - args=(q,) -) - -imu_w_l.start() -udp_publisher.start() - - -def terminate_all(*args): - imu_l.terminate() - joints_p.terminate() - - -# make sure all handler exit on termination -atexit.register(terminate_all) -signal.signal(signal.SIGTERM, terminate_all) -signal.signal(signal.SIGINT, terminate_all) diff --git a/example_scripts/watch_phone_uarm_stream.py b/example_scripts/watch_phone_uarm_stream.py deleted file mode 100644 index 1fa84d8..0000000 --- a/example_scripts/watch_phone_uarm_stream.py +++ /dev/null @@ -1,98 +0,0 @@ -import argparse -import atexit -import logging -import queue -import signal -import threading - -import wear_mocap_ape.config as config -from wear_mocap_ape.stream.listener.imu import ImuListener -from wear_mocap_ape.stream.publisher.watch_phone_uarm_udp import WatchPhoneUarmUDP -from wear_mocap_ape.data_types import messaging - -# enable basic logging -logging.basicConfig(level=logging.INFO) - -# Instantiate the parser -parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') - -# Required IP argument -parser.add_argument('ip', type=str, - help=f'put your local IP here. ' - f'The script will publish arm ' - f'pose data on PORT {config.PORT_PUB_LEFT_ARM} (left hand)' - f'or PORT {config.PORT_PUB_RIGHT_ARM} (right hand)') -args = parser.parse_args() -ip = args.ip - -# data processing happens in independent threads. -# We exchange data via queues. 
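This deleted script was the only entry point that drove a left-hand and a right-hand device at the same time; its replacements above are left-hand only, and the right-hand ports are commented out in config.py further down. If dual-arm streaming were still needed, a second listener/publisher pair could be launched on the old right-hand ports. The sketch below is speculative: it hardcodes the retired port numbers 65003 and 50004 and assumes WatchPhoneUarmUDP still accepts the left_hand_mode flag shown in the deleted code:

import queue
import threading
from wear_mocap_ape.data_types import messaging
from wear_mocap_ape.stream.listener.imu import ImuListener
from wear_mocap_ape.stream.publisher.watch_phone_uarm_udp import WatchPhoneUarmUDP

right_q = queue.Queue()
# 65003 and 50004 were PORT_LISTEN_WATCH_PHONE_IMU_RIGHT and PORT_PUB_RIGHT_ARM before this change
imu_r = ImuListener(ip="192.168.1.42", msg_size=messaging.watch_phone_imu_msg_len, port=65003)
wp2ur = WatchPhoneUarmUDP(ip="192.168.1.42", port=50004, smooth=5, left_hand_mode=False)
threading.Thread(target=imu_r.listen, args=(right_q,)).start()
threading.Thread(target=wp2ur.processing_loop, args=(right_q,)).start()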
-# This script has two separate queues for the case that the -# user streams from a left-hand and a right-hand device at the same time -left_q = queue.Queue() # data for left-hand mode -right_q = queue.Queue() # data for right-hand mode - -# left listener -imu_l = ImuListener( - ip=ip, - msg_size=messaging.watch_phone_imu_msg_len, - port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT, - tag="LISTEN IMU LEFT" -) -l_thread = threading.Thread( - target=imu_l.listen, - args=(left_q,) -) - -# right listener -imu_r = ImuListener( - ip=ip, - msg_size=messaging.watch_phone_imu_msg_len, - port=config.PORT_LISTEN_WATCH_PHONE_IMU_RIGHT, - tag="LISTEN IMU RIGHT" -) -r_thread = threading.Thread( - target=imu_r.listen, - args=(right_q,) -) - -# left publisher -wp2ul = WatchPhoneUarmUDP( - ip=ip, - port=config.PORT_PUB_LEFT_ARM, - tag="PUBLISH LEFT" -) -ul_thread = threading.Thread( - target=wp2ul.stream_loop, - args=(left_q,) -) - -# right publisher -wp2ur = WatchPhoneUarmUDP( - ip=ip, - port=config.PORT_PUB_RIGHT_ARM, - tag="PUBLISH RIGHT", - left_hand_mode=False -) -ur_thread = threading.Thread( - target=wp2ur.stream_loop, - args=(right_q,) -) - -l_thread.start() -r_thread.start() -ul_thread.start() -ur_thread.start() - - -def terminate_all(*args): - imu_l.terminate() - wp2ur.terminate() - imu_r.terminate() - wp2ul.terminate() - - -# make sure all handler exit on termination -atexit.register(terminate_all) -signal.signal(signal.SIGTERM, terminate_all) -signal.signal(signal.SIGINT, terminate_all) diff --git a/experimental_scripts/watch_phone_watch_only.py b/experimental_scripts/watch_phone_watch_only.py new file mode 100644 index 0000000..7b683aa --- /dev/null +++ b/experimental_scripts/watch_phone_watch_only.py @@ -0,0 +1,70 @@ +import atexit +import logging +import queue +import signal +import threading +import wear_mocap_ape.config as config + +from wear_mocap_ape.stream.publisher.watch_only_udp import WatchOnlyNnUDP +from wear_mocap_ape.stream.listener.imu import ImuListener +from wear_mocap_ape.data_types import messaging + +import argparse + + +def run_watch_phone_watch_only_nn_udp(ip, smooth, stream_mc): + # listener and publisher run in separate threads. Listener fills the queue, publisher empties it + q = queue.Queue() + + # listen for imu data from phone and watch + imu_l = ImuListener( + ip=ip, + msg_size=messaging.watch_phone_imu_msg_len, + port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT + ) + imu_thread = threading.Thread( + target=imu_l.listen, + args=(q,) + ) + + # the publisher estimates arm poses from queued data and publishes them via UDP to given IP and PORT + estimator = WatchOnlyNnUDP( + ip=ip, + port=config.PORT_PUB_LEFT_ARM, + smooth=smooth, + stream_mc=stream_mc, + mc_samples=50, + watch_phone=True + ) + udp_thread = threading.Thread( + target=estimator.processing_loop, + args=(q,) + ) + + imu_thread.start() + udp_thread.start() + + def terminate_all(*args): + imu_l.terminate() + estimator.terminate() + + # make sure all handler exit on termination + atexit.register(terminate_all) + signal.signal(signal.SIGTERM, terminate_all) + signal.signal(signal.SIGINT, terminate_all) + return estimator + + +if __name__ == "__main__": + logging.basicConfig(level=logging.INFO) + # Instantiate the parser + parser = argparse.ArgumentParser(description='streams data from the watch in standalone mode') + + # Required IP argument + parser.add_argument('ip', type=str, + help=f'put your local IP here. 
' + f'The script will publish arm ' + f'pose data on PORT {config.PORT_PUB_LEFT_ARM}') + args = parser.parse_args() + + run_watch_phone_watch_only_nn_udp(ip=args.ip, smooth=5, stream_mc=True) diff --git a/setup.cfg b/setup.cfg index 5917482..21b2245 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = wear_mocap_ape -version = 1.1.0 +version = 1.2.0 author = Fabian Weigend author_email = fweigend@asu.edu description = diff --git a/src/wear_mocap_ape/config.py b/src/wear_mocap_ape/config.py index 17af206..d4e8ff4 100644 --- a/src/wear_mocap_ape/config.py +++ b/src/wear_mocap_ape/config.py @@ -9,17 +9,13 @@ # ports for publishing to other services PORT_PUB_LEFT_ARM = 50003 -PORT_PUB_RIGHT_ARM = 50004 - # Listener ports for receiving data -# watch and phone -PORT_LISTEN_WATCH_PHONE_IMU_LEFT = 65000 -PORT_LISTEN_WATCH_PHONE_IMU_RIGHT = 65003 -PORT_LISTEN_WATCH_PHONE_AUDIO = 65001 -# watch only -PORT_LISTEN_WATCH_AUDIO = 46001 -PORT_LISTEN_WATCH_IMU_LEFT = 46000 +PORT_LISTEN_WATCH_PHONE_IMU_LEFT = 65000 # watch and phone either mode +PORT_LISTEN_WATCH_IMU_LEFT = 46000 # watch only # experimental modes +PORT_LISTEN_AUDIO = 65001 PORT_PUB_TRANSCRIBED_KEYS = 50006 +# PORT_LISTEN_WATCH_PHONE_IMU_RIGHT = 65003 +# PORT_PUB_RIGHT_ARM = 50004 diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl deleted file mode 100644 index a135e39..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_KALMAN_STATE.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_ORI_CALIB_UARM_LARM.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_ORI_CALIB_UARM_LARM.pkl deleted file mode 100644 index d5c3ece..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_ORI_CALIB_UARM_LARM.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl deleted file mode 100644 index a5063f0..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_ONLY_POS_RH_LARM_HAND.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PHONE_CALIB_REC_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PHONE_CALIB_REC_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl deleted file mode 100644 index 0557d08..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PHONE_CALIB_REC_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl deleted file mode 100644 index 372f79f..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_KALMAN_PHONE_POCKET.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl deleted file mode 100644 index 6247b29..0000000 Binary files a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CALIB_UARM_LARM_HIPS.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl b/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl deleted file mode 100644 index 1c35a1a..0000000 Binary files 
a/src/wear_mocap_ape/data_deploy/data_stats/WATCH_PH_HIP_ORI_CAL_LARM_UARM_HIPS.pkl and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4 b/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4 deleted file mode 100644 index 67db508..0000000 Binary files a/src/wear_mocap_ape/data_deploy/kalman/SW-model-sept-4 and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt deleted file mode 100644 index 87d2541..0000000 Binary files a/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/checkpoint.pt and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/results.json b/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/results.json deleted file mode 100644 index 8a39d97..0000000 --- a/src/wear_mocap_ape/data_deploy/nn/04d2d059ee8980e13e5e16fe048b6bd0f8265c9a/results.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "model": "ImuPoseLSTM", - "hidden_layer_count": 2, - "hidden_layer_size": 128, - "epochs": 200, - "batch_size": 64, - "learning_rate": 0.001, - "dropout": 0.4, - "sequence_len": 5, - "normalize": true, - "seq_overlap": true, - "create_norm_stats": true, - "hash": "04d2d059ee8980e13e5e16fe048b6bd0f8265c9a", - "y_targets_n": "ORI_CAL_LARM_UARM_HIPS", - "x_inputs_n": "WATCH_PH_HIP", - "y_targets_v": [ - "gt_larm_6drr_cal_1", - "gt_larm_6drr_cal_2", - "gt_larm_6drr_cal_3", - "gt_larm_6drr_cal_4", - "gt_larm_6drr_cal_5", - "gt_larm_6drr_cal_6", - "gt_uarm_6drr_cal_1", - "gt_uarm_6drr_cal_2", - "gt_uarm_6drr_cal_3", - "gt_uarm_6drr_cal_4", - "gt_uarm_6drr_cal_5", - "gt_uarm_6drr_cal_6", - "gt_hips_yrot_cal_sin", - "gt_hips_yrot_cal_cos" - ], - "x_inputs_v": [ - "sw_dt", - "sw_gyro_x", - "sw_gyro_y", - "sw_gyro_z", - "sw_lvel_x", - "sw_lvel_y", - "sw_lvel_z", - "sw_lacc_x", - "sw_lacc_y", - "sw_lacc_z", - "sw_grav_x", - "sw_grav_y", - "sw_grav_z", - "sw_6drr_cal_1", - "sw_6drr_cal_2", - "sw_6drr_cal_3", - "sw_6drr_cal_4", - "sw_6drr_cal_5", - "sw_6drr_cal_6", - "sw_pres_cal", - "ph_hips_yrot_cal_sin", - "ph_hips_yrot_cal_cos" - ], - "Loss/train": 0.13687036271612407, - "Loss/test": 0.28687360928033256, - "Loss/b_train": 0.1611349323569118, - "Loss/b_test": 0.2804641976381203 -} \ No newline at end of file diff --git a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt deleted file mode 100644 index f7122d2..0000000 Binary files a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/checkpoint.pt and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json b/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json deleted file mode 100644 index 1e50e4d..0000000 --- a/src/wear_mocap_ape/data_deploy/nn/2b4e48b366d717b035751c40f977d9ae6c26d6b2/results.json +++ /dev/null @@ -1,50 +0,0 @@ -{ - "model": "DropoutFF", - "hidden_layer_count": 4, - "hidden_layer_size": 128, - "epochs": 200, - "batch_size": 64, - "learning_rate": 0.0001, - "dropout": 0.2, - "sequence_len": 5, - "normalize": true, - "seq_overlap": true, - "create_norm_stats": true, - "hash": "2b4e48b366d717b035751c40f977d9ae6c26d6b2", - "y_targets_n": "POS_RH_LARM_HAND", - "x_inputs_n": "WATCH_ONLY", - "y_targets_v": [ - 
"gt_hand_orig_rua_x", - "gt_hand_orig_rua_y", - "gt_hand_orig_rua_z", - "gt_larm_orig_rua_x", - "gt_larm_orig_rua_y", - "gt_larm_orig_rua_z" - ], - "x_inputs_v": [ - "sw_dt", - "sw_gyro_x", - "sw_gyro_y", - "sw_gyro_z", - "sw_lvel_x", - "sw_lvel_y", - "sw_lvel_z", - "sw_lacc_x", - "sw_lacc_y", - "sw_lacc_z", - "sw_grav_x", - "sw_grav_y", - "sw_grav_z", - "sw_6drr_cal_1", - "sw_6drr_cal_2", - "sw_6drr_cal_3", - "sw_6drr_cal_4", - "sw_6drr_cal_5", - "sw_6drr_cal_6", - "sw_pres_cal" - ], - "Loss/train": 0.24004665516297385, - "Loss/test": 0.377081323361239, - "Loss/b_train": 0.2682442721176126, - "Loss/b_test": 0.3734141369143522 -} \ No newline at end of file diff --git a/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/checkpoint.pt deleted file mode 100644 index fa97f5c..0000000 Binary files a/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/checkpoint.pt and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/results.json b/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/results.json deleted file mode 100644 index c0016bb..0000000 --- a/src/wear_mocap_ape/data_deploy/nn/2c700a1ca1af084eedbae5bdd86a5194e42ded4d/results.json +++ /dev/null @@ -1,54 +0,0 @@ -{ - "model": "DropoutLSTM", - "hidden_layer_count": 3, - "hidden_layer_size": 128, - "epochs": 200, - "batch_size": 128, - "learning_rate": 0.0001, - "dropout": 0.2, - "sequence_len": 15, - "normalize": true, - "hash": "2c700a1ca1af084eedbae5bdd86a5194e42ded4d", - "y_targets_n": "ORI_CALIB_UARM_LARM", - "x_inputs_n": "WATCH_ONLY", - "y_targets_v": [ - "gt_uarm_6drr_rh_1", - "gt_uarm_6drr_rh_2", - "gt_uarm_6drr_rh_3", - "gt_uarm_6drr_rh_4", - "gt_uarm_6drr_rh_5", - "gt_uarm_6drr_rh_6", - "gt_larm_6drr_rh_1", - "gt_larm_6drr_rh_2", - "gt_larm_6drr_rh_3", - "gt_larm_6drr_rh_4", - "gt_larm_6drr_rh_5", - "gt_larm_6drr_rh_6" - ], - "x_inputs_v": [ - "sw_dt", - "sw_gyro_x", - "sw_gyro_y", - "sw_gyro_z", - "sw_lvel_x", - "sw_lvel_y", - "sw_lvel_z", - "sw_lacc_x", - "sw_lacc_y", - "sw_lacc_z", - "sw_grav_x", - "sw_grav_y", - "sw_grav_z", - "sw_6drr_cal_1", - "sw_6drr_cal_2", - "sw_6drr_cal_3", - "sw_6drr_cal_4", - "sw_6drr_cal_5", - "sw_6drr_cal_6", - "sw_pres_cal" - ], - "Loss/train": 0.10688399363547474, - "Loss/test": 0.25043141344165, - "Loss/b_train": 0.1418339469824598, - "Loss/b_test": 0.2444741313940434 -} \ No newline at end of file diff --git a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py b/src/wear_mocap_ape/data_deploy/nn/deploy_models.py index 9196a34..e482e70 100644 --- a/src/wear_mocap_ape/data_deploy/nn/deploy_models.py +++ b/src/wear_mocap_ape/data_deploy/nn/deploy_models.py @@ -1,11 +1,7 @@ from enum import Enum -class FF(Enum): - WATCH_ONLY = "2b4e48b366d717b035751c40f977d9ae6c26d6b2" - - class LSTM(Enum): - WATCH_ONLY = "2c700a1ca1af084eedbae5bdd86a5194e42ded4d" - IMU_EXMP = "04d2d059ee8980e13e5e16fe048b6bd0f8265c9a" - WATCH_PHONE_POCKET = "ec70862ec17828320ca64a738b2a90cf5dcadced" + WATCH_PHONE_POCKET = "" + WATCH_PHONE_UARM = "" + WATCH_ONLY = "f99ed72bc0d1aeb27c533f1bd93010e0164fcde6" diff --git a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt deleted file mode 100644 index bb5cd75..0000000 Binary files 
a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/checkpoint.pt and /dev/null differ diff --git a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json b/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json deleted file mode 100644 index 030e46e..0000000 --- a/src/wear_mocap_ape/data_deploy/nn/ec70862ec17828320ca64a738b2a90cf5dcadced/results.json +++ /dev/null @@ -1,60 +0,0 @@ -{ - "model": "DropoutLSTM", - "hidden_layer_count": 2, - "hidden_layer_size": 128, - "epochs": 200, - "batch_size": 64, - "learning_rate": 0.001, - "dropout": 0.4, - "sequence_len": 5, - "normalize": true, - "seq_overlap": true, - "create_norm_stats": true, - "hash": "ec70862ec17828320ca64a738b2a90cf5dcadced", - "y_targets_n": "ORI_CAL_LARM_UARM_HIPS", - "x_inputs_n": "WATCH_PH_HIP", - "y_targets_v": [ - "gt_larm_6drr_cal_1", - "gt_larm_6drr_cal_2", - "gt_larm_6drr_cal_3", - "gt_larm_6drr_cal_4", - "gt_larm_6drr_cal_5", - "gt_larm_6drr_cal_6", - "gt_uarm_6drr_cal_1", - "gt_uarm_6drr_cal_2", - "gt_uarm_6drr_cal_3", - "gt_uarm_6drr_cal_4", - "gt_uarm_6drr_cal_5", - "gt_uarm_6drr_cal_6", - "gt_hips_yrot_cal_sin", - "gt_hips_yrot_cal_cos" - ], - "x_inputs_v": [ - "sw_dt", - "sw_gyro_x", - "sw_gyro_y", - "sw_gyro_z", - "sw_lvel_x", - "sw_lvel_y", - "sw_lvel_z", - "sw_lacc_x", - "sw_lacc_y", - "sw_lacc_z", - "sw_grav_x", - "sw_grav_y", - "sw_grav_z", - "sw_6drr_cal_1", - "sw_6drr_cal_2", - "sw_6drr_cal_3", - "sw_6drr_cal_4", - "sw_6drr_cal_5", - "sw_6drr_cal_6", - "sw_pres_cal", - "ph_hips_yrot_cal_sin", - "ph_hips_yrot_cal_cos" - ], - "Loss/train": 0.19960083461272307, - "Loss/test": 0.2753936388019971, - "Loss/b_train": 0.19960083461272307, - "Loss/b_test": 0.2753936388019971 -} diff --git a/src/wear_mocap_ape/data_types/bone.py b/src/wear_mocap_ape/data_types/bone.py index cc8a88b..b2e5e22 100644 --- a/src/wear_mocap_ape/data_types/bone.py +++ b/src/wear_mocap_ape/data_types/bone.py @@ -10,7 +10,7 @@ class Bone: def __init__(self, bone_id: int, default_pos, - default_rot=np.array([1, 0, 0, 0], dtype=np.float64) # identity quaternion w,x,y,z + default_rot=np.array([1, 0, 0, 0]) # identity quaternion w,x,y,z ): self.bone_id = bone_id # int self.default_pos = default_pos # position as vec3 diff --git a/src/wear_mocap_ape/data_types/bone_map.py b/src/wear_mocap_ape/data_types/bone_map.py index c581766..fb65d4b 100644 --- a/src/wear_mocap_ape/data_types/bone_map.py +++ b/src/wear_mocap_ape/data_types/bone_map.py @@ -42,7 +42,7 @@ class BoneMap: DEFAULT_LARM_LEN = 0.22 # 0.22 DEFAULT_UARM_LEN = 0.30 # default left shoulder origin relative to hip - DEFAULT_L_SHOU_ORIG_RH = np.array([-0.1704612, 0.4309841, -0.00670862]) + DEFAULT_UARM_ORIG_RH = np.array([-0.1704612, 0.4309841, -0.00670862]) def __init__(self, skeleton_name: str): @@ -61,7 +61,7 @@ def __init__(self, skeleton_name: str): b_id = int(bone.get("id")) p_id = int(bone.find("parent_id").text) # parse bone offsets from XML - b_pos = np.array([float(x) for x in bone.find("offset").text.split(",")], dtype=np.float64) + b_pos = np.array([float(x) for x in bone.find("offset").text.split(",")]) # if the bone is not the root object, add parent bone transform for global position b_pos += np.zeros(3) if p_id == 0 else self.__bonemap[p_id].default_pos # create bone object and add to bone map diff --git a/src/wear_mocap_ape/data_types/voice_commands.py b/src/wear_mocap_ape/data_types/voice_commands.py index 7fdd431..94e533f 100644 --- a/src/wear_mocap_ape/data_types/voice_commands.py 
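The voice_commands.py hunk below trims and regroups the phrase vocabulary: "continue" and "now" become aliases for "move" (code 4), "hand" joins "follow" (5), and "back" joins "home" (10). A transcription consumer would map recognized words onto these codes with a plain dictionary lookup; a small illustrative sketch (KEY_PHRASES is real, the scanning helper is not part of the package):

from wear_mocap_ape.data_types.voice_commands import KEY_PHRASES


def commands_in(transcript: str):
    # yield (phrase, command code) for every key phrase found in a transcript
    for word in transcript.lower().split():
        if word in KEY_PHRASES:
            yield word, KEY_PHRASES[word]


print(list(commands_in("follow my hand then go back home")))
# expected with the updated table: [('follow', 5), ('hand', 5), ('back', 10), ('home', 10)]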
+++ b/src/wear_mocap_ape/data_types/voice_commands.py @@ -4,25 +4,20 @@ See the transcription script for details. """ KEY_PHRASES = { - # dual manipulation task - # "blue": 0, # left - # "red": 1, # right - # - # # ROS experiment commands "stop": 3, # stop current task # "move": 4, # carry on with original task - # "continue": 4, # same as "move" + "continue": 4, # same as "move" + "now": 4, "follow": 5, # move EE to hand - "open": 6, # open the gripper - "close": 7, + "hand": 5, + + # "close": 7, # "handover": 6, # same as "open" # "over": 6, + "open": 6, # open the gripper # "give": 6, # "cube": 6, - "home": 10, - "record": 11 - - # # simple test - # "test": 99 + "back": 10, + "home": 10 } diff --git a/src/wear_mocap_ape/estimate/compose_msg.py b/src/wear_mocap_ape/estimate/compose_msg.py new file mode 100644 index 0000000..0b65d21 --- /dev/null +++ b/src/wear_mocap_ape/estimate/compose_msg.py @@ -0,0 +1,108 @@ +import numpy as np + +from wear_mocap_ape.utility.names import NNS_TARGETS +from wear_mocap_ape.utility import transformations as ts + +FUNCTION_LOOKUP = { + NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS: lambda a, b: est_to_ori_cal_larm_uarm_hips(a, b), + NNS_TARGETS.ORI_CAL_LARM_UARM: lambda a, b: est_to_ori_cal_larm_uarm(a, b), + NNS_TARGETS.ORI_POS_CAL_LARM_UARM_HIPS: lambda a, b: est_ori_pos_cal_larm_uarm_hips(a, b) +} + + +def msg_from_nn_targets_est(est: np.array, body_measure: np.array, y_targets: NNS_TARGETS): + return FUNCTION_LOOKUP[y_targets](est, body_measure) + + +def est_ori_pos_cal_larm_uarm_hips(est: np.array, body_measure: np.array): + # estimate mean of rotations if we got multiple MC predictions + if est.shape[0] > 1: + # Calculate the mean of all predictions mean + p_hips_quat_g = ts.average_quaternions(est[:, 17:]) + p_larm_quat_g = ts.average_quaternions(est[:, 9:13]) + p_uarm_quat_g = ts.average_quaternions(est[:, 13:17]) + + # use body measurements for transitions + p_uarm_orig_g = np.mean(est[:, 6:9], axis=0) + p_larm_orig_g = np.mean(est[:, 3:6], axis=0) + p_hand_orig_g = np.mean(est[:, 0:3], axis=0) + else: + p_hand_orig_g = est[0, 0:3] + p_larm_orig_g = est[0, 3:6] + p_uarm_orig_g = est[0, 6:9] + p_larm_quat_g = est[0, 9:13] + p_uarm_quat_g = est[0, 13:17] + p_hips_quat_g = est[0, 17:] + # this is the list for the actual joint positions and rotations + return np.hstack([ + p_larm_quat_g, # hand rot [0,1,2,3] + p_hand_orig_g, # hand orig [4,5,6] + p_larm_quat_g, # larm rot [7,8,9,10] + p_larm_orig_g, # larm orig [11,12,13] + p_uarm_quat_g, # uarm rot [14,15,16,17] + p_uarm_orig_g, # uarm orig [18,19,20] + p_hips_quat_g # hips rot [21,22,23,24] + ]) + + +def est_to_ori_cal_larm_uarm_hips(est: np.array, body_measure: np.array): + larm_vec, uarm_vec, uarm_orig = body_measure[0, :3], body_measure[0, 3:6], body_measure[0, 6:] + + # estimate mean of rotations if we got multiple MC predictions + if est.shape[0] > 1: + # Calculate the mean of all predictions mean + p_hips_quat_g = ts.average_quaternions(est[:, 17:]) + p_larm_quat_g = ts.average_quaternions(est[:, 9:13]) + p_uarm_quat_g = ts.average_quaternions(est[:, 13:17]) + + # use body measurements for transitions + p_uarm_orig_g = ts.quat_rotate_vector(p_hips_quat_g, uarm_orig) + p_larm_orig_g = ts.quat_rotate_vector(p_uarm_quat_g, uarm_vec) + p_uarm_orig_g + p_hand_orig_g = ts.quat_rotate_vector(p_larm_quat_g, larm_vec) + p_larm_orig_g + else: + p_hand_orig_g = est[0, 0:3] + p_larm_orig_g = est[0, 3:6] + p_uarm_orig_g = est[0, 6:9] + p_larm_quat_g = est[0, 9:13] + p_uarm_quat_g = est[0, 13:17] + p_hips_quat_g = 
est[0, 17:] + + # this is the list for the actual joint positions and rotations + return np.hstack([ + p_larm_quat_g, # hand rot [0,1,2,3] + p_hand_orig_g, # hand orig [4,5,6] + p_larm_quat_g, # larm rot [7,8,9,10] + p_larm_orig_g, # larm orig [11,12,13] + p_uarm_quat_g, # uarm rot [14,15,16,17] + p_uarm_orig_g, # uarm orig [18,19,20] + p_hips_quat_g # hips rot [21,22,23,24] + ]) + + +def est_to_ori_cal_larm_uarm(est: np.array, body_measure: np.array): + larm_vec, uarm_vec, uarm_orig = body_measure[0, :3], body_measure[0, 3:6], body_measure[0, 6:] + + # estimate mean of rotations if we got multiple MC predictions + if est.shape[0] > 1: + # Calculate the mean of all predictions mean + p_larm_quat_rh = ts.average_quaternions(est[:, 6:10]) + p_uarm_quat_rh = ts.average_quaternions(est[:, 10:]) + + # use body measurements for transitions + p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, uarm_vec) + uarm_orig + p_hand_orig_rh = ts.quat_rotate_vector(p_larm_quat_rh, larm_vec) + p_larm_orig_rh + else: + p_hand_orig_rh = est[0, :3] + p_larm_orig_rh = est[0, 3:6] + p_larm_quat_rh = est[0, 6:10] + p_uarm_quat_rh = est[0, 10:] + + return np.hstack([ + p_larm_quat_rh, # hand rot [0,1,2,3] + p_hand_orig_rh, # hand orig [4,5,6] + p_larm_quat_rh, # larm rot [7,8,9,10] + p_larm_orig_rh, # larm orig [11,12,13] + p_uarm_quat_rh, # uarm rot [14,15,16,17] + uarm_orig, # uarm orig [18,19,20] + np.array([1, 0, 0, 0]) # hips rot [21,22,23,24] + ]) diff --git a/src/wear_mocap_ape/estimate/estimate_joints.py b/src/wear_mocap_ape/estimate/estimate_joints.py index 640d76a..920da49 100644 --- a/src/wear_mocap_ape/estimate/estimate_joints.py +++ b/src/wear_mocap_ape/estimate/estimate_joints.py @@ -4,12 +4,12 @@ from wear_mocap_ape.utility import transformations as ts FUNCTION_LOOKUP = { - NNS_TARGETS.ORI_CALIB_UARM_LARM: lambda a, b: uarm_larm_6drr_to_origins(a, b), - NNS_TARGETS.POS_RH_LARM_HAND: lambda a, b: hand_larm_xyz_to_origins(a, b), - NNS_TARGETS.ORI_RH_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b), - NNS_TARGETS.POS_RH_LARM_HAND_HIPS: lambda a, b: larm_hand_hip_pos_rua_to_origins_g(a, b), - # NNS_TARGETS.KALMAN_PHONE_POCKET: lambda a, b: larm_uarm_hip_6dof_rh_to_origins_g(a, b), - NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS: lambda a, b: larm_uarm_hip_6dof_cal_to_origins_cal(a, b) + # NNS_TARGETS.POS_RH_LARM_HAND: lambda a, b: hand_larm_xyz_to_origins(a, b), + # NNS_TARGETS.ORI_RH_UARM_LARM_HIPS: lambda a, b: uarm_larm_hip_6dof_rh_to_origins_g(a, b), + # NNS_TARGETS.POS_RH_LARM_HAND_HIPS: lambda a, b: larm_hand_hip_pos_rua_to_origins_g(a, b), + NNS_TARGETS.ORI_CAL_LARM_UARM: lambda a, b: larm_uarm_6drr_cal_to_origins_cal(a, b), + NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS: lambda a, b: larm_uarm_hip_6dof_cal_to_origins_cal(a, b), + NNS_TARGETS.ORI_POS_CAL_LARM_UARM_HIPS: lambda a, b: larm_uarm_hip_6dof_pos_cal(a, b) } @@ -17,70 +17,31 @@ def arm_pose_from_nn_targets(preds: np.array, body_measurements: np.array, y_tar return FUNCTION_LOOKUP[y_targets](preds, body_measurements) -def larm_hand_hip_pos_rua_to_origins_g(preds: np.array, body_measure: np.array): - """ - :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] - :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] - :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] - """ +def larm_uarm_hip_6dof_pos_cal(preds: np.array, body_measure: np.array): # split combined pred rows back into separate arrays - p_hand_orig_rua, p_larm_orig_rua, hips_sin, hips_cos = preds[:, :3], preds[:, 3:6], preds[:, 6], preds[:, 7] - 
uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] + hand_orig_cal = preds[:, :3] + larm_6drr_cal = preds[:, 3:9] + larm_orig_cal = preds[:, 9:12] + uarm_6drr_cal = preds[:, 12:18] + hips_sin = preds[:, 18] + hips_cos = preds[:, 19] # transform to quats - larm_quat_rh = ts.quat_a_to_b(larm_vecs, p_hand_orig_rua - p_larm_orig_rua) - uarm_quat_rh = ts.quat_a_to_b(uarm_vecs, p_larm_orig_rua) - hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) - - uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) - larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) - - p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips - - # transform to global positions - p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua - p_hand_orig_g = p_uarm_orig_g + p_hand_orig_rua - return np.hstack([ - p_hand_orig_g, - p_larm_orig_g, - p_uarm_orig_g, - larm_quat_g, - uarm_quat_g, - hips_quat_g - ]) - - -def larm_uarm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array): - """ - :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] - :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] - :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] - """ - # split combined pred rows back into separate arrays - larm_6drr, uarm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13] - larm_vecs, uarm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] - - # transform to quats - uarm_quat_rh = ts.six_drr_1x6_to_quat(uarm_6drr) - larm_quat_rh = ts.six_drr_1x6_to_quat(larm_6drr) - hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) - - uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) - larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) + uarm_quat_cal = ts.six_drr_1x6_to_quat(uarm_6drr_cal) + larm_quat_cal = ts.six_drr_1x6_to_quat(larm_6drr_cal) + hips_quat_cal = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) - # estimate origins in respective reference frame - p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips - p_larm_orig_g = ts.quat_rotate_vector(uarm_quat_g, uarm_vecs) + p_uarm_orig_g - p_hand_orig_g = ts.quat_rotate_vector(larm_quat_g, larm_vecs) + p_larm_orig_g + # get uarm orig + uarm_orig_rh = body_measure[:, 6:] + uarm_orig_cal = ts.quat_rotate_vector(hips_quat_cal, uarm_orig_rh) - # transform to global positions return np.hstack([ - p_hand_orig_g, - p_larm_orig_g, - p_uarm_orig_g, - larm_quat_g, - uarm_quat_g, - hips_quat_g + hand_orig_cal, + larm_orig_cal, + uarm_orig_cal, + larm_quat_cal, + uarm_quat_cal, + hips_quat_cal ]) @@ -110,134 +71,190 @@ def larm_uarm_hip_6dof_cal_to_origins_cal(preds: np.array, body_measure: np.arra ]) -def uarm_larm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array): - """ - :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] - :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] - :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] - """ - # split combined pred rows back into separate arrays - uarm_6drr, larm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13] - uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] - - # transform to quats - uarm_quat_rh = ts.six_drr_1x6_to_quat(uarm_6drr) - larm_quat_rh = ts.six_drr_1x6_to_quat(larm_6drr) - hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) - - uarm_quat_g = 
ts.hamilton_product(hips_quat_g, uarm_quat_rh) - larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) - - # estimate origins in respective reference frame - p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips - p_larm_orig_rua = ts.quat_rotate_vector(uarm_quat_g, uarm_vecs) # relative to uarm origin - p_hand_orig_rla = ts.quat_rotate_vector(larm_quat_g, larm_vecs) # relative to larm origin - - # transform to global positions - p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua - p_hand_orig_g = p_hand_orig_rla + p_larm_orig_g - return np.hstack([ - p_hand_orig_g, - p_larm_orig_g, - p_uarm_orig_g, - larm_quat_g, - uarm_quat_g, - hips_quat_g - ]) - - -def uarm_larm_6drr_to_origins(preds: np.array, body_measure: np.array): - """ - takes predictions from a model that has 6dof for lower and upper arm - as well as upper arm radius as its outputs - :param preds: [uarm_6drr, larm_6drr] - :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] - :return: [hand_orig, larm_orig, larm_quat_rh, uarm_quat_rh] - """ - - # split combined pred rows back into separate arrays - uarm_6drr, larm_6drr = preds[:, :6], preds[:, 6:] - - # get the default arm vectors from the row-by-row body measurements data - # lengths may vary if data is shuffled and from different participants - uarm_vecs = body_measure[:, :3] - larm_vecs = body_measure[:, 3:6] - uarm_orig = body_measure[:, 6:] - - # transform 6dof rotation representations back into quaternions - uarm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(uarm_6drr) - p_uarm_quat_rh = ts.rot_mat_1x9_to_quat(uarm_rot_mat) - - larm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(larm_6drr) - p_larm_quat_rh = ts.rot_mat_1x9_to_quat(larm_rot_mat) - - # get the transition from upper arm origin to lower arm origin - p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, uarm_vecs) + uarm_orig - - # get transitions from lower arm origin to hand - # RE stands for relative to elbow (lower arm origin) - rotated_lower_arms_re = ts.quat_rotate_vector(p_larm_quat_rh, larm_vecs) - p_hand_orig_rh = rotated_lower_arms_re + p_larm_orig_rh - - return np.hstack([ - p_hand_orig_rh, - p_larm_orig_rh, - p_larm_quat_rh, - p_uarm_quat_rh - ]) - - -def larm_uarm_6drr_to_origins(preds: np.array, body_measure: np.array): +def larm_uarm_6drr_cal_to_origins_cal(preds: np.array, body_measure: np.array): # split combined pred rows back into separate arrays larm_6drr, uarm_6drr = preds[:, :6], preds[:, 6:] - - # get the default arm vectors from the row-by-row body measurements data - # lengths may vary if data is shuffled and from different participants - larm_vecs = body_measure[:, :3] - uarm_vecs = body_measure[:, 3:] + larm_vecs, uarm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] # transform 6dof rotation representations back into quaternions - uarm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(uarm_6drr) - p_uarm_quat_rh = ts.rot_mat_1x9_to_quat(uarm_rot_mat) - - larm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(larm_6drr) - p_larm_quat_rh = ts.rot_mat_1x9_to_quat(larm_rot_mat) + uarm_quat_cal = ts.six_drr_1x6_to_quat(uarm_6drr) + larm_quat_cal = ts.six_drr_1x6_to_quat(larm_6drr) # get the transition from upper arm origin to lower arm origin - p_larm_orig_rua = ts.quat_rotate_vector(p_uarm_quat_rh, uarm_vecs) - - # get transitions from lower arm origin to hand - # RE stands for relative to elbow (lower arm origin) - rotated_lower_arms_re = ts.quat_rotate_vector(p_larm_quat_rh, larm_vecs) - p_hand_orig_rua = rotated_lower_arms_re + p_larm_orig_rua + 
larm_orig_cal = ts.quat_rotate_vector(uarm_quat_cal, uarm_vecs) + uarm_orig_rh + hand_orig_cal = ts.quat_rotate_vector(larm_quat_cal, larm_vecs) + larm_orig_cal return np.hstack([ - p_hand_orig_rua, - p_larm_orig_rua, - p_larm_quat_rh, - p_uarm_quat_rh + hand_orig_cal, + larm_orig_cal, + larm_quat_cal, + uarm_quat_cal ]) - -def hand_larm_xyz_to_origins(preds: np.array, body_measure: np.array): - """ - :param preds: [hand_orig_rua, larm_orig_rua] - :param body_measure: [uarm_vec, larm_vec] - :return: [hand_origins, lower_arm_origins, lower_arm_rot_rh, upper_arm_rot_rh] - """ - p_hand_origin_rua = preds[:, :3] - p_larm_origin_rua = preds[:, 3:] - - uarm_vec = body_measure[:, :3] - larm_vec = body_measure[:, 3:] - - # estimate correct orientations - p_larm_quat_rh = ts.quat_a_to_b(larm_vec, p_hand_origin_rua - p_larm_origin_rua) - p_uarm_quat_rh = ts.quat_a_to_b(uarm_vec, p_larm_origin_rua) - - return np.hstack([ - p_hand_origin_rua, - p_larm_origin_rua, - p_larm_quat_rh, - p_uarm_quat_rh - ]) +# +# def larm_hand_hip_pos_rua_to_origins_g(preds: np.array, body_measure: np.array): +# """ +# :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] +# :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] +# :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] +# """ +# # split combined pred rows back into separate arrays +# p_hand_orig_rua, p_larm_orig_rua, hips_sin, hips_cos = preds[:, :3], preds[:, 3:6], preds[:, 6], preds[:, 7] +# uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] +# +# # transform to quats +# larm_quat_rh = ts.quat_a_to_b(larm_vecs, p_hand_orig_rua - p_larm_orig_rua) +# uarm_quat_rh = ts.quat_a_to_b(uarm_vecs, p_larm_orig_rua) +# hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) +# +# uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) +# larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) +# +# p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips +# +# # transform to global positions +# p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua +# p_hand_orig_g = p_uarm_orig_g + p_hand_orig_rua +# return np.hstack([ +# p_hand_orig_g, +# p_larm_orig_g, +# p_uarm_orig_g, +# larm_quat_g, +# uarm_quat_g, +# hips_quat_g +# ]) +# +# +# def larm_uarm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array): +# """ +# :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] +# :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] +# :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] +# """ +# # split combined pred rows back into separate arrays +# larm_6drr, uarm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13] +# larm_vecs, uarm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] +# +# # transform to quats +# uarm_quat_rh = ts.six_drr_1x6_to_quat(uarm_6drr) +# larm_quat_rh = ts.six_drr_1x6_to_quat(larm_6drr) +# hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) +# +# uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) +# larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) +# +# # estimate origins in respective reference frame +# p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips +# p_larm_orig_g = ts.quat_rotate_vector(uarm_quat_g, uarm_vecs) + p_uarm_orig_g +# p_hand_orig_g = ts.quat_rotate_vector(larm_quat_g, larm_vecs) + p_larm_orig_g +# +# # transform to global positions +# return np.hstack([ +# 
p_hand_orig_g, +# p_larm_orig_g, +# p_uarm_orig_g, +# larm_quat_g, +# uarm_quat_g, +# hips_quat_g +# ]) +# +# +# def uarm_larm_hip_6dof_rh_to_origins_g(preds: np.array, body_measure: np.array): +# """ +# :param preds: [uarm_6drr, larm_6drr, hips_sin_cos] +# :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] +# :return: [hand_orig, larm_orig, uarm_orig, larm_quat_g, uarm_quat_g, hips_quat_g] +# """ +# # split combined pred rows back into separate arrays +# uarm_6drr, larm_6drr, hips_sin, hips_cos = preds[:, :6], preds[:, 6:12], preds[:, 12], preds[:, 13] +# uarm_vecs, larm_vecs, uarm_orig_rh = body_measure[:, :3], body_measure[:, 3:6], body_measure[:, 6:] +# +# # transform to quats +# uarm_quat_rh = ts.six_drr_1x6_to_quat(uarm_6drr) +# larm_quat_rh = ts.six_drr_1x6_to_quat(larm_6drr) +# hips_quat_g = ts.hips_sin_cos_to_quat(hips_sin, hips_cos) +# +# uarm_quat_g = ts.hamilton_product(hips_quat_g, uarm_quat_rh) +# larm_quat_g = ts.hamilton_product(hips_quat_g, larm_quat_rh) +# +# # estimate origins in respective reference frame +# p_uarm_orig_g = ts.quat_rotate_vector(hips_quat_g, uarm_orig_rh) # relative to hips +# p_larm_orig_rua = ts.quat_rotate_vector(uarm_quat_g, uarm_vecs) # relative to uarm origin +# p_hand_orig_rla = ts.quat_rotate_vector(larm_quat_g, larm_vecs) # relative to larm origin +# +# # transform to global positions +# p_larm_orig_g = p_uarm_orig_g + p_larm_orig_rua +# p_hand_orig_g = p_hand_orig_rla + p_larm_orig_g +# return np.hstack([ +# p_hand_orig_g, +# p_larm_orig_g, +# p_uarm_orig_g, +# larm_quat_g, +# uarm_quat_g, +# hips_quat_g +# ]) +# +# +# def uarm_larm_6drr_to_origins(preds: np.array, body_measure: np.array): +# """ +# takes predictions from a model that has 6dof for lower and upper arm +# as well as upper arm radius as its outputs +# :param preds: [uarm_6drr, larm_6drr] +# :param body_measure: [uarm_vec, larm_vec, uarm_orig_rh] +# :return: [hand_orig, larm_orig, larm_quat_rh, uarm_quat_rh] +# """ +# +# # split combined pred rows back into separate arrays +# uarm_6drr, larm_6drr = preds[:, :6], preds[:, 6:] +# +# # get the default arm vectors from the row-by-row body measurements data +# # lengths may vary if data is shuffled and from different participants +# uarm_vecs = body_measure[:, :3] +# larm_vecs = body_measure[:, 3:6] +# uarm_orig = body_measure[:, 6:] +# +# # transform 6dof rotation representations back into quaternions +# uarm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(uarm_6drr) +# p_uarm_quat_rh = ts.rot_mat_1x9_to_quat(uarm_rot_mat) +# +# larm_rot_mat = ts.six_drr_1x6_to_rot_mat_1x9(larm_6drr) +# p_larm_quat_rh = ts.rot_mat_1x9_to_quat(larm_rot_mat) +# +# # get the transition from upper arm origin to lower arm origin +# p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, uarm_vecs) + uarm_orig +# +# # get transitions from lower arm origin to hand +# # RE stands for relative to elbow (lower arm origin) +# rotated_lower_arms_re = ts.quat_rotate_vector(p_larm_quat_rh, larm_vecs) +# p_hand_orig_rh = rotated_lower_arms_re + p_larm_orig_rh +# +# return np.hstack([ +# p_hand_orig_rh, +# p_larm_orig_rh, +# p_larm_quat_rh, +# p_uarm_quat_rh +# ]) +# +# +# def hand_larm_xyz_to_origins(preds: np.array, body_measure: np.array): +# """ +# :param preds: [hand_orig_rua, larm_orig_rua] +# :param body_measure: [uarm_vec, larm_vec] +# :return: [hand_origins, lower_arm_origins, lower_arm_rot_rh, upper_arm_rot_rh] +# """ +# p_hand_origin_rua = preds[:, :3] +# p_larm_origin_rua = preds[:, 3:] +# +# uarm_vec = body_measure[:, :3] +# larm_vec = body_measure[:, 3:] +# 
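For orientation, estimate_joints keeps its dispatch table: arm_pose_from_nn_targets routes a matrix of network outputs to the converter that matches the model's target set. A minimal sketch of a call against the ORI_CAL_LARM_UARM converter defined above, feeding identity 6D rotations and default-sized body measurements; it assumes the transformation helpers broadcast a single body-measurement row against many prediction rows, as the Estimator class uses them:

import numpy as np
from wear_mocap_ape.estimate import estimate_joints
from wear_mocap_ape.utility.names import NNS_TARGETS

preds = np.zeros((25, 12))  # e.g. 25 Monte Carlo samples of [larm_6drr, uarm_6drr]
preds[:, [0, 4, 6, 10]] = 1.0  # identity 6D rotation for both segments
body = np.array([[-0.22, 0.0, 0.0, -0.3, 0.0, 0.0, -0.17, 0.43, -0.007]])  # larm_vec, uarm_vec, uarm_orig
est = estimate_joints.arm_pose_from_nn_targets(preds, body, NNS_TARGETS.ORI_CAL_LARM_UARM)
# each row of est: [hand_orig(3), larm_orig(3), larm_quat(4), uarm_quat(4)]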
+# # estimate correct orientations +# p_larm_quat_rh = ts.quat_a_to_b(larm_vec, p_hand_origin_rua - p_larm_origin_rua) +# p_uarm_quat_rh = ts.quat_a_to_b(uarm_vec, p_larm_origin_rua) +# +# return np.hstack([ +# p_hand_origin_rua, +# p_larm_origin_rua, +# p_larm_quat_rh, +# p_uarm_quat_rh +# ]) diff --git a/src/wear_mocap_ape/estimate/estimator.py b/src/wear_mocap_ape/estimate/estimator.py new file mode 100644 index 0000000..79aa1bf --- /dev/null +++ b/src/wear_mocap_ape/estimate/estimator.py @@ -0,0 +1,214 @@ +import logging +from abc import abstractmethod +from datetime import datetime + +import torch +import queue +import numpy as np + +from wear_mocap_ape.data_types.bone_map import BoneMap +from wear_mocap_ape.estimate import estimate_joints, compose_msg +from wear_mocap_ape.utility import data_stats +from wear_mocap_ape.utility.names import NNS_INPUTS, NNS_TARGETS + + +class Estimator: + def __init__(self, + model_name: str, + x_inputs: NNS_INPUTS, + y_targets: NNS_TARGETS, + normalize: bool = True, + smooth: int = 1, + seq_len: int = 1, + stream_mc: bool = True, + bonemap: BoneMap = None, + tag: str = "Estimator"): + + self.__tag = tag + self._active = True + self._prev_time = datetime.now() + self._model_name = model_name + + self._y_targets = y_targets + self._x_inputs = x_inputs + + # load normalized data stats if required + self._normalize = normalize + if normalize: + stats = data_stats.get_norm_stats( + x_inputs=self._x_inputs, + y_targets=self._y_targets + ) + # data is normalized and has to be transformed with pre-calculated mean and std + self._xx_m, self._xx_s = stats["xx_m"], stats["xx_s"] + self._yy_m, self._yy_s = stats["yy_m"], stats["yy_s"] + + + # average over multiple time steps + self._smooth = max(1, smooth) # smooth should not be smaller 1 + self._smooth_hist = [] + + # monte carlo predictions + self._last_msg = None + self._stream_mc = stream_mc + + self._row_hist = [] + self._sequence_len = max(1, seq_len) # seq_len should not be smaller 1 + + # use body measurements for transitions + if bonemap is None: + self._larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0]) + self._uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0]) + self._uarm_orig = BoneMap.DEFAULT_UARM_ORIG_RH + else: + # get values from bone map + self._larm_vec = np.array([-bonemap.left_lower_arm_length, 0, 0]) + self._uarm_vec = np.array([-bonemap.left_upper_arm_length, 0, 0]) + self._uarm_orig = bonemap.left_upper_arm_origin_rh + + # for quicker access we store a matrix with relevant body measurements for quick multiplication + self._body_measurements = np.r_[self._larm_vec, self._uarm_vec, self._uarm_orig][np.newaxis, :] + + self._device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + + def get_last_msg(self): + return self._last_msg + + def is_active(self): + return self._active + + def terminate(self): + self._active = False + + def reset(self): + self._active = True + self._row_hist = [] + self._smooth_hist = [] + + def add_xx_to_row_hist_and_make_prediction(self, xx) -> np.array: + self._row_hist.append(xx) + # if not enough data is available yet, simply repeat the input as a first estimate + while len(self._row_hist) < self._sequence_len: + self._row_hist.append(xx) + # if the history is too long, delete old values + while len(self._row_hist) > self._sequence_len: + del self._row_hist[0] + xx_hist = np.vstack(self._row_hist) + + if self._normalize: + xx_hist = (xx_hist - self._xx_m) / self._xx_s + + pred = self.make_prediction_from_row_hist(xx_hist) + + if 
+        if self._normalize: + pred = pred * self._yy_s + self._yy_m + + # store est in history if smoothing is required + if self._smooth > 1: + self._smooth_hist.append(pred) + while len(self._smooth_hist) < self._smooth: + self._smooth_hist.append(pred) + while len(self._smooth_hist) > self._smooth: + del self._smooth_hist[0] + pred = np.vstack(self._smooth_hist) + + return pred + + def msg_from_pred(self, pred: np.array, stream_mc: bool) -> np.array: + est = estimate_joints.arm_pose_from_nn_targets( + preds=pred, + body_measurements=self._body_measurements, + y_targets=self._y_targets + ) + msg = compose_msg.msg_from_nn_targets_est(est, self._body_measurements, self._y_targets) + + self._last_msg = msg.copy() + if stream_mc: + msg = list(msg) + # now we attach the monte carlo predictions for XYZ positions + if est.shape[0] > 1: + for e_row in est: + msg += list(e_row[:6]) + return msg + + def processing_loop(self, sensor_q: queue.Queue): + logging.info(f"[{self.__tag}] starting wearable streaming loop") + + # used to estimate delta time and processing speed in Hz + start = datetime.now() + self._prev_time = start + dat = 0 + + # this loops while the socket is listening and/or receiving data + self._active = True + + self.reset() + while self._active: + + # processing speed output + now = datetime.now() + if (now - start).seconds >= 5: + start = now + logging.info(f"[{self.__tag}] {dat / 5} Hz") + dat = 0 + self._prev_time = now + + # get the most recent smartwatch data row from the queue + row = sensor_q.get() + while sensor_q.qsize() > 5: + row = sensor_q.get() + + # finally get predicted positions etc + xx = self.parse_row_to_xx(row) + pred = self.add_xx_to_row_hist_and_make_prediction(xx) + msg = self.msg_from_pred(pred, self._stream_mc) + self.process_msg(msg) + dat += 1 + + @abstractmethod + def make_prediction_from_row_hist(self, xx_hist: np.array) -> np.array: + return + + @abstractmethod + def parse_row_to_xx(self, row) -> np.array: + return + + @abstractmethod + def process_msg(self, msg: np.array): + return + + @property + def sequence_len(self): + return self._sequence_len + + @property + def body_measurements(self): + return self._body_measurements + + @property + def uarm_orig(self): + return self._uarm_orig + + @property + def uarm_vec(self): + return self._uarm_vec + + @property + def larm_vec(self): + return self._larm_vec + + @property + def device(self): + return self._device + + @property + def x_inputs(self): + return self._x_inputs + + @property + def y_targets(self): + return self._y_targets + + @property + def model_name(self): + return self._model_name diff --git a/src/wear_mocap_ape/estimate/models.py b/src/wear_mocap_ape/estimate/models.py index dc296d1..e6fb588 100644 --- a/src/wear_mocap_ape/estimate/models.py +++ b/src/wear_mocap_ape/estimate/models.py @@ -124,7 +124,8 @@ def __init__(self, hidden_size=256, num_layers=2, batch_first=True, - bidirectional=False) + bidirectional=False, + dropout=dropout) self._activation_function = F.relu self.output_layer = torch.nn.Linear(256, output_size) self.output_size = output_size diff --git a/src/wear_mocap_ape/estimate/watch_only.py b/src/wear_mocap_ape/estimate/watch_only.py index b9e5cb5..8f4fab4 100644 --- a/src/wear_mocap_ape/estimate/watch_only.py +++ b/src/wear_mocap_ape/estimate/watch_only.py @@ -1,237 +1,104 @@ -import logging from abc import abstractmethod -from datetime import datetime import torch -import queue import numpy as np from wear_mocap_ape.data_deploy.nn import deploy_models -from wear_mocap_ape.estimate import
estimate_joints, models +from wear_mocap_ape.estimate import models from wear_mocap_ape.data_types.bone_map import BoneMap -from wear_mocap_ape.utility import transformations as ts, data_stats +from wear_mocap_ape.estimate.estimator import Estimator +from wear_mocap_ape.utility import transformations as ts from wear_mocap_ape.data_types import messaging from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS -class WatchOnly: +class WatchOnlyNn(Estimator): def __init__(self, model_hash: str = deploy_models.LSTM.WATCH_ONLY.value, smooth: int = 10, stream_monte_carlo=True, monte_carlo_samples=25, bonemap: BoneMap = None, + watch_phone: bool = False, tag: str = "PUB WATCH"): - self.__tag = tag - self.__active = True - - # average over multiple time steps - self.__smooth = smooth # monte carlo predictions - self.__stream_mc = stream_monte_carlo self.__mc_samples = monte_carlo_samples - self.__last_msg = None - # use body measurements for transitions - if bonemap is None: - self.__larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0]) - self.__uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0]) - self.__uarm_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH + # simple lookup for values of interest + if watch_phone: + self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP else: - # get values from bone map - self.__larm_vec = np.array([-bonemap.left_lower_arm_length, 0, 0]) - self.__uarm_vec = np.array([-bonemap.left_upper_arm_length, 0, 0]) - self.__uarm_orig = bonemap.left_upper_arm_origin_rh - self.__hips_quat = np.array([1, 0, 0, 0]) - - # load the trained network - torch.set_default_dtype(torch.float64) - self.__device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + self.__slp = messaging.WATCH_ONLY_IMU_LOOKUP # load model from given parameters self.__nn_model, params = models.load_deployed_model_from_hash(hash_str=model_hash) - self.__y_targets = NNS_TARGETS(params["y_targets_v"]) - self.__sequence_len = params["sequence_len"] - self.__normalize = params["normalize"] if "normalize" in params else True - - # load normalized data stats if required - if self.__normalize: - stats = data_stats.get_norm_stats(x_inputs=NNS_INPUTS(params["x_inputs_v"]), y_targets=self.__y_targets) - # data is normalized and has to be transformed with pre-calculated mean and std - self.__xx_m, self.__xx_s = stats["xx_m"], stats["xx_s"] - self.__yy_m, self.__yy_s = stats["yy_m"], stats["yy_s"] - else: - self.__xx_m, self.__xx_s = 0., 1. - self.__yy_m, self.__yy_s = 0., 1. 
- - def get_last_msg(self): - return self.__last_msg - - def is_active(self): - return self.__active - - def terminate(self): - self.__active = False - - def processing_loop(self, sensor_q: queue): - self.__active = True - logging.info(f"[{self.__tag}] starting watch standalone processing loop") - - # used to estimate delta time and processing speed in Hz - start = datetime.now() - prev_time = datetime.now() - dat = 0 - - # historical data for time series predictions - row_hist = [] # history of predictions for sequence data - smooth_hist = [] # smoothing averages over a series of time steps - - # simple lookup for values of interest - slp = messaging.WATCH_ONLY_IMU_LOOKUP - - # for quicker access we store a matrix with relevant body measurements for quick multiplication - body_measurements = np.r_[self.__uarm_vec, self.__larm_vec, self.__uarm_orig][np.newaxis, :] - - # this loops while the socket is listening and/or receiving data - while self.__active: - - # get the most recent smartwatch data row from the queue - row = None - while not sensor_q.empty(): - row = sensor_q.get() - - # process received data - if row is not None: - # second-wise updates to determine message frequency - now = datetime.now() - if (now - start).seconds >= 5: - start = now - logging.info(f"[{self.__tag}] {dat / 5} Hz") - dat = 0 - delta_t = now - prev_time - delta_t = delta_t.microseconds * 0.000001 - prev_time = now - - # process the data - # pressure - calibrated initial pressure = relative pressure - r_pres = row[slp["sw_pres"]] - row[slp["sw_init_pres"]] - - # calibrate smartwatch rotation - sw_rot = np.array([ - row[slp["sw_rotvec_w"]], - row[slp["sw_rotvec_x"]], - row[slp["sw_rotvec_y"]], - row[slp["sw_rotvec_z"]] - ]) - sw_fwd = np.array([ - row[slp["sw_forward_w"]], - row[slp["sw_forward_x"]], - row[slp["sw_forward_y"]], - row[slp["sw_forward_z"]] - ]) - quat_north = ts.calib_watch_left_to_north_quat(sw_fwd) - sw_quat_cal = ts.android_quat_to_global(sw_rot, quat_north) - sw_6drr_cal = ts.quat_to_6drr_1x6(sw_quat_cal) # get 6dof rotation representation - - # assemble the entire input vector of one time step - xx = np.hstack([ - delta_t, - row[slp["sw_gyro_x"]], row[slp["sw_gyro_y"]], row[slp["sw_gyro_z"]], - row[slp["sw_lvel_x"]], row[slp["sw_lvel_y"]], row[slp["sw_lvel_z"]], - row[slp["sw_lacc_x"]], row[slp["sw_lacc_y"]], row[slp["sw_lacc_z"]], - row[slp["sw_grav_x"]], row[slp["sw_grav_y"]], row[slp["sw_grav_z"]], - sw_6drr_cal, - r_pres - ]) - - if self.__normalize: - # normalize measurements with pre-calculated mean and std - xx = (xx - self.__xx_m) / self.__xx_s - - # sequences are used for recurrent nets. 
Stack time steps along 2nd axis - row_hist.append(xx) - if len(row_hist) < self.__sequence_len: - continue - while len(row_hist) > self.__sequence_len: - del row_hist[0] - xx = np.vstack(row_hist) - - # finally, cast to a torch tensor with batch size 1 - xx = torch.tensor(xx[None, :, :]) - with torch.no_grad(): - # make mote carlo predictions if the model makes use of dropout - t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples) - - # if on GPU copy the tensor to host memory first - if self.__device.type == 'cuda': - t_preds = t_preds.cpu() - t_preds = t_preds.numpy() - - # we are only interested in the last prediction of the sequence - t_preds = t_preds[:, -1, :] - - if self.__normalize: - # transform predictions back from normalized labels - t_preds = t_preds * self.__yy_s + self.__yy_m - - # store t_preds in history if smoothing is required - if self.__smooth > 1: - smooth_hist.append(t_preds) - if len(smooth_hist) < self.__smooth: - continue - while len(smooth_hist) > self.__smooth: - del smooth_hist[0] - t_preds = np.vstack(smooth_hist) - - # finally, estimate hand and lower arm origins from prediction data - est = estimate_joints.arm_pose_from_nn_targets( - preds=t_preds, - body_measurements=body_measurements, - y_targets=self.__y_targets - ) - - # estimate mean of rotations if we got multiple MC predictions - if est.shape[0] > 1: - # Calculate the mean of all predictions mean - pred_larm_rot_rh = ts.average_quaternions(est[:, 6:10]) - pred_uarm_rot_rh = ts.average_quaternions(est[:, 10:]) - # use body measurements for transitions - # get the transition from upper arm origin to lower arm origin - pred_larm_origin_rh = ts.quat_rotate_vector(pred_uarm_rot_rh, self.__uarm_vec) + self.__uarm_orig - # get transitions from lower arm origin to hand - rotated_lower_arms_re = ts.quat_rotate_vector(pred_larm_rot_rh, self.__larm_vec) - pred_hand_origin_rh = rotated_lower_arms_re + pred_larm_origin_rh - else: - pred_hand_origin_rh = est[0, :3] - pred_larm_origin_rh = est[0, 3:6] - pred_larm_rot_rh = est[0, 6:10] - pred_uarm_rot_rh = est[0, 10:] - - # this is the list for the actual joint positions and rotations - basic_value_list = np.hstack([ - pred_larm_rot_rh, # hand quat - pred_hand_origin_rh, # hand pos - pred_larm_rot_rh, # larm quat - pred_larm_origin_rh, - pred_uarm_rot_rh, # uarm quat - self.__uarm_orig, - self.__hips_quat - ]) - - # now we attach the monte carlo predictions for XYZ positions - if self.__stream_mc: - if est.shape[0] > 1: - for e_row in est: - basic_value_list = np.hstack([basic_value_list, e_row[:6]]) - - # store as last msg for getter - self.__last_msg = basic_value_list.copy() - - # craft UDP message and send - self.process_msg(msg=basic_value_list) - dat += 1 + super().__init__( + model_name=model_hash, + x_inputs=NNS_INPUTS(params["x_inputs_v"]), + y_targets=NNS_TARGETS(params["y_targets_v"]), + smooth=smooth, + normalize=params["normalize"], + seq_len=params["sequence_len"], + stream_mc=stream_monte_carlo, + tag=tag, + bonemap=bonemap + ) + + def parse_row_to_xx(self, row) -> np.array: + # process the data + # pressure - calibrated initial pressure = relative pressure + r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] + + # calibrate smartwatch rotation + sw_rot = np.array([ + row[self.__slp["sw_rotvec_w"]], + row[self.__slp["sw_rotvec_x"]], + row[self.__slp["sw_rotvec_y"]], + row[self.__slp["sw_rotvec_z"]] + ]) + sw_fwd = np.array([ + row[self.__slp["sw_forward_w"]], + row[self.__slp["sw_forward_x"]], + 
row[self.__slp["sw_forward_y"]], + row[self.__slp["sw_forward_z"]] + ]) + + # estimate north quat to align Z-axis of global and android coord system + r = ts.android_quat_to_global_no_north(sw_fwd) + y_rot = ts.reduce_global_quat_to_y_rot(r) + quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0])) + # calibrate watch + sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north) + sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g) + + # assemble the entire input vector of one time step + return np.hstack([ + row[self.__slp["sw_dt"]], + row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], + row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], + row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], + row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]], + sw_6drr_cal, + r_pres + ], dtype=np.float32) + + def make_prediction_from_row_hist(self, xx_hist: np.array) -> np.array: + # cast to a torch tensor with batch size 1 + xx = torch.tensor(xx_hist[None, :, :]) + with torch.no_grad(): + # make mote carlo predictions if the model makes use of dropout + t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples) + + # if on GPU copy the tensor to host memory first + if self._device.type == 'cuda': + t_preds = t_preds.cpu() + t_preds = t_preds.numpy() + + # we are only interested in the last prediction of the sequence + t_preds = t_preds[:, -1, :] + return t_preds @abstractmethod def process_msg(self, msg: np.array): diff --git a/src/wear_mocap_ape/estimate/watch_phone_pocket.py b/src/wear_mocap_ape/estimate/watch_phone_pocket.py deleted file mode 100644 index 4be3275..0000000 --- a/src/wear_mocap_ape/estimate/watch_phone_pocket.py +++ /dev/null @@ -1,431 +0,0 @@ -import logging -from abc import abstractmethod -from datetime import datetime -from pathlib import Path - -import pandas as pd -import torch -import queue -import numpy as np - -from einops import rearrange - -from wear_mocap_ape.estimate import kalman_models -from wear_mocap_ape import config -from wear_mocap_ape.estimate import estimate_joints -from wear_mocap_ape.data_types.bone_map import BoneMap -from wear_mocap_ape.utility import transformations as ts, data_stats -from wear_mocap_ape.data_types import messaging -from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS - - -class WatchPhonePocket: - def __init__(self, - smooth: int = 1, - num_ensemble: int = 32, - model_name="SW-model", - window_size: int = 10, - stream_mc: bool = True, - tag: str = "KALMAN POCKET PHONE"): - - self.__y_targets = NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS - self.x_inputs = NNS_INPUTS.WATCH_PH_HIP - self.__prev_time = datetime.now() - - self.__tag = tag - self.__active = False - self.__smooth = smooth - self.__smooth_hist = [] - self.__stream_mc = stream_mc - self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP - - self.__row_hist = [] - - self.__uarm_vec = np.array([-BoneMap.DEFAULT_UARM_LEN, 0, 0]) - self.__larm_vec = np.array([-BoneMap.DEFAULT_LARM_LEN, 0, 0]) - self.__shou_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH - self.last_msg = None - self.__normalize = True - - self.batch_size = 1 - self.dim_x = 14 - self.dim_z = 14 - self.dim_gt = 14 - self.sensor_len = 1 - self.input_size_1 = 22 - self.num_ensemble = num_ensemble - self.win_size = window_size - self.mode = "Test" - - self.model = kalman_models.KalmanSmartwatchModel( - self.num_ensemble, - self.win_size, - self.dim_x, - self.dim_z, - 
self.input_size_1 - ) - - self.utils_ = kalman_models.Utils(self.num_ensemble, self.dim_x, self.dim_z) - - # Check model type - if not isinstance(self.model, torch.nn.Module): - raise TypeError("model must be an instance of nn.Module") - self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - if torch.cuda.is_available(): - self.model.cuda() - - # load normalized data stats if required - stats = data_stats.get_norm_stats( - x_inputs=self.x_inputs, - y_targets=self.__y_targets - ) - - # data is normalized and has to be transformed with pre-calculated mean and std - self.__xx_m, self.__xx_s = stats["xx_m"], stats["xx_s"] - self.__yy_m, self.__yy_s = stats["yy_m"], stats["yy_s"] - - # Load the pretrained model - if torch.cuda.is_available(): - checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_name) - self.model.load_state_dict(checkpoint["model"]) - else: - checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_name, - map_location=torch.device("cpu")) - self.model.load_state_dict(checkpoint["model"]) - self.model.eval() - - # INIT MODEL - # for quicker access we store a matrix with relevant body measurements for quick multiplication - self.__body_measurements = np.r_[self.__larm_vec, self.__uarm_vec, self.__shou_orig][np.newaxis, :] - self.__init_step = 0 - - @property - def sequence_len(self): - return self.win_size - - def is_active(self): - return self.__active - - def terminate(self): - self.__active = False - - def get_y_targets(self): - return self.__y_targets - - def get_body_measurements(self): - return self.__body_measurements - - def errors_from_file(self, file_p: Path, process_msg: bool = False): - logging.info(f"[{self.__tag}] processing from file") - # init some dummy input - input_state = np.zeros((self.batch_size, self.num_ensemble, self.win_size, self.dim_x)) - input_state = torch.tensor(input_state, dtype=torch.float32) - input_state = input_state.to(self.device) - - hand_l_err, hand_r_err = [], [] - elbow_l_err, elbow_r_err = [], [] - hip_rot_errors = [] - - dat = pd.read_csv(file_p) - for i, row in dat.iterrows(): - ## PREDICTIONS - xx = row[self.x_inputs.value].to_numpy() - t_pred, input_state = self.add_obs_and_make_prediction(xx, input_state) - - est = estimate_joints.arm_pose_from_nn_targets( - preds=t_pred, - body_measurements=self.__body_measurements, - y_targets=self.__y_targets - ) - # estimate mean of rotations if we got multiple MC predictions - if est.shape[0] > 1: - # Calculate the mean of all predictions mean - p_larm_quat_rh = ts.average_quaternions(est[:, 9:13]) - p_uarm_quat_rh = ts.average_quaternions(est[:, 13:17]) - p_hips_quat_rh = ts.average_quaternions(est[:, 17:]) - - # get the transition from upper arm origin to lower arm origin - p_uarm_orig_rh = ts.quat_rotate_vector(p_hips_quat_rh, self.__shou_orig) - p_larm_orig_rh = ts.quat_rotate_vector(p_uarm_quat_rh, self.__uarm_vec) + p_uarm_orig_rh - p_hand_orig_rh = ts.quat_rotate_vector(p_larm_quat_rh, self.__larm_vec) + p_larm_orig_rh - else: - p_hand_orig_rh = est[0, 0:3] - p_larm_orig_rh = est[0, 3:6] - p_uarm_orig_rh = est[0, 6:9] - p_larm_quat_rh = est[0, 9:13] - p_uarm_quat_rh = est[0, 13:17] - p_hips_quat_rh = est[0, 17:] - - # publish the estimation for our unity visualization - if process_msg: - msg = np.hstack([ - p_larm_quat_rh, - p_hand_orig_rh, - p_larm_quat_rh, - p_larm_orig_rh, - p_uarm_quat_rh, - p_uarm_orig_rh, - p_hips_quat_rh - ]) - if est.shape[0] > 1: - for e_row in est: - msg = np.hstack([msg, e_row[:9]]) - self.process_msg(msg) - - # 
GROUND TRUTH - yy = row[self.__y_targets.value].to_numpy()[np.newaxis, :] - est = estimate_joints.arm_pose_from_nn_targets( - preds=yy, - body_measurements=self.__body_measurements, - y_targets=self.__y_targets - ) - gt_hand_orig_rh = est[0, 0:3] - gt_larm_orig_rh = est[0, 3:6] - gt_uarm_orig_rh = est[0, 6:9] - gt_larm_quat_g = est[0, 9:13] - gt_uarm_quat_g = est[0, 13:17] - gt_hips_quat_g = est[0, 17:] - - hand_l_err.append(np.linalg.norm(gt_hand_orig_rh - p_hand_orig_rh) * 100) - hre = ts.quat_to_euler(gt_larm_quat_g) - ts.quat_to_euler(p_larm_quat_rh) - hree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(hre), np.cos(hre))))) - hand_r_err.append(hree) - - elbow_l_err.append(np.linalg.norm(gt_larm_orig_rh - p_larm_orig_rh) * 100) - ere = ts.quat_to_euler(gt_uarm_quat_g) - ts.quat_to_euler(p_uarm_quat_rh) - eree = np.sum(np.abs(np.degrees(np.arctan2(np.sin(ere), np.cos(ere))))) - elbow_r_err.append(eree) - - ydiff = ts.quat_to_euler(gt_hips_quat_g)[1] - ts.quat_to_euler(p_hips_quat_rh)[1] - err = np.abs(np.degrees(np.arctan2(np.sin(ydiff), np.cos(ydiff)))) - hip_rot_errors.append(err) - - if (int(i) + 1) % 100 == 0: - logging.info(f"[{self.__tag}] processed {i} rows") - - return hand_l_err, hand_r_err, elbow_l_err, elbow_r_err, hip_rot_errors - - def parse_row_to_xx(self, row): - # process the data - # pressure - calibrated initial pressure = relative pressure - r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] - # calibrate smartwatch rotation - sw_rot = np.array([ - row[self.__slp["sw_rotvec_w"]], - row[self.__slp["sw_rotvec_x"]], - row[self.__slp["sw_rotvec_y"]], - row[self.__slp["sw_rotvec_z"]] - ]) - sw_fwd = np.array([ - row[self.__slp["sw_forward_w"]], - row[self.__slp["sw_forward_x"]], - row[self.__slp["sw_forward_y"]], - row[self.__slp["sw_forward_z"]] - ]) - - # estimate north quat to align Z-axis of global and android coord system - r = ts.android_quat_to_global_no_north(sw_fwd) - y_rot = ts.reduce_global_quat_to_y_rot(r) - quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0])) - # calibrate watch - sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north) - sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g) - - ph_fwd = np.array([ - row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]], - row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]] - ]) - ph_rot = np.array([ - row[self.__slp["ph_rotvec_w"]], row[self.__slp["ph_rotvec_x"]], - row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]] - ]) - # the device orientations if the calib position with left arm forward is perfect - hips_dst_g = np.array([1, 0, 0, 0]) - ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north) - ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) - ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), hips_dst_g) - ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g) - - # hip y rotation from phone - hips_y_rot = ts.reduce_global_quat_to_y_rot(ph_cal_g) - hips_yrot_cal_sin = np.sin(hips_y_rot) - hips_yrot_cal_cos = np.cos(hips_y_rot) - - # assemble the entire input vector of one time step - return np.hstack([ - row[self.__slp["sw_dt"]], - row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], - row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], - row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], - row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]], - sw_6drr_cal, - r_pres, - hips_yrot_cal_sin, - 
hips_yrot_cal_cos - ]) - - def msg_from_pred(self, pred): - est = estimate_joints.arm_pose_from_nn_targets( - preds=pred, - body_measurements=self.__body_measurements, - y_targets=self.__y_targets - ) - - # estimate mean of rotations if we got multiple MC predictions - if est.shape[0] > 1: - # Calculate the mean of all predictions mean - p_hips_quat_g = ts.average_quaternions(est[:, 17:]) - p_larm_quat_g = ts.average_quaternions(est[:, 9:13]) - p_uarm_quat_g = ts.average_quaternions(est[:, 13:17]) - - # get the transition from upper arm origin to lower arm origin - p_uarm_orig_g = ts.quat_rotate_vector(p_hips_quat_g, self.__shou_orig) - p_larm_orig_g = ts.quat_rotate_vector(p_uarm_quat_g, self.__uarm_vec) + p_uarm_orig_g - p_hand_orig_g = ts.quat_rotate_vector(p_larm_quat_g, self.__larm_vec) + p_larm_orig_g - else: - p_hand_orig_g = est[0, 0:3] - p_larm_orig_g = est[0, 3:6] - p_uarm_orig_g = est[0, 6:9] - p_larm_quat_g = est[0, 9:13] - p_uarm_quat_g = est[0, 13:17] - p_hips_quat_g = est[0, 17:] - - # this is the list for the actual joint positions and rotations - msg = np.hstack([ - p_larm_quat_g, # hand rot - p_hand_orig_g, # hand orig - p_larm_quat_g, # larm rot - p_larm_orig_g, # larm orig - p_uarm_quat_g, # uarm rot - p_uarm_orig_g, # uarm orig - p_hips_quat_g # hips rot - ]) - # store as last msg for getter - self.last_msg = msg.copy() - - if self.__stream_mc: - msg = list(msg) - # now we attach the monte carlo predictions for XYZ positions - if est.shape[0] > 1: - for e_row in est: - msg += list(e_row[:6]) - return msg - - def stream_wearable_devices(self, sensor_q: queue = None, process_msg: bool = True): - logging.info(f"[{self.__tag}] wearable streaming loop") - self.__init_step = 0 - - # used to estimate delta time and processing speed in Hz - start = datetime.now() - self.__prev_time = start - dat = 0 - - # this loops while the socket is listening and/or receiving data - self.__active = True - - logging.info("loaded") - input_state = None - while self.__active: - - # processing speed output - now = datetime.now() - if (now - start).seconds >= 5: - start = now - logging.info(f"[{self.__tag}] {dat / 5} Hz") - dat = 0 - self.__prev_time = now - - # get the most recent smartwatch data row from the queue - row = sensor_q.get() - while sensor_q.qsize() > 5: - row = sensor_q.get() - - # finally get predicted positions etc - xx = self.parse_row_to_xx(row) - t_pred, input_state = self.add_obs_and_make_prediction(xx, input_state) - msg = self.msg_from_pred(t_pred) - if process_msg: - self.process_msg(msg) - dat += 1 - - @abstractmethod - def process_msg(self, msg: np.array): - return - - def make_prediction_from_row_hist(self, xx, input_state=None): - - if input_state is None: - # init some dummy input -> [batch_size, ensemble, timestep, dim_x] - input_state = np.zeros((self.batch_size, self.num_ensemble, self.win_size, self.dim_x)) - input_state = torch.tensor(input_state, dtype=torch.float32) - input_state = input_state.to(self.device) - - if self.__normalize: - xx = (xx - self.__xx_m) / self.__xx_s - - xx_seq = rearrange(xx, "(bs seq) (en feat) -> bs seq en feat", bs=1, en=1) - xx_seq = torch.tensor(xx_seq, dtype=torch.float32).to(self.device) - - with torch.no_grad(): - output = self.model(xx_seq, input_state) - - # not enough history yet. 
Make sensor model predictions until we have a time-window worth of data - if self.__init_step <= self.win_size: - # there will be no prediction in this time window - pred_ = output[3] - pred_ = rearrange(pred_, "bs en dim -> (bs en) dim") - pred_ = self.utils_.format_state(pred_) - pred_ = rearrange( - pred_, "(bs en) (k dim) -> bs en k dim", bs=self.batch_size, k=1 - ) - input_state = torch.cat( - (input_state[:, :, 1:, :], pred_), axis=2 - ) - input_state = input_state.to(self.device) - self.__init_step += 1 - # if on GPU copy the tensor to host memory first - if self.device.type == 'cuda': - smp = output[3].cpu().detach().numpy()[0] - else: - smp = output[3].detach().numpy()[0] - return smp, input_state # return only sensor model prediction - - ensemble = output[0] # -> output ensemble - ensemble_ = rearrange(ensemble, "bs (en k) dim -> bs en k dim", k=1) - input_state = torch.cat( - (input_state[:, :, 1:, :], ensemble_), axis=2 - ) - input_state = input_state.to(self.device) - - # if on GPU copy the tensor to host memory first - if self.device.type == 'cuda': - ensemble = ensemble.cpu() - - # get the output - t_preds = ensemble.detach().numpy()[0] - - if self.__normalize: - t_preds = t_preds * self.__yy_s + self.__yy_m - - # store t_preds in history if smoothing is required - if self.__smooth > 1: - self.__smooth_hist.append(t_preds) - while len(self.__smooth_hist) < self.__smooth: - self.__smooth_hist.append(t_preds) - while len(self.__smooth_hist) > self.__smooth: - del self.__smooth_hist[0] - t_preds = np.vstack(self.__smooth_hist) - - return t_preds, input_state - - def add_obs_and_make_prediction(self, x, input_state=None): - self.__row_hist.append(x) - # if not enough data is available yet, simply repeat the input as a first estimate - while len(self.__row_hist) < self.win_size: - self.__row_hist.append(x) - # if the history is too long, delete old values - while len(self.__row_hist) > self.win_size: - del self.__row_hist[0] - - t_preds, input_state = self.make_prediction_from_row_hist(np.vstack(self.__row_hist), input_state) - return t_preds, input_state diff --git a/src/wear_mocap_ape/estimate/watch_phone_pocket_kalman.py b/src/wear_mocap_ape/estimate/watch_phone_pocket_kalman.py new file mode 100644 index 0000000..e2b779a --- /dev/null +++ b/src/wear_mocap_ape/estimate/watch_phone_pocket_kalman.py @@ -0,0 +1,193 @@ +from abc import abstractmethod +from einops import rearrange + +import numpy as np +import torch + +from wear_mocap_ape.estimate import kalman_models +from wear_mocap_ape import config +from wear_mocap_ape.estimate.estimator import Estimator +from wear_mocap_ape.utility import transformations as ts +from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS + + +class WatchPhonePocketKalman(Estimator): + def __init__(self, + smooth: int = 1, + num_ensemble: int = 32, + model_hash: str = "SW-model-3.5", + window_size: int = 10, + stream_mc: bool = True, + normalize: bool = True, + tag: str = "KALMAN POCKET PHONE"): + + super().__init__( + model_name=model_hash, + x_inputs=NNS_INPUTS.WATCH_HIP, + y_targets=NNS_TARGETS.ORI_CAL_LARM_UARM_HIPS, + smooth=smooth, + seq_len=window_size, + stream_mc=stream_mc, + normalize=normalize, + tag=tag + ) + + self.__tag = tag + self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP + + self.__batch_size = 1 + self.__dim_x = 14 + self.__dim_z = 14 + # self.__dim_x = 27 + # self.__dim_z = 27 + self.__input_size_1 = 22 + self.__num_ensemble = num_ensemble + self.__win_size = window_size + + 
self.__utils = kalman_models.Utils( + self.__num_ensemble, + self.__dim_x, + self.__dim_z + ) + self.__model = kalman_models.KalmanSmartwatchModel( + self.__num_ensemble, + self.__win_size, + self.__dim_x, + self.__dim_z, + self.__input_size_1 + ) + self.__model.eval() + if torch.cuda.is_available(): + self.__model.cuda() + + # Load the pretrained model + if torch.cuda.is_available(): + checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_hash) + self.__model.load_state_dict(checkpoint["model"]) + else: + checkpoint = torch.load(config.PATHS["deploy"] / "kalman" / model_hash, + map_location=torch.device("cpu")) + self.__model.load_state_dict(checkpoint["model"]) + + # INIT MODEL + # for quicker access we store a matrix with relevant body measurements for quick multiplication + self.__init_step = 0 + self.__input_state = torch.tensor( + np.zeros( + (self.__batch_size, self.__num_ensemble, self.__win_size, self.__dim_x) + ), dtype=torch.float32 + ).to(self._device) + + def reset(self): + super().reset() + self.__input_state = torch.tensor( + np.zeros( + (self.__batch_size, self.__num_ensemble, self.__win_size, self.__dim_x) + ), dtype=torch.float32 + ).to(self._device) + self.__init_step = 0 + + def parse_row_to_xx(self, row): + # process the data + # pressure - calibrated initial pressure = relative pressure + r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] + # calibrate smartwatch rotation + sw_rot = np.array([ + row[self.__slp["sw_rotvec_w"]], + row[self.__slp["sw_rotvec_x"]], + row[self.__slp["sw_rotvec_y"]], + row[self.__slp["sw_rotvec_z"]] + ]) + sw_fwd = np.array([ + row[self.__slp["sw_forward_w"]], + row[self.__slp["sw_forward_x"]], + row[self.__slp["sw_forward_y"]], + row[self.__slp["sw_forward_z"]] + ]) + + # estimate north quat to align Z-axis of global and android coord system + r = ts.android_quat_to_global_no_north(sw_fwd) + y_rot = ts.reduce_global_quat_to_y_rot(r) + quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0])) + # calibrate watch + sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north) + sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g) + + ph_fwd = np.array([ + row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]], + row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]] + ]) + ph_rot = np.array([ + row[self.__slp["ph_rotvec_w"]], row[self.__slp["ph_rotvec_x"]], + row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]] + ]) + # the device orientations if the calib position with left arm forward is perfect + hips_dst_g = np.array([1, 0, 0, 0]) + ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north) + ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) + ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), hips_dst_g) + ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g) + + # hip y rotation from phone + hips_y_rot = ts.reduce_global_quat_to_y_rot(ph_cal_g) + hips_yrot_cal_sin = np.sin(hips_y_rot) + hips_yrot_cal_cos = np.cos(hips_y_rot) + + # assemble the entire input vector of one time step + return np.hstack([ + row[self.__slp["sw_dt"]], + row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], + row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], + row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], + row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]], + sw_6drr_cal, + r_pres, + hips_yrot_cal_sin, + hips_yrot_cal_cos + ]) + + def 
make_prediction_from_row_hist(self, xx_hist: np.array): + xx_seq = rearrange(xx_hist, "(bs seq) (en feat) -> bs seq en feat", bs=1, en=1) + xx_seq = torch.tensor(xx_seq, dtype=torch.float32).to(self._device) + + with torch.no_grad(): + output = self.__model(xx_seq, self.__input_state) + + # not enough history yet. Make sensor model predictions until we have a time-window worth of data + if self.__init_step <= self.__win_size: + # there will be no prediction in this time window + pred_ = output[3] + pred_ = rearrange(pred_, "bs en dim -> (bs en) dim") + pred_ = self.__utils.format_state(pred_) + pred_ = rearrange( + pred_, "(bs en) (k dim) -> bs en k dim", bs=self.__batch_size, k=1 + ) + self.__input_state = torch.cat( + (self.__input_state[:, :, 1:, :], pred_), axis=2 + ).to(self._device) + self.__init_step += 1 + # if on GPU copy the tensor to host memory first + if self._device.type == 'cuda': + smp = output[3].cpu().detach().numpy()[0] + else: + smp = output[3].detach().numpy()[0] + return smp[:, :14] # return only sensor model prediction + + ensemble = output[0] # -> output ensemble + ensemble_ = rearrange(ensemble, "bs (en k) dim -> bs en k dim", k=1) + self.__input_state = torch.cat( + (self.__input_state[:, :, 1:, :], ensemble_), axis=2 + ).to(self._device) + + # if on GPU copy the tensor to host memory first + if self._device.type == 'cuda': + ensemble = ensemble.cpu() + + # get the output + t_preds = ensemble.detach().numpy()[0] + return t_preds[:, :14] + + @abstractmethod + def process_msg(self, msg: np.array): + return diff --git a/src/wear_mocap_ape/estimate/watch_phone_pocket_nn.py b/src/wear_mocap_ape/estimate/watch_phone_pocket_nn.py new file mode 100644 index 0000000..bd20f07 --- /dev/null +++ b/src/wear_mocap_ape/estimate/watch_phone_pocket_nn.py @@ -0,0 +1,119 @@ +from abc import abstractmethod + +import numpy as np +import torch + +from wear_mocap_ape.data_types.bone_map import BoneMap +from wear_mocap_ape.estimate import models +from wear_mocap_ape.estimate.estimator import Estimator +from wear_mocap_ape.utility import transformations as ts +from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS + + +class WatchPhonePocketNN(Estimator): + def __init__(self, + model_hash: str, + smooth: int = 1, + stream_mc=True, + monte_carlo_samples=25, + bonemap: BoneMap = None, + tag: str = "NN POCKET PHONE"): + self.__tag = tag + + self.__mc_samples = monte_carlo_samples + + # simple lookup for values of interest + self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP + + # load model from given parameters + self.__nn_model, params = models.load_deployed_model_from_hash(hash_str=model_hash) + + super().__init__( + model_name=model_hash, + x_inputs=NNS_INPUTS(params["x_inputs_v"]), + y_targets=NNS_TARGETS(params["y_targets_v"]), + smooth=smooth, + normalize=params["normalize"], + seq_len=params["sequence_len"], + stream_mc=stream_mc, + tag=tag, + bonemap=bonemap + ) + + def parse_row_to_xx(self, row): + # process the data + # pressure - calibrated initial pressure = relative pressure + r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] + # calibrate smartwatch rotation + sw_rot = np.array([ + row[self.__slp["sw_rotvec_w"]], + row[self.__slp["sw_rotvec_x"]], + row[self.__slp["sw_rotvec_y"]], + row[self.__slp["sw_rotvec_z"]] + ]) + sw_fwd = np.array([ + row[self.__slp["sw_forward_w"]], + row[self.__slp["sw_forward_x"]], + row[self.__slp["sw_forward_y"]], + row[self.__slp["sw_forward_z"]] + ]) + + # estimate north 
quat to align Z-axis of global and android coord system + r = ts.android_quat_to_global_no_north(sw_fwd) + y_rot = ts.reduce_global_quat_to_y_rot(r) + quat_north = ts.euler_to_quat(np.array([0, -y_rot, 0])) + # calibrate watch + sw_cal_g = ts.android_quat_to_global(sw_rot, quat_north) + sw_6drr_cal = ts.quat_to_6drr_1x6(sw_cal_g) + + ph_fwd = np.array([ + row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]], + row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]] + ]) + ph_rot = np.array([ + row[self.__slp["ph_rotvec_w"]], row[self.__slp["ph_rotvec_x"]], + row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]] + ]) + # the device orientations if the calib position with left arm forward is perfect + ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north) + ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) + ph_cal_g = ts.hamilton_product(ph_rot_g, ts.quat_invert(ph_fwd_g)) + + # hip y rotation from phone + hips_y_rot = ts.reduce_global_quat_to_y_rot(ph_cal_g) + hips_yrot_cal_sin = np.sin(hips_y_rot) + hips_yrot_cal_cos = np.cos(hips_y_rot) + + # assemble the entire input vector of one time step + return np.hstack([ + row[self.__slp["sw_dt"]], + row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], + row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], + row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], + row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]], + sw_6drr_cal, + r_pres, + hips_yrot_cal_sin, + hips_yrot_cal_cos + ], dtype=np.float32) + + def make_prediction_from_row_hist(self, xx): + # cast to a torch tensor with batch size 1 + xx = torch.tensor(xx[None, :, :]) + with torch.no_grad(): + # make monte carlo predictions if the model makes use of dropout + t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples) + + # if on GPU copy the tensor to host memory first + if self._device.type == 'cuda': + t_preds = t_preds.cpu() + t_preds = t_preds.numpy() + + # we are only interested in the last prediction of the sequence + t_preds = t_preds[:, -1, :] + return t_preds + + @abstractmethod + def process_msg(self, msg: np.array): + return diff --git a/src/wear_mocap_ape/estimate/watch_phone_uarm.py b/src/wear_mocap_ape/estimate/watch_phone_uarm.py index d1ccfe7..2a483d3 100644 --- a/src/wear_mocap_ape/estimate/watch_phone_uarm.py +++ b/src/wear_mocap_ape/estimate/watch_phone_uarm.py @@ -1,65 +1,119 @@ -import logging -import time from abc import abstractmethod -from datetime import datetime -import queue import numpy as np from wear_mocap_ape.data_types.bone_map import BoneMap +from wear_mocap_ape.estimate.estimator import Estimator from wear_mocap_ape.utility import transformations as ts from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.utility.names import NNS_INPUTS, NNS_TARGETS -class WatchPhoneUarm: +class WatchPhoneUarm(Estimator): def __init__(self, smooth: int = 5, - left_hand_mode=True, tag: str = "PUB WATCH PHONE UARM", bonemap: BoneMap = None): - self.__active = True - self.__tag = tag - self.__left_hand_mode = left_hand_mode + super().__init__( + model_name="forward_kinematics", + x_inputs=NNS_INPUTS.WATCH_PHONE_CAL, + y_targets=NNS_TARGETS.ORI_CAL_LARM_UARM, + smooth=smooth, + normalize=False, + seq_len=1, + stream_mc=False, + tag=tag, + bonemap=bonemap + ) - # average over multiple time steps - self.__smooth = smooth - self.__smooth_hist = []
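+        # smoothing, sequence history, and body measurements are now handled
+        # by the Estimator base class (see the super().__init__ call above)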
+        self.__tag = tag # simple lookup for values of interest self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP - # use body measurements for transitions - if bonemap is None: - self.__larm_vec = np.array([BoneMap.DEFAULT_LARM_LEN, 0, 0]) - self.__uarm_vec = np.array([BoneMap.DEFAULT_UARM_LEN, 0, 0]) - self.__uarm_orig = BoneMap.DEFAULT_L_SHOU_ORIG_RH - else: - # get values from bone map - self.__larm_vec = np.array([bonemap.left_lower_arm_length, 0, 0]) - self.__uarm_vec = np.array([bonemap.left_upper_arm_length, 0, 0]) - self.__uarm_orig = bonemap.left_upper_arm_origin_rh - self.__hips_quat = np.array([1, 0, 0, 0]) - # in left hand mode, the arm is stretched along the negative X-axis in T pose - if left_hand_mode: - self.__larm_vec[0] = -self.__larm_vec[0] - self.__uarm_vec[0] = -self.__uarm_vec[0] - else: - self.__uarm_orig = self.__uarm_orig * np.array([-1, 1, 1]) # invert x + # if not left_hand_mode: + # self._larm_vec[0] = -self._larm_vec[0] + # self._uarm_vec[0] = -self._uarm_vec[0] + # self._uarm_orig = self._uarm_orig * np.array([-1, 1, 1]) # invert x + # self._body_measurements = np.r_[self._larm_vec, self._uarm_vec, self._uarm_orig][np.newaxis, :] + + def calibrate_orientation_quats(self, + sw_quat: np.array, + sw_fwd: np.array, + ph_quat: np.array, + ph_fwd: np.array) -> (np.array, np.array): + + # estimate north quat to align Z-axis of global and android coord system + quat_north = ts.calib_watch_left_to_north_quat(sw_fwd) + # the arm orientations if the calib position with left arm forward is perfect + larm_dst_g = np.array([-0.7071068, 0, -0.7071068, 0]) + uarm_dst_g = np.array([0.7071068, 0, 0.7071068, 0]) + + # calibrate watch with offset to perfect position + sw_rot_g = ts.android_quat_to_global(sw_quat, quat_north) + sw_fwd_g = ts.android_quat_to_global(sw_fwd, quat_north) + sw_off_g = ts.hamilton_product(ts.quat_invert(sw_fwd_g), larm_dst_g) + sw_cal_g = ts.hamilton_product(sw_rot_g, sw_off_g) + + # calibrate phone with offset to perfect position + ph_rot_g = ts.android_quat_to_global(ph_quat, quat_north) + ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) + ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), uarm_dst_g) + ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g) - def calibrate_imus_with_offset(self, row: np.array): + return sw_cal_g, ph_cal_g - # get relevant entries from the row + def parse_row_to_xx(self, row: np.array): + if not isinstance(row, np.ndarray): + row = np.array(row) + + # # get relevant entries from the row + # if len(row.shape) > 1: + # # get relevant entries from the row + # sw_fwd = np.c_[ + # row[:, self.__slp["sw_forward_w"]], row[:, self.__slp["sw_forward_x"]], + # row[:, self.__slp["sw_forward_y"]], row[:, self.__slp["sw_forward_z"]] + # ] + # sw_rot = np.c_[ + # row[:, self.__slp["sw_rotvec_w"]], row[:, self.__slp["sw_rotvec_x"]], + # row[:, self.__slp["sw_rotvec_y"]], row[:, self.__slp["sw_rotvec_z"]] + # ] + # + # ph_fwd = np.c_[ + # row[:, self.__slp["ph_forward_w"]], row[:, self.__slp["ph_forward_x"]], + # row[:, self.__slp["ph_forward_y"]], row[:, self.__slp["ph_forward_z"]] + # ] + # + # ph_rot = np.c_[ + # row[:, self.__slp["ph_rotvec_w"]], row[:, self.__slp["ph_rotvec_x"]], + # row[:, self.__slp["ph_rotvec_y"]], row[:, self.__slp["ph_rotvec_z"]] + # ] + # + # sw_sensor_dat = np.c_[ + # row[:, self.__slp["sw_dt"]], + # row[:, self.__slp["sw_gyro_x"]], row[:, self.__slp["sw_gyro_y"]], row[:, self.__slp["sw_gyro_z"]], + # row[:, self.__slp["sw_lvel_x"]], row[:, self.__slp["sw_lvel_y"]], row[:, self.__slp["sw_lvel_z"]], + # row[:,
self.__slp["sw_lacc_x"]], row[:, self.__slp["sw_lacc_y"]], row[:, self.__slp["sw_lacc_z"]], + # row[:, self.__slp["sw_grav_x"]], row[:, self.__slp["sw_grav_y"]], row[:, self.__slp["sw_grav_z"]] + # ] + # r_pres = row[:, self.__slp["sw_pres"]] - row[:, self.__slp["sw_init_pres"]] + # + # ph_sensor_dat = np.c_[ + # row[:, self.__slp["ph_gyro_x"]], row[:, self.__slp["ph_gyro_y"]], row[:, self.__slp["ph_gyro_z"]], + # row[:, self.__slp["ph_lvel_x"]], row[:, self.__slp["ph_lvel_y"]], row[:, self.__slp["ph_lvel_z"]], + # row[:, self.__slp["ph_lacc_x"]], row[:, self.__slp["ph_lacc_y"]], row[:, self.__slp["ph_lacc_z"]], + # row[:, self.__slp["ph_grav_x"]], row[:, self.__slp["ph_grav_y"]], row[:, self.__slp["ph_grav_z"]] + # ] + # else: sw_fwd = np.array([ row[self.__slp["sw_forward_w"]], row[self.__slp["sw_forward_x"]], row[self.__slp["sw_forward_y"]], row[self.__slp["sw_forward_z"]] ]) - sw_rot = np.array([ row[self.__slp["sw_rotvec_w"]], row[self.__slp["sw_rotvec_x"]], row[self.__slp["sw_rotvec_y"]], row[self.__slp["sw_rotvec_z"]] ]) - ph_fwd = np.array([ row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]], row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]] @@ -70,110 +124,35 @@ def calibrate_imus_with_offset(self, row: np.array): row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]] ]) - # estimate north quat to align Z-axis of global and android coord system - if self.__left_hand_mode: - quat_north = ts.calib_watch_left_to_north_quat(sw_fwd) - # the arm orientations if the calib position with left arm forward is perfect - larm_dst_g = np.array([-0.7071068, 0, -0.7071068, 0]) - uarm_dst_g = np.array([0.7071068, 0, 0.7071068, 0]) - - else: - quat_north = ts.calib_watch_right_to_north_quat(sw_fwd) - # the arm orientation if calib position with right arm forward is perfect - larm_dst_g = np.array([-0.7071068, 0.0, 0.7071068, 0.0]) - uarm_dst_g = np.array([0.7071068, 0.0, -0.7071068, 0.0]) - - # calibrate watch with offset to perfect position - sw_rot_g = ts.android_quat_to_global(sw_rot, quat_north) - sw_fwd_g = ts.android_quat_to_global(sw_fwd, quat_north) - sw_off_g = ts.hamilton_product(ts.quat_invert(sw_fwd_g), larm_dst_g) - sw_cal_g = ts.hamilton_product(sw_rot_g, sw_off_g) + sw_sensor_dat = np.array([ + row[self.__slp["sw_dt"]], + row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], + row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], + row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], + row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]] + ]) + r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] - # calibrate phone with offset to perfect position - ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north) - ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) - ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), uarm_dst_g) - ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g) + ph_sensor_dat = np.array([ + row[self.__slp["ph_gyro_x"]], row[self.__slp["ph_gyro_y"]], row[self.__slp["ph_gyro_z"]], + row[self.__slp["ph_lvel_x"]], row[self.__slp["ph_lvel_y"]], row[self.__slp["ph_lvel_z"]], + row[self.__slp["ph_lacc_x"]], row[self.__slp["ph_lacc_y"]], row[self.__slp["ph_lacc_z"]], + row[self.__slp["ph_grav_x"]], row[self.__slp["ph_grav_y"]], row[self.__slp["ph_grav_z"]] + ]) - return sw_cal_g, ph_cal_g + sw_cal_g, ph_cal_g = self.calibrate_orientation_quats(sw_quat=sw_rot, 
sw_fwd=sw_fwd, + ph_quat=ph_rot, ph_fwd=ph_fwd) - def row_to_arm_pose(self, row): - - larm_quat, uarm_quat = self.calibrate_imus_with_offset(row) - - # store rotations in history if smoothing is required - if self.__smooth > 1: - self.__smooth_hist.append(np.hstack([larm_quat, uarm_quat])) - if len(self.__smooth_hist) < self.__smooth: - return None - while len(self.__smooth_hist) > self.__smooth: - del self.__smooth_hist[0] - - all_rots = np.vstack(self.__smooth_hist) - # Calculate the mean of all predictions mean - avg_larm_rot_r = ts.average_quaternions(all_rots[:, :4]) - avg_uarm_rot_r = ts.average_quaternions(all_rots[:, 4:]) - else: - avg_larm_rot_r = larm_quat - avg_uarm_rot_r = uarm_quat - - # get the transition from upper arm origin to lower arm origin - # get transitions from lower arm origin to hand - larm_origin_rh = ts.quat_rotate_vector(avg_uarm_rot_r, self.__uarm_vec) + self.__uarm_orig - larm_rotated = ts.quat_rotate_vector(avg_larm_rot_r, self.__larm_vec) - - hand_origin_rh = larm_rotated + larm_origin_rh - # this is the list for the actual joint positions and rotations return np.hstack([ - avg_larm_rot_r, # hand quat - hand_origin_rh, # hand orig - avg_larm_rot_r, # larm quat - larm_origin_rh, # larm orig - avg_uarm_rot_r, # uarm quat - self.__uarm_orig, - self.__hips_quat + sw_sensor_dat, + ts.quat_to_6drr_1x6(sw_cal_g), + r_pres, + ph_sensor_dat, + ts.quat_to_6drr_1x6(ph_cal_g) ]) - def terminate(self): - self.__active = False - - def is_active(self): - return self.__active - - def stream_loop(self, sensor_q: queue): - # used to estimate delta time and processing speed in Hz - start = datetime.now() - dat = 0 - logging.info("[{}] starting Unity stream loop".format(self.__tag)) - - # this loops while the socket is listening and/or receiving data - self.__active = True - while self.__active: - # get the most recent smartwatch data row from the queue - row = sensor_q.get() - - while sensor_q.qsize() > 5: - row = sensor_q.get() - - # process received data - if row is not None: - # second-wise updates to determine message frequency - now = datetime.now() - if (now - start).seconds >= 5: - start = now - logging.info("[{}] {} Hz".format(self.__tag, dat / 5)) - dat = 0 - - # get message as numpy array - np_msg = self.row_to_arm_pose(row) - # can return None if not enough historical data for smoothing is available - if np_msg is None: - continue - - # send message to Unity - self.process_msg(msg=np_msg) - time.sleep(0.01) - dat += 1 + def make_prediction_from_row_hist(self, row_hist): + return np.c_[row_hist[:, 13:19], row_hist[:, -6:]] @abstractmethod def process_msg(self, msg: np.array): diff --git a/src/wear_mocap_ape/estimate/watch_phone_uarm_nn.py b/src/wear_mocap_ape/estimate/watch_phone_uarm_nn.py new file mode 100644 index 0000000..935103c --- /dev/null +++ b/src/wear_mocap_ape/estimate/watch_phone_uarm_nn.py @@ -0,0 +1,127 @@ +from abc import abstractmethod + +import numpy as np +import torch + +from wear_mocap_ape.data_types.bone_map import BoneMap +from wear_mocap_ape.estimate import models +from wear_mocap_ape.estimate.estimator import Estimator +from wear_mocap_ape.utility import transformations as ts +from wear_mocap_ape.data_types import messaging +from wear_mocap_ape.utility.names import NNS_TARGETS, NNS_INPUTS + + +class WatchPhoneUarmNN(Estimator): + def __init__(self, + model_hash: str, + smooth: int = 1, + stream_mc=True, + monte_carlo_samples=25, + bonemap: BoneMap = None, + tag: str = "NN UARM PHONE"): + self.__tag = tag + + self._stream_mc = stream_mc + 
self.__mc_samples = monte_carlo_samples + + # simple lookup for values of interest + self.__slp = messaging.WATCH_PHONE_IMU_LOOKUP + + # load model from given parameters + self.__nn_model, params = models.load_deployed_model_from_hash(hash_str=model_hash) + + super().__init__( + model_name=model_hash, + x_inputs=NNS_INPUTS(params["x_inputs_v"]), + y_targets=NNS_TARGETS(params["y_targets_v"]), + smooth=smooth, + normalize=params["normalize"], + seq_len=params["sequence_len"], + stream_mc=stream_mc, + tag=tag, + bonemap=bonemap + ) + + def parse_row_to_xx(self, row: np.array): + if not isinstance(row, np.ndarray): + row = np.array(row) + + sw_fwd = np.array([ + row[self.__slp["sw_forward_w"]], row[self.__slp["sw_forward_x"]], + row[self.__slp["sw_forward_y"]], row[self.__slp["sw_forward_z"]] + ]) + sw_rot = np.array([ + row[self.__slp["sw_rotvec_w"]], row[self.__slp["sw_rotvec_x"]], + row[self.__slp["sw_rotvec_y"]], row[self.__slp["sw_rotvec_z"]] + ]) + ph_fwd = np.array([ + row[self.__slp["ph_forward_w"]], row[self.__slp["ph_forward_x"]], + row[self.__slp["ph_forward_y"]], row[self.__slp["ph_forward_z"]] + ]) + + ph_rot = np.array([ + row[self.__slp["ph_rotvec_w"]], row[self.__slp["ph_rotvec_x"]], + row[self.__slp["ph_rotvec_y"]], row[self.__slp["ph_rotvec_z"]] + ]) + + sw_sensor_dat = np.array([ + row[self.__slp["sw_dt"]], + row[self.__slp["sw_gyro_x"]], row[self.__slp["sw_gyro_y"]], row[self.__slp["sw_gyro_z"]], + row[self.__slp["sw_lvel_x"]], row[self.__slp["sw_lvel_y"]], row[self.__slp["sw_lvel_z"]], + row[self.__slp["sw_lacc_x"]], row[self.__slp["sw_lacc_y"]], row[self.__slp["sw_lacc_z"]], + row[self.__slp["sw_grav_x"]], row[self.__slp["sw_grav_y"]], row[self.__slp["sw_grav_z"]] + ]) + r_pres = row[self.__slp["sw_pres"]] - row[self.__slp["sw_init_pres"]] + + ph_sensor_dat = np.array([ + row[self.__slp["ph_gyro_x"]], row[self.__slp["ph_gyro_y"]], row[self.__slp["ph_gyro_z"]], + row[self.__slp["ph_lvel_x"]], row[self.__slp["ph_lvel_y"]], row[self.__slp["ph_lvel_z"]], + row[self.__slp["ph_lacc_x"]], row[self.__slp["ph_lacc_y"]], row[self.__slp["ph_lacc_z"]], + row[self.__slp["ph_grav_x"]], row[self.__slp["ph_grav_y"]], row[self.__slp["ph_grav_z"]] + ]) + + # estimate north quat to align Z-axis of global and android coord system + quat_north = ts.calib_watch_left_to_north_quat(sw_fwd) + # the arm orientations if the calib position with left arm forward is perfect + larm_dst_g = np.array([-0.7071068, 0, -0.7071068, 0]) + uarm_dst_g = np.array([0.7071068, 0, 0.7071068, 0]) + + # calibrate watch with offset to perfect position + sw_rot_g = ts.android_quat_to_global(sw_rot, quat_north) + sw_fwd_g = ts.android_quat_to_global(sw_fwd, quat_north) + sw_off_g = ts.hamilton_product(ts.quat_invert(sw_fwd_g), larm_dst_g) + sw_cal_g = ts.hamilton_product(sw_rot_g, sw_off_g) + + # calibrate phone with offset to perfect position + ph_rot_g = ts.android_quat_to_global(ph_rot, quat_north) + ph_fwd_g = ts.android_quat_to_global(ph_fwd, quat_north) + ph_off_g = ts.hamilton_product(ts.quat_invert(ph_fwd_g), uarm_dst_g) + ph_cal_g = ts.hamilton_product(ph_rot_g, ph_off_g) + + return np.hstack([ + sw_sensor_dat, + ts.quat_to_6drr_1x6(sw_cal_g), + r_pres, + ph_sensor_dat, + ts.quat_to_6drr_1x6(ph_cal_g) + ], dtype=np.float32) + + def make_prediction_from_row_hist(self, xx): + # cast to a torch tensor with batch size 1 + xx = torch.tensor(xx[None, :, :]) + with torch.no_grad(): + # make monte carlo predictions if the model makes use of dropout
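+            # note: with n_samples > 1 this presumably keeps dropout active per
+            # forward pass, so the samples approximate a predictive distribution
+            # rather than a single point estimate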
+            t_preds = self.__nn_model.monte_carlo_predictions(x=xx, n_samples=self.__mc_samples) + + # if on GPU copy the tensor to host memory first + if self._device.type == 'cuda': + t_preds = t_preds.cpu() + t_preds = t_preds.numpy() + + # we are only interested in the last prediction of the sequence + t_preds = t_preds[:, -1, :] + return t_preds + + @abstractmethod + def process_msg(self, msg: np.array): + return diff --git a/src/wear_mocap_ape/record/watch_only_rec.py b/src/wear_mocap_ape/record/watch_only_rec.py index 82885d0..e63c783 100644 --- a/src/wear_mocap_ape/record/watch_only_rec.py +++ b/src/wear_mocap_ape/record/watch_only_rec.py @@ -6,10 +6,10 @@ from wear_mocap_ape.data_deploy.nn import deploy_models from wear_mocap_ape.data_types.bone_map import BoneMap -from wear_mocap_ape.estimate.watch_only import WatchOnly +from wear_mocap_ape.estimate.watch_only import WatchOnlyNn -class WatchOnlyRecorder(WatchOnly): +class WatchOnlyRecorder(WatchOnlyNn): def __init__(self, file: Path, model_hash: str = deploy_models.LSTM.WATCH_ONLY.value, diff --git a/src/wear_mocap_ape/stream/listener/audio.py b/src/wear_mocap_ape/stream/listener/audio.py index d5a4219..d555d52 100755 --- a/src/wear_mocap_ape/stream/listener/audio.py +++ b/src/wear_mocap_ape/stream/listener/audio.py @@ -15,11 +15,11 @@ class AudioListener: - def __init__(self, ip: str, port: int = config.PORT_LISTEN_WATCH_PHONE_AUDIO, tag: str = "AUDIO"): + def __init__(self, ip: str, port: int = config.PORT_LISTEN_AUDIO, tag: str = "AUDIO"): # Audio recording parameters # See https://github.com/googleapis/python-speech/blob/main/samples/microphone/transcribe_streaming_infinite.py - self.__sample_rate = 32000 - self.__chunk_size = 800 # int(16000 / 10) # 100ms + self.__sample_rate = 44000 + self.__chunk_size = 2048 # int(16000 / 10) # 100ms self.__language_code = "en-US" # a BCP-47 language tag self.__ip = ip self.__port = port # the dual Port @@ -114,7 +114,7 @@ def __play_audio(self, q: queue): # Instantiate PyAudio and initialize PortAudio system resources p = pyaudio.PyAudio() # Open output audio stream - stream = p.open(format=pyaudio.paInt16, + stream = p.open(format=pyaudio.paALSA, frames_per_buffer=self.__chunk_size, channels=1, rate=self.__sample_rate, @@ -127,6 +127,7 @@ def __play_audio(self, q: queue): row = q.get() if row: stream.write(row) + time.sleep(0.01) else: break @@ -185,5 +186,5 @@ def __transcribe(self, q_in: queue, q_out: queue): if __name__ == "__main__": logging.basicConfig(level=logging.INFO) - wp_audio = AudioListener(ip="10.218.100.139", port=config.PORT_LISTEN_WATCH_PHONE_AUDIO) + wp_audio = AudioListener(ip="192.168.1.162", port=config.PORT_LISTEN_AUDIO) wp_audio.play_stream_loop() diff --git a/src/wear_mocap_ape/stream/publisher/watch_only_udp.py b/src/wear_mocap_ape/stream/publisher/watch_only_udp.py index 92f9b60..daf6546 100644 --- a/src/wear_mocap_ape/stream/publisher/watch_only_udp.py +++ b/src/wear_mocap_ape/stream/publisher/watch_only_udp.py @@ -5,23 +5,25 @@ import wear_mocap_ape.config as config from wear_mocap_ape.data_deploy.nn import deploy_models from wear_mocap_ape.data_types.bone_map import BoneMap -from wear_mocap_ape.estimate.watch_only import WatchOnly +from wear_mocap_ape.estimate.watch_only import WatchOnlyNn -class WatchOnlyUDP(WatchOnly): +class WatchOnlyNnUDP(WatchOnlyNn): def __init__(self, ip: str, port: int = config.PORT_PUB_LEFT_ARM, model_hash: str = deploy_models.LSTM.WATCH_ONLY.value, - smooth: int = 10, - stream_monte_carlo=True, - monte_carlo_samples=25, + smooth: int = 1, + stream_mc=True, + mc_samples=25, bonemap: BoneMap = None, +
diff --git a/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_udp.py b/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_kalman_udp.py
similarity index 74%
rename from src/wear_mocap_ape/stream/publisher/watch_phone_pocket_udp.py
rename to src/wear_mocap_ape/stream/publisher/watch_phone_pocket_kalman_udp.py
index b187138..3f2c791 100644
--- a/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_udp.py
+++ b/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_kalman_udp.py
@@ -2,26 +2,29 @@
 import socket
 import struct
 import numpy as np
-from wear_mocap_ape.estimate.watch_phone_pocket import WatchPhonePocket
+from wear_mocap_ape.estimate.watch_phone_pocket_kalman import WatchPhonePocketKalman
 
 
-class WatchPhonePocketUDP(WatchPhonePocket):
+class WatchPhonePocketKalmanUDP(WatchPhonePocketKalman):
     def __init__(self,
                  ip,
                  port,
                  smooth: int = 1,
                  num_ensemble: int = 32,
-                 model_name="SW-model",
+                 model_hash="SW-model-sept-4",
                  window_size: int = 10,
                  stream_mc: bool = True,
+                 normalize: bool = True,
                  tag: str = "KALMAN UDP POCKET PHONE"):
         super().__init__(
             smooth=smooth,
             num_ensemble=num_ensemble,
-            model_name=model_name,
+            model_hash=model_hash,
             window_size=window_size,
-            stream_mc=stream_mc
+            stream_mc=stream_mc,
+            normalize=normalize,
+            tag=tag
         )
         self.__tag = tag
         self.__port = port
diff --git a/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_nn_udp.py b/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_nn_udp.py
new file mode 100644
index 0000000..9f0a89e
--- /dev/null
+++ b/src/wear_mocap_ape/stream/publisher/watch_phone_pocket_nn_udp.py
@@ -0,0 +1,38 @@
+import socket
+import struct
+
+import numpy as np
+
+from wear_mocap_ape.data_deploy.nn import deploy_models
+from wear_mocap_ape.data_types.bone_map import BoneMap
+from wear_mocap_ape.estimate.watch_phone_pocket_nn import WatchPhonePocketNN
+
+
+class WatchPhonePocketNnUDP(WatchPhonePocketNN):
+    def __init__(self,
+                 ip,
+                 port,
+                 smooth: int = 1,
+                 model_hash: str = deploy_models.LSTM.WATCH_PHONE_POCKET.value,
+                 stream_mc: bool = True,
+                 mc_samples: int = 25,
+                 bonemap: BoneMap = None,
+                 tag: str = "PHONE POCKET NN UDP"):
+        super().__init__(
+            smooth=smooth,
+            model_hash=model_hash,
+            stream_mc=stream_mc,
+            monte_carlo_samples=mc_samples,
+            bonemap=bonemap
+        )
+
+        self.__tag = tag
+        self.__port = port
+        self.__ip = ip
+        self.__udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.__udp_socket.settimeout(5)
+
+    def process_msg(self, msg: np.array) -> int:
+        # craft UDP message and send
+        msg = struct.pack('f' * len(msg), *msg)
+        return self.__udp_socket.sendto(msg, (self.__ip, self.__port))
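A minimal wiring sketch for the new WatchPhonePocketNnUDP publisher, assuming the same listener/queue/publisher threading pattern as the example scripts and that processing_loop is its entry point like in the other publishers (the IP is a placeholder):

    import queue
    import threading

    import wear_mocap_ape.config as config
    from wear_mocap_ape.data_types import messaging
    from wear_mocap_ape.stream.listener.imu import ImuListener
    from wear_mocap_ape.stream.publisher.watch_phone_pocket_nn_udp import WatchPhonePocketNnUDP

    q = queue.Queue()

    # the listener fills the queue with received and parsed watch+phone IMU data
    imu_l = ImuListener(ip="192.168.1.162",
                        msg_size=messaging.watch_phone_imu_msg_len,
                        port=config.PORT_LISTEN_WATCH_PHONE_IMU_LEFT)
    threading.Thread(target=imu_l.listen, args=(q,)).start()

    # the publisher estimates arm poses from queued data and sends them via UDP
    joints_p = WatchPhonePocketNnUDP(ip="192.168.1.162", port=config.PORT_PUB_LEFT_ARM)
    threading.Thread(target=joints_p.processing_loop, args=(q,)).start()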
diff --git a/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_nn_udp.py b/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_nn_udp.py
new file mode 100644
index 0000000..9e486b6
--- /dev/null
+++ b/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_nn_udp.py
@@ -0,0 +1,43 @@
+import logging
+import socket
+import struct
+import numpy as np
+
+from wear_mocap_ape.data_deploy.nn.deploy_models import LSTM
+from wear_mocap_ape.data_types.bone_map import BoneMap
+from wear_mocap_ape.estimate.watch_phone_uarm_nn import WatchPhoneUarmNN
+
+
+class WatchPhoneUarmNnUDP(WatchPhoneUarmNN):
+    def __init__(self,
+                 ip: str,
+                 port: int,
+                 smooth: int = 1,
+                 model_hash: str = LSTM.WATCH_PHONE_UARM.value,
+                 stream_mc: bool = True,
+                 tag: str = "PUB WATCH PHONE",
+                 bonemap: BoneMap = None):
+        super().__init__(
+            smooth=smooth,
+            tag=tag,
+            bonemap=bonemap,
+            stream_mc=stream_mc,
+            model_hash=model_hash
+        )
+        self.__ip = ip
+        self.__port = port
+        self.__tag = tag
+        self.__udp_socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
+        self.__udp_socket.settimeout(5)
+
+    def process_msg(self, msg: np.array):
+        """
+        The parent class calls this method
+        whenever a new arm pose estimation is finished.
+        """
+        # craft UDP message and send
+        msg = struct.pack('f' * len(msg), *msg)
+        try:
+            self.__udp_socket.sendto(msg, (self.__ip, self.__port))
+        except TimeoutError:
+            logging.info(f"[{self.__tag}] timed out")
diff --git a/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_udp.py b/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_udp.py
index 6e12346..f90a79e 100644
--- a/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_udp.py
+++ b/src/wear_mocap_ape/stream/publisher/watch_phone_uarm_udp.py
@@ -11,13 +11,12 @@
 class WatchPhoneUarmUDP(WatchPhoneUarm):
     def __init__(self,
                  ip: str,
                  port: int,
-                 smooth: int = 5,
+                 smooth: int = 1,
                  left_hand_mode=True,
                  tag: str = "PUB WATCH PHONE",
                  bonemap: BoneMap = None):
         super().__init__(
             smooth=smooth,
-            left_hand_mode=left_hand_mode,
             tag=tag,
             bonemap=bonemap
         )
diff --git a/src/wear_mocap_ape/utility/data_stats.py b/src/wear_mocap_ape/utility/data_stats.py
index d2a5aaa..d1dfa3a 100644
--- a/src/wear_mocap_ape/utility/data_stats.py
+++ b/src/wear_mocap_ape/utility/data_stats.py
@@ -39,9 +39,8 @@ def get_norm_stats(x_inputs: NNS_INPUTS, y_targets: NNS_TARGETS, data_list: list
     if data_list is None:
         return dat
     else:
-        # check if amount of data has increased
         len_m_data = len(data_list)
-        if dat["data_list_len"] >= len_m_data:
+        if dat["data_list_len"] == len_m_data:
             return dat
         else:
             logging.info("number of files changed")
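The caching change in get_norm_stats above means cached stats are now recomputed whenever the file count differs in either direction, not only when it grows. A condensed sketch of the check (simplified hypothetical helper, not the full function body):

    def stats_are_stale(cached_len: int, data_list: list) -> bool:
        # recompute normalization stats when the number of files changed at all
        return cached_len != len(data_list)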
"gt_larm_6drr_rh_3", # larm 6drr - "gt_larm_6drr_rh_4", "gt_larm_6drr_rh_5", "gt_larm_6drr_rh_6", - "gt_hips_yrot_cal_sin", "gt_hips_yrot_cal_cos" # hips calibrated y rot + ORI_CAL_LARM_UARM = [ + "gt_larm_6drr_cal_1", "gt_larm_6drr_cal_2", "gt_larm_6drr_cal_3", # larm 6drr + "gt_larm_6drr_cal_4", "gt_larm_6drr_cal_5", "gt_larm_6drr_cal_6", + "gt_uarm_6drr_cal_1", "gt_uarm_6drr_cal_2", "gt_uarm_6drr_cal_3", # uarm 6drr + "gt_uarm_6drr_cal_4", "gt_uarm_6drr_cal_5", "gt_uarm_6drr_cal_6" ] - ORI_CAL_LARM_UARM_HIPS = [ + ORI_POS_CAL_LARM_UARM_HIPS = [ + "gt_hand_orig_cal_x", "gt_hand_orig_cal_y", "gt_hand_orig_cal_z", "gt_larm_6drr_cal_1", "gt_larm_6drr_cal_2", "gt_larm_6drr_cal_3", # larm 6drr "gt_larm_6drr_cal_4", "gt_larm_6drr_cal_5", "gt_larm_6drr_cal_6", + "gt_larm_orig_cal_x", "gt_larm_orig_cal_y", "gt_larm_orig_cal_z", "gt_uarm_6drr_cal_1", "gt_uarm_6drr_cal_2", "gt_uarm_6drr_cal_3", # uarm 6drr "gt_uarm_6drr_cal_4", "gt_uarm_6drr_cal_5", "gt_uarm_6drr_cal_6", "gt_hips_yrot_cal_sin", "gt_hips_yrot_cal_cos" # hips calibrated y rot ] + class NNS_INPUTS(Enum): + # watch data only WATCH_ONLY = [ "sw_dt", "sw_gyro_x", "sw_gyro_y", "sw_gyro_z", @@ -43,7 +38,15 @@ class NNS_INPUTS(Enum): "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6", "sw_pres_cal" ] - WATCH_PH_HIP = [ + WATCH_ONLY_ACC_ONLY = [ + "sw_dt", + "sw_lacc_x", "sw_lacc_y", "sw_lacc_z", + "sw_6drr_cal_1", "sw_6drr_cal_2", "sw_6drr_cal_3", + "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6" + ] + + # watch data and the PH_HIPS estimate + WATCH_HIP = [ "sw_dt", "sw_gyro_x", "sw_gyro_y", "sw_gyro_z", "sw_lvel_x", "sw_lvel_y", "sw_lvel_z", @@ -54,9 +57,27 @@ class NNS_INPUTS(Enum): "sw_pres_cal", "ph_hips_yrot_cal_sin", "ph_hips_yrot_cal_cos" - ] - WATCH_PHONE_CALIB = [ + WATCH_HIP_ACC_ONLY = [ + "sw_dt", + "sw_lacc_x", "sw_lacc_y", "sw_lacc_z", + "sw_6drr_cal_1", "sw_6drr_cal_2", "sw_6drr_cal_3", + "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6", + "ph_hips_yrot_cal_sin", + "ph_hips_yrot_cal_cos" + ] + WATCH_HIP_ACC_AND_BAR = [ + "sw_dt", + "sw_lacc_x", "sw_lacc_y", "sw_lacc_z", + "sw_6drr_cal_1", "sw_6drr_cal_2", "sw_6drr_cal_3", + "sw_6drr_cal_4", "sw_6drr_cal_5", "sw_6drr_cal_6", + "sw_pres_cal", + "ph_hips_yrot_cal_sin", + "ph_hips_yrot_cal_cos" + ] + + # watch and phone data + WATCH_PHONE_CAL = [ "sw_dt", "sw_gyro_x", "sw_gyro_y", "sw_gyro_z", "sw_lvel_x", "sw_lvel_y", "sw_lvel_z", diff --git a/src/wear_mocap_ape/utility/transformations.py b/src/wear_mocap_ape/utility/transformations.py index 0d5721e..bb5ef82 100644 --- a/src/wear_mocap_ape/utility/transformations.py +++ b/src/wear_mocap_ape/utility/transformations.py @@ -102,7 +102,7 @@ def quat_rotate_vector(rot: np.array, vec: np.array) -> np.array: raise UserWarning("rot has to have length 4 (w,x,y,z). 
Rot is {}".format(rot)) # the conjugate of the quaternion - r_s = rot * np.array([1, -1, -1, -1], dtype=np.float64) + r_s = rot * np.array([1, -1, -1, -1], dtype=np.float32) if len(vec.shape) > 1: # if vec is a column of vectors @@ -142,7 +142,7 @@ def hamilton_product(a: np.array, b: np.array) -> np.array: a[0] * b[1] + a[1] * b[0] + a[2] * b[3] - a[3] * b[2], a[0] * b[2] - a[1] * b[3] + a[2] * b[0] + a[3] * b[1], a[0] * b[3] + a[1] * b[2] - a[2] * b[1] + a[3] * b[0] - ], dtype=np.float64) + ], dtype=np.float32) if len(h_p.shape) > 1: return h_p.transpose() else: @@ -166,7 +166,7 @@ def euler_to_quat(e: np.array) -> np.array: sr * cp * cy - cr * sp * sy, cr * sp * cy + sr * cp * sy, cr * cp * sy - sr * sp * cy - ], dtype=np.float64) + ], dtype=np.float32) if len(q.shape) > 1: return q.transpose() @@ -190,7 +190,10 @@ def calib_watch_left_to_north_quat(sw_quat_fwd: np.array) -> float: # smartwatch rotation to global coordinates, which are [-w,x,z,y] r = android_quat_to_global_no_north(sw_quat_fwd) y_rot = reduce_global_quat_to_y_rot(r) - q_north = euler_to_quat(np.array([0, -y_rot, 0])) + if type(y_rot) is np.ndarray: + q_north = euler_to_quat(np.vstack([np.zeros(len(y_rot)), -y_rot, np.zeros(len(y_rot))]).T) + else: + q_north = euler_to_quat(np.array([0, -y_rot, 0])) return hamilton_product(np.array([0.7071068, 0, -0.7071068, 0]), q_north) # rotation to match left hand calibration @@ -244,11 +247,11 @@ def quat_invert(q: np.array): :param q: input quaternion :return: inverse quaternion """ - q_s = q * np.array([1, -1, -1, -1], dtype=np.float64) # the conjugate of the quaternion + q_s = q * np.array([1, -1, -1, -1], dtype=np.float32) # the conjugate of the quaternion if len(q.shape) > 1: - return q_s / np.sum(np.square(q, dtype=np.float64), axis=1, keepdims=True, dtype=np.float64) + return q_s / np.sum(np.square(q, dtype=np.float32), axis=1, keepdims=True, dtype=np.float32) else: - return q_s / np.sum(np.square(q, dtype=np.float64), dtype=np.float64) + return q_s / np.sum(np.square(q, dtype=np.float32), dtype=np.float32) def quat_to_euler(q: np.array): @@ -293,7 +296,7 @@ def quat_to_euler(q: np.array): if len(q.shape) > 1: return np.column_stack([a_x, a_y, a_z]) else: - return np.array([a_x, a_y, a_z], dtype=np.float64) + return np.array([a_x, a_y, a_z], dtype=np.float32) def mocap_pos_to_global(p: np.array): @@ -307,8 +310,7 @@ def mocap_pos_to_global(p: np.array): p[:, [0, 2]] = p[:, [2, 0]] else: p[[0, 2]] = p[[2, 0]] - # invert x and z axis - return p * np.array([-1, 1, -1], dtype=np.float64) + return p * np.array([1, 1, 1], dtype=np.float32) def mocap_quat_to_global(q: np.array): @@ -323,9 +325,9 @@ def mocap_quat_to_global(q: np.array): else: mc_q = np.array([q[3], q[0], q[1], q[2]]) # rotate by -90 around y-axis to align z-axis of both coord systems ... 
@@ -365,7 +367,8 @@ def quat_a_to_b(vec_a: np.array, vec_b: np.array):
     w = np.dot(n_a, n_b)
 
     if w > 0.9999 or w < -0.9999:
-        return np.array([0, 0, 1, 0], dtype=np.float64)  # 180-degree rotation around y
+        if w > 0:
+            return np.array([1, 0, 0, 0], dtype=np.float32)  # parallel vectors: identity rotation
+        return np.array([0, 0, 1, 0], dtype=np.float32)  # antiparallel vectors: 180-degree rotation around y
 
     xyz = np.cross(n_a, n_b)
@@ -462,7 +464,7 @@ def exponential_map_to_quaternion(em: np.array):
     q_img = two_sinc_half_theta * em  # imaginary values x,y,z
     w = np.cos(.5 * theta)
     # prepend w to x,y,z to get full quaternion
-    return np.insert(q_img, 0, w, dtype=np.float64)
+    return np.insert(q_img, 0, w).astype(np.float32)  # np.insert takes no dtype kwarg; cast afterwards
 
 
 def six_drr_1x6_to_quat(six_drr: np.array):
@@ -486,7 +488,7 @@ def quat_to_rot_mat_1x9(quat: np.array):
     def trans(q):
         w, x, y, z = q
         nq = w * w + x * x + y * y + z * z
-        if nq < np.finfo(np.float64).eps:
+        if nq < np.finfo(np.float32).eps:
             return np.eye(3)
         s = 2.0 / nq
         _x = x * s
@@ -533,6 +535,7 @@ def rot_mat_to_quat(rot_mat: np.array):
     ) / 3.0
     # Use Hermitian eigenvectors, values for speed
     vals, vecs = np.linalg.eigh(k)
+    # Select largest eigenvector, reorder to w,x,y,z quaternion
     q = vecs[[3, 0, 1, 2], np.argmax(vals)]
     # Prefer quaternion with positive w
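Given how many helpers switched to float32, a standalone round-trip check of the conjugate-over-squared-norm inverse may be worth keeping nearby (plain numpy, mirroring the formulas shown in quat_invert and hamilton_product; this snippet is illustrative and not part of the patch):

    import numpy as np

    def hamilton_w(a, b):
        # w component of the hamilton product, same formula as in transformations.py
        return a[0] * b[0] - a[1] * b[1] - a[2] * b[2] - a[3] * b[3]

    q = np.array([0.7071068, 0.7071068, 0.0, 0.0], dtype=np.float32)
    q_inv = q * np.array([1, -1, -1, -1], dtype=np.float32) / np.sum(np.square(q))
    print(np.isclose(hamilton_w(q, q_inv), 1.0, atol=1e-6))  # True: q * q_inv is the identity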