diff --git a/.gitignore b/.gitignore
index aedc8d7..e313e8b 100644
--- a/.gitignore
+++ b/.gitignore
@@ -81,3 +81,5 @@ venv/
 
 # written by setuptools_scm
 **/_version.py
+
+tb_logs/
\ No newline at end of file
diff --git a/configs/pipeline.yaml b/configs/pipeline.yaml
index c005c11..d517ee7 100644
--- a/configs/pipeline.yaml
+++ b/configs/pipeline.yaml
@@ -32,8 +32,8 @@ train_networks:
   # run inference on videos in these directories (independent of training videos)
   inference_dirs:
     # - videos_new
-    # - videos-for-each-labeled-frame
-    - videos_debug
+    - videos-for-each-labeled-frame
+    # - videos_debug
   # training parameters
   # min_steps: 5000
   # max_steps: 5000
diff --git a/lp3d_analysis/train.py b/lp3d_analysis/train.py
index 815bba7..730b3eb 100644
--- a/lp3d_analysis/train.py
+++ b/lp3d_analysis/train.py
@@ -19,8 +19,14 @@
 from lightning_pose.utils.predictions import (predict_dataset,
                                               predict_single_video)
 from lightning_pose.utils.scripts import (  # get_callbacks,
-    calculate_train_batches, compute_metrics, get_data_module, get_dataset,
-    get_imgaug_transform, get_loss_factories, get_model)
+    calculate_train_batches,
+    compute_metrics,
+    get_data_module,
+    get_dataset,
+    get_imgaug_transform,
+    get_loss_factories,
+    get_model,
+)
 from moviepy.editor import VideoFileClip
 from omegaconf import DictConfig, OmegaConf
 from typeguard import typechecked
@@ -420,11 +426,24 @@ def train_and_infer(
 
     # Run inference on all InD/OOD videos and compute unsupervised metrics
    for video_dir in inference_dirs:
-        video_files = \
-            [f for f in os.listdir(os.path.join(data_dir, video_dir)) if f.endswith('.mp4')]
+
+        video_files = [
+            f for f in os.listdir(os.path.join(data_dir, video_dir)) if f.endswith('.mp4')
+        ]
+
+        if len(video_files) == 0:
+            # assume we have a nested directory: directories full of mp4 files
+            video_files = []
+            sub_video_dirs = os.listdir(os.path.join(data_dir, video_dir))
+            for sub_video_dir in sub_video_dirs:
+                sub_video_dir_abs = os.path.join(data_dir, video_dir, sub_video_dir)
+                files_tmp = os.listdir(sub_video_dir_abs)
+                video_files += [f'{sub_video_dir}/{f}' for f in files_tmp if f.endswith('.mp4')]
+
         for video_file in video_files:
             if csv_prefix:
-                inference_csv_name = f'{csv_prefix}_{video_file.replace(".mp4", ".csv")}'
+                raise NotImplementedError
+                # inference_csv_name = f'{csv_prefix}_{video_file.replace(".mp4", ".csv")}'
             else:
                 inference_csv_name = video_file.replace(".mp4", ".csv")
             inference_csv = os.path.join(results_dir, video_dir, inference_csv_name)
diff --git a/lp3d_analysis/utils.py b/lp3d_analysis/utils.py
new file mode 100644
index 0000000..3dea6e3
--- /dev/null
+++ b/lp3d_analysis/utils.py
@@ -0,0 +1,22 @@
+
+
+def extract_ood_frame_predictions(
+    data_dir: str,
+    results_dir: str,
+    overwrite: bool,
+) -> None:
+
+    pass
+
+    # look for all files that end in _new.csv -> these are OOD labels
+    # loop through these
+    # for each, load the csv file, and iterate through the rows/index
+    # 'labeled-data/<vid_name>/img<#>.png'
+    # s = 'labeled-data/vid_name/img0000.png'
+    # s2 = '/'.join(s.split('/')[1:])
+    # s3 = s2.replace('png', 'mp4')
+    # load 51-frame csv file
+    # extract center frame
+    # put in dataframe
+    # save out predictions_.csv
+    # compute pixel
diff --git a/pipelines/pipeline_simple.py b/pipelines/pipeline_simple.py
index c3be9d4..de6d587 100644
--- a/pipelines/pipeline_simple.py
+++ b/pipelines/pipeline_simple.py
@@ -3,16 +3,21 @@
 
 from lp3d_analysis.io import load_cfgs
 from lp3d_analysis.train import train_and_infer
+from lp3d_analysis.utils import extract_ood_frame_predictions
 
 
 # TODO
 # - before train_and_infer, will be nice to put cfg updates in their own function
+# - remove get_callbacks from lp3d_analysis.train.py
+# - replace train function with one from LP
+# - faster inference with OOD videos: don't rebuild the model every time
 
 VALID_MODEL_TYPES = [
     'supervised',
     'context',
 ]
 
+
 def pipeline(config_file: str):
 
     # -------------------------------------------
@@ -69,6 +74,12 @@ def pipeline(config_file: str):
             inference_dirs=cfg_pipe.train_networks.inference_dirs,
             overwrite=cfg_pipe.train_networks.overwrite,
         )
+        # Clean up/reorganize OOD data
+        extract_ood_frame_predictions(
+            data_dir=data_dir,
+            results_dir=results_dir,
+            overwrite=cfg_pipe.train_networks.overwrite,
+        )
 
     # # # # -------------------------------------------------------------------------------------
     # # # # Post-process network outputs to generate potential pseudo labels (chosen in next step)
diff --git a/tb_logs/test_model/version_0/events.out.tfevents.1733863018.ip-10-192-11-211.42209.2 b/tb_logs/test_model/version_0/events.out.tfevents.1733863018.ip-10-192-11-211.42209.2
deleted file mode 100644
index c57c669..0000000
Binary files a/tb_logs/test_model/version_0/events.out.tfevents.1733863018.ip-10-192-11-211.42209.2 and /dev/null differ
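
Note on the flat-or-nested .mp4 discovery added to train_and_infer: a recursive glob could collect both layouts in one pass. The sketch below is only an alternative under that assumption, not code from the repo; list_mp4s is a hypothetical helper, and rglob descends arbitrarily deep rather than exactly one level, so it is only equivalent if snippet folders never nest further.

from pathlib import Path


def list_mp4s(data_dir: str, video_dir: str) -> list[str]:
    """Return .mp4 paths relative to video_dir, covering both flat files
    ('vid.mp4') and one-level-nested files ('<vid_name>/img0000.mp4')."""
    root = Path(data_dir) / video_dir
    return sorted(str(p.relative_to(root)) for p in root.rglob('*.mp4'))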
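
The new lp3d_analysis/utils.py stub only records a plan in comments. Below is a minimal sketch of that plan, with several assumptions not confirmed by the diff: OOD label files (*_new.csv) live directly in data_dir, snippet predictions are written under results_dir/<video_dir>/ mirroring the labeled-data layout (as train_and_infer's inference_csv path suggests), both label and prediction CSVs use DLC-style three-row headers (header=[0, 1, 2]), and the 'videos-for-each-labeled-frame' default and output naming are placeholders.

import os

import numpy as np
import pandas as pd


def extract_ood_frame_predictions(
    data_dir: str,
    results_dir: str,
    overwrite: bool,
    video_dir: str = 'videos-for-each-labeled-frame',  # assumed snippet directory
) -> None:
    # files ending in _new.csv hold the OOD labels (per the comment plan)
    label_files = [f for f in os.listdir(data_dir) if f.endswith('_new.csv')]
    for label_file in label_files:
        # assumed output name; the stub's comment elides the actual suffix
        out_file = os.path.join(
            results_dir, label_file.replace('_new.csv', '_new_predictions.csv'))
        if os.path.exists(out_file) and not overwrite:
            continue
        labels = pd.read_csv(
            os.path.join(data_dir, label_file), header=[0, 1, 2], index_col=0)
        rows = []
        for img_path in labels.index:
            # img_path looks like 'labeled-data/<vid_name>/img<#>.png'; assume the
            # matching snippet predictions sit at results_dir/video_dir/<vid_name>/img<#>.csv
            rel_csv = '/'.join(img_path.split('/')[1:]).replace('png', 'csv')
            preds = pd.read_csv(
                os.path.join(results_dir, video_dir, rel_csv), header=[0, 1, 2], index_col=0)
            center = preds.iloc[[len(preds) // 2]]  # center frame of the 51-frame snippet
            center.index = [img_path]
            rows.append(center)
        df = pd.concat(rows)
        df.to_csv(out_file)
        # pixel error: euclidean distance between labeled and predicted (x, y),
        # assuming both files list keypoints in the same order
        pred_xy = df.loc[:, df.columns.get_level_values(2).isin(['x', 'y'])].to_numpy()
        label_xy = labels.loc[:, labels.columns.get_level_values(2).isin(['x', 'y'])].to_numpy()
        pix_err = np.linalg.norm(
            pred_xy.reshape(len(df), -1, 2) - label_xy.reshape(len(labels), -1, 2), axis=-1)
        print(f'{label_file}: mean pixel error {np.nanmean(pix_err):.2f}')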