
Commit

start post-training analysis
themattinthehatt committed Dec 11, 2024
1 parent 11163d3 commit 0810ed5
Showing 6 changed files with 61 additions and 7 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -81,3 +81,5 @@ venv/
 
 # written by setuptools_scm
 **/_version.py
+
+tb_logs/
4 changes: 2 additions & 2 deletions configs/pipeline.yaml
@@ -32,8 +32,8 @@ train_networks:
 # run inference on videos in these directories (independent of training videos)
 inference_dirs:
 # - videos_new
-# - videos-for-each-labeled-frame
-  - videos_debug
+  - videos-for-each-labeled-frame
+# - videos_debug
 # training parameters
 # min_steps: 5000
 # max_steps: 5000
29 changes: 24 additions & 5 deletions lp3d_analysis/train.py
@@ -19,8 +19,14 @@
 from lightning_pose.utils.predictions import (predict_dataset,
                                               predict_single_video)
 from lightning_pose.utils.scripts import ( # get_callbacks,
-    calculate_train_batches, compute_metrics, get_data_module, get_dataset,
-    get_imgaug_transform, get_loss_factories, get_model)
+    calculate_train_batches,
+    compute_metrics,
+    get_data_module,
+    get_dataset,
+    get_imgaug_transform,
+    get_loss_factories,
+    get_model,
+)
 from moviepy.editor import VideoFileClip
 from omegaconf import DictConfig, OmegaConf
 from typeguard import typechecked
@@ -420,11 +426,24 @@ def train_and_infer(
 
     # Run inference on all InD/OOD videos and compute unsupervised metrics
     for video_dir in inference_dirs:
-        video_files = \
-            [f for f in os.listdir(os.path.join(data_dir, video_dir)) if f.endswith('.mp4')]
+
+        video_files = [
+            f for f in os.listdir(os.path.join(data_dir, video_dir)) if f.endswith('.mp4')
+        ]
+
+        if len(video_files) == 0:
+            # assume we have a nested directory: directories full of mp4 files
+            video_files = []
+            sub_video_dirs = os.listdir(os.path.join(data_dir, video_dir))
+            for sub_video_dir in sub_video_dirs:
+                sub_video_dir_abs = os.path.join(data_dir, video_dir, sub_video_dir)
+                files_tmp = os.listdir(sub_video_dir_abs)
+                video_files += [f'{sub_video_dir}/{f}' for f in files_tmp if f.endswith('.mp4')]
+
         for video_file in video_files:
             if csv_prefix:
-                inference_csv_name = f'{csv_prefix}_{video_file.replace(".mp4", ".csv")}'
+                raise NotImplementedError
+                # inference_csv_name = f'{csv_prefix}_{video_file.replace(".mp4", ".csv")}'
             else:
                 inference_csv_name = video_file.replace(".mp4", ".csv")
             inference_csv = os.path.join(results_dir, video_dir, inference_csv_name)
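Aside: the flat/nested discovery added above walks exactly one level of subdirectories by hand. For comparison only — this is not code from the commit, it descends to any depth rather than one level, and the function name and relative-path convention are illustrative — the same idea can be written with a recursive glob from the standard library:

import glob
import os


def list_video_files(data_dir, video_dir):
    """Collect .mp4 files under data_dir/video_dir, whether they sit at the top
    level or inside nested subdirectories."""
    root = os.path.join(data_dir, video_dir)
    # '**' with recursive=True matches zero or more directory levels
    matches = glob.glob(os.path.join(root, '**', '*.mp4'), recursive=True)
    # return paths relative to the video directory, e.g. 'session01/cam0.mp4',
    # mirroring the f'{sub_video_dir}/{f}' naming used in the diff
    return sorted(os.path.relpath(p, root).replace(os.sep, '/') for p in matches)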
22 changes: 22 additions & 0 deletions lp3d_analysis/utils.py
@@ -0,0 +1,22 @@


def extract_ood_frame_predictions(
    data_dir: str,
    results_dir: str,
    overwrite: bool,
) -> None:

    pass

    # look for all files that end in _new.csv -> these are OOD labels
    # loop through these
    # for each, load the csv file, and iterate through the rows/index
    # 'labeled-data/<vid_name>/img<#>.png'
    # s = 'labeled-data/vid_name/img0000.png'
    # s2 = '/'.join(s.split('/')[1:])
    # s3 = s2.replace('png', 'mp4')
    # load 51-frame csv file
    # extract center frame
    # put in dataframe
    # save out predictions_<cam_name>.csv
    # compute pixel
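The new utils.py function is only a stub (`pass`) plus the outline above. For orientation, a rough sketch of what that outline could become is shown below; it is an assumption, not code from this commit. The `_new.csv` naming, the 51-frame snippets, and the center-frame extraction come from the comments; the 3-row CSV header, the 'videos-for-each-labeled-frame' results subdirectory, the output file name, and the function name are guesses, and the final pixel-error step is omitted.

# Hypothetical sketch of the outline above; NOT code from this commit.
import glob
import os

import pandas as pd


def extract_ood_frame_predictions_sketch(data_dir, results_dir, overwrite=False):
    # OOD labels are assumed to be files ending in _new.csv directly under data_dir
    for label_file in glob.glob(os.path.join(data_dir, '*_new.csv')):
        labels = pd.read_csv(label_file, header=[0, 1, 2], index_col=0)
        rows = []
        for img_path in labels.index:  # e.g. 'labeled-data/<vid_name>/img0000.png'
            rel = '/'.join(img_path.split('/')[1:])  # '<vid_name>/img0000.png'
            # predictions for the matching 51-frame snippet written during inference;
            # the directory name is taken from the config's 'videos-for-each-labeled-frame'
            snippet_csv = os.path.join(
                results_dir, 'videos-for-each-labeled-frame', rel.replace('png', 'csv'))
            if not os.path.exists(snippet_csv):
                continue
            preds = pd.read_csv(snippet_csv, header=[0, 1, 2], index_col=0)
            # keep only the center frame, which corresponds to the labeled image
            rows.append(preds.iloc[[len(preds) // 2]].set_axis([img_path]))
        if rows:
            out_file = os.path.join(results_dir, 'predictions_new.csv')  # name is a placeholder
            if overwrite or not os.path.exists(out_file):
                pd.concat(rows).to_csv(out_file)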
11 changes: 11 additions & 0 deletions pipelines/pipeline_simple.py
@@ -3,16 +3,21 @@
 
 from lp3d_analysis.io import load_cfgs
 from lp3d_analysis.train import train_and_infer
+from lp3d_analysis.utils import extract_ood_frame_predictions
 
 
 # TODO
 # - before train_and_infer, will be nice to put cfg updates in their own function
 # - remove get_callbacks from lp3d_analysis.train.py
 # - replace train function with one from LP
 # - faster inference with OOD videos: don't rebuild the model every time
 
+VALID_MODEL_TYPES = [
+    'supervised',
+    'context',
+]
 
 
 def pipeline(config_file: str):
 
     # -------------------------------------------
@@ -69,6 +74,12 @@ def pipeline(config_file: str):
             inference_dirs=cfg_pipe.train_networks.inference_dirs,
             overwrite=cfg_pipe.train_networks.overwrite,
         )
+        # Clean up/reorganize OOD data
+        extract_ood_frame_predictions(
+            data_dir=data_dir,
+            results_dir=results_dir,
+            overwrite=cfg_pipe.train_networks.overwrite,
+        )
 
         # # # # -------------------------------------------------------------------------------------
         # # # # Post-process network outputs to generate potential pseudo labels (chosen in next step)
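For context on how these pieces hang together: `pipeline()` takes the path to a pipeline config, and the config edited above lives at configs/pipeline.yaml. A minimal way to invoke it from Python might be the following; the repository's actual entry point (argument parsing, etc.) sits in the truncated part of pipelines/pipeline_simple.py, so treat this only as a sketch.

# Hypothetical driver; assumes the repo root is on PYTHONPATH.
from pipelines.pipeline_simple import pipeline

if __name__ == '__main__':
    pipeline(config_file='configs/pipeline.yaml')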
Binary file not shown.
