add video export method to NamedFrame model
t-sasatani committed Dec 6, 2024
1 parent 908a6f7 commit 0c239f1
Showing 4 changed files with 169 additions and 40 deletions.
14 changes: 11 additions & 3 deletions miniscope_io/data/config/process/denoise_example.yml
@@ -7,11 +7,19 @@ noise_patch:
   threshold: 20
   buffer_size: 5032
   buffer_split: 1
-  diff_multiply: 10
+  diff_multiply: 1
+  output_result: True
+  output_noise_patch: True
+  output_diff: True
 frequency_masking:
   enable: True
   spacial_LPF_cutoff_radius: 10
-  vertical_BEF_cutoff: 5
+  vertical_BEF_cutoff: 1
   horizontal_BEF_cutoff: 0
   display_mask: False
-  end_frame: 1000
+  output_mask: True
+  output_result: True
+  output_freq_domain: True
+end_frame: 1000
+output_result: True
+output_dir: 'user_dir/output'
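
For context, a minimal sketch of loading this example config and reading the new output flags. It assumes `YAMLMixin` exposes a `from_yaml` classmethod (not shown in this diff), and the path is illustrative:

```python
# Hedged sketch: load the example denoise config and inspect the new output flags.
# Assumes YAMLMixin provides a from_yaml classmethod; adjust the path to your checkout.
from miniscope_io.models.process import DenoiseConfig

config = DenoiseConfig.from_yaml("miniscope_io/data/config/process/denoise_example.yml")

print(config.noise_patch.diff_multiply)        # 1
print(config.frequency_masking.output_result)  # True
print(config.output_dir)                       # 'user_dir/output'
```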
53 changes: 52 additions & 1 deletion miniscope_io/models/frames.py
@@ -2,13 +2,19 @@
Pydantic models for storing frames and videos.
"""

from pathlib import Path
from typing import List, Optional, TypeVar

import cv2
import numpy as np
from pydantic import BaseModel, Field, model_validator

from miniscope_io.io import VideoWriter
from miniscope_io.logging import init_logger

T = TypeVar("T", np.ndarray, List[np.ndarray], List[List[np.ndarray]])

logger = init_logger('model.frames')

class NamedFrame(BaseModel):
"""
@@ -75,11 +81,56 @@ def data(self) -> T:
            return self.video_list_frame
        else:
            raise ValueError("Unknown frame type or no frame data provided.")


    def export(self, output_path: Path, fps: int, suffix: bool) -> None:
        """
        Export the frame data to a file.

        Parameters
        ----------
        output_path : Path
            Path to the output file; the extension is set from the frame type.
        fps : int
            Frames per second of the exported video.
        suffix : bool
            Whether to append the frame name to the output file name.

        Raises
        ------
        NotImplementedError
            If the frame type is video_list_frame.
        """
        if suffix:
            output_path = output_path.with_name(output_path.stem + f'_{self.name}')
        if self.frame_type == "static_frame":
            # Write a single static frame out as a PNG
            cv2.imwrite(
                str(output_path.with_suffix('.png')),
                self.static_frame)
        elif self.frame_type == "video_frame":
            writer = VideoWriter.init_video(
                path=output_path.with_suffix(".avi"),
                width=self.video_frame[0].shape[1],
                height=self.video_frame[0].shape[0],
                fps=fps,
            )
            logger.info(
                f"Writing video to {output_path}.avi: "
                f"{self.video_frame[0].shape[1]}x{self.video_frame[0].shape[0]}")
            try:
                for frame in self.video_frame:
                    picture = cv2.cvtColor(frame, cv2.COLOR_GRAY2BGR)
                    writer.write(picture)
            finally:
                writer.release()

        elif self.frame_type == "video_list_frame":
            raise NotImplementedError("Exporting video list frames is not yet supported.")
        else:
            raise ValueError("Unknown frame type or no frame data provided.")

    class Config:
        """
        Pydantic config for allowing np.ndarray types.
        Could be a Numpydantic situation so will look into it later.
        """

        arbitrary_types_allowed = True
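
A hedged usage sketch of the new `export` method; the frame data, names, and paths below are made up for illustration, and the output directory is assumed to exist:

```python
# Illustrative use of NamedFrame.export (not part of this commit).
from pathlib import Path

import numpy as np

from miniscope_io.models.frames import NamedFrame

# A short grayscale (uint8) clip as a list of frames
frames = [np.random.randint(0, 255, (200, 200), dtype=np.uint8) for _ in range(30)]

video = NamedFrame(name="patched", video_frame=frames)
# suffix=True appends the frame name, so this writes user_dir/output/example_patched.avi
video.export(Path("user_dir/output/example"), fps=20, suffix=True)

still = NamedFrame(name="min_proj", static_frame=frames[0])
# Static frames are written as PNG: user_dir/output/example_min_proj.png
still.export(Path("user_dir/output/example"), fps=20, suffix=True)
```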
32 changes: 32 additions & 0 deletions miniscope_io/models/process.py
@@ -51,6 +51,18 @@ class NoisePatchConfig(BaseModel):
        default=1,
        description="Multiplier for the difference between the mean and the pixel value.",
    )
    output_result: bool = Field(
        default=False,
        description="Whether to output the result.",
    )
    output_noise_patch: bool = Field(
        default=False,
        description="Whether to output the noise patch.",
    )
    output_diff: bool = Field(
        default=False,
        description="Whether to output the difference.",
    )

class FreqencyMaskingConfig(BaseModel):
"""
@@ -76,6 +88,18 @@ class FreqencyMaskingConfig(BaseModel):
        default=False,
        description="Whether to display the mask.",
    )
    output_result: bool = Field(
        default=False,
        description="Whether to output the result.",
    )
    output_mask: bool = Field(
        default=False,
        description="Whether to output the mask.",
    )
    output_freq_domain: bool = Field(
        default=False,
        description="Whether to output the frequency domain.",
    )

class DenoiseConfig(BaseModel, YAMLMixin):
"""
@@ -97,3 +121,11 @@ class DenoiseConfig(BaseModel, YAMLMixin):
        default=None,
        description="Frame to end processing at.",
    )
    output_result: bool = Field(
        default=True,
        description="Whether to output the result.",
    )
    output_dir: Optional[str] = Field(
        default=None,
        description="Directory to save the output in.",
    )
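
These flags are consumed downstream when exporting `NamedFrame` results. A condensed sketch mirroring the pattern added to `miniscope_io/process/video.py` below, where `config` is a loaded `DenoiseConfig` and `freq_filtered_video` is a `NamedFrame` built during denoising (both assumed here):

```python
# Hedged sketch of gating exports on the new config flags (names taken from the diff below).
from pathlib import Path

output_dir = Path.cwd() / config.output_dir
output_dir.mkdir(parents=True, exist_ok=True)

if config.frequency_masking.output_result:
    freq_filtered_video.export(output_dir / "example_video", suffix=True, fps=20)
```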
110 changes: 74 additions & 36 deletions miniscope_io/process/video.py
@@ -2,6 +2,7 @@
This module contains functions for pre-processing video data.
"""

from pathlib import Path
from typing import Tuple

import cv2
@@ -20,7 +21,7 @@ class FrameProcessor:
A class to process video frames.
"""

def __init__(self, height: int, width: int, buffer_size: int = 5032, buffer_split: int = 1):
def __init__(self, height: int, width: int):
"""
Initialize the FrameProcessor object.
Block size/buffer size will be set by dev config later.
@@ -191,12 +192,20 @@ def denoise(
Might be useful to define some using environment variables.
"""
reader = VideoReader(video_path)
pathstem = Path(video_path).stem
output_dir = Path.cwd() / config.output_dir
if not output_dir.exists():
output_dir.mkdir(parents=True)
raw_frames = []
patched_frames = []
freq_domain_frames = []
noise_patchs = []
freq_filtered_frames = []
diff_frames = []
output_frames = []

if config.noise_patch.enable:
patched_frames = []
noise_patchs = []
diff_frames = []
if config.frequency_masking.enable:
freq_domain_frames = []
freq_filtered_frames = []

index = 0
fig = plt.figure()
@@ -216,44 +225,48 @@

try:
for frame in reader.read_frames():
raw_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

if config.end_frame and index > config.end_frame:
break

logger.debug(f"Processing frame {index}")

raw_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
raw_frames.append(raw_frame)

if index == 0:
previous_frame = raw_frame

patched_frame, noise_patch = processor.patch_noisy_buffer(
raw_frame,
previous_frame,
buffer_size=config.noise_patch.buffer_size,
buffer_split=config.noise_patch.buffer_split,
noise_threshold=config.noise_patch.threshold
)
freq_filtered_frame, frame_freq_domain = processor.remove_stripes(
img=patched_frame, mask=freq_mask
)
diff_frame = cv2.absdiff(raw_frame, freq_filtered_frame)

raw_frames.append(raw_frame)
patched_frames.append(patched_frame)
freq_domain_frames.append(frame_freq_domain)
noise_patchs.append(noise_patch * np.iinfo(np.uint8).max)
freq_filtered_frames.append(freq_filtered_frame)
diff_frames.append(diff_frame * config.noise_patch.diff_multiply)

output_frame = raw_frame.copy()

if config.noise_patch.enable:
patched_frame, noise_patch = processor.patch_noisy_buffer(
output_frame,
previous_frame,
buffer_size=config.noise_patch.buffer_size,
buffer_split=config.noise_patch.buffer_split,
noise_threshold=config.noise_patch.threshold
)
diff_frame = cv2.absdiff(raw_frame, previous_frame)
patched_frames.append(patched_frame)
noise_patchs.append(noise_patch * np.iinfo(np.uint8).max)
diff_frames.append(diff_frame * config.noise_patch.diff_multiply)
output_frame = patched_frame

if config.frequency_masking.enable:
freq_filtered_frame, frame_freq_domain = processor.remove_stripes(
img=patched_frame, mask=freq_mask
)
freq_domain_frames.append(frame_freq_domain)
freq_filtered_frames.append(freq_filtered_frame)
output_frame = freq_filtered_frame
output_frames.append(output_frame)
index += 1
finally:
reader.release()
plt.close(fig)

normalized_frames = VideoProcessor.normalize_video_stack(freq_filtered_frames)
minimum_projection = VideoProcessor.get_minimum_projection(normalized_frames)
minimum_projection = VideoProcessor.get_minimum_projection(output_frames)

subtract_minimum = [(frame - minimum_projection) for frame in normalized_frames]
subtract_minimum = [(frame - minimum_projection) for frame in output_frames]

subtract_minimum = VideoProcessor.normalize_video_stack(subtract_minimum)

@@ -266,11 +279,36 @@
freq_mask_frame = NamedFrame(
name="Freq mask", static_frame=freq_mask * np.iinfo(np.uint8).max
)
freq_domain_video = NamedFrame(name="Freq domain", video_frame=freq_domain_frames)
freq_filtered_video = NamedFrame(name="Freq filtered", video_frame=freq_filtered_frames)
normalized_video = NamedFrame(name="Normalized", video_frame=normalized_frames)
min_proj_frame = NamedFrame(name="Min Proj", static_frame=minimum_projection)
subtract_video = NamedFrame(name="Subtracted", video_frame=subtract_minimum)

if config.frequency_masking.enable:
freq_domain_video = NamedFrame(
name="freq_domain",
video_frame=freq_domain_frames)
freq_filtered_video = NamedFrame(
name="freq_filtered",
video_frame=freq_filtered_frames)
if config.frequency_masking.output_freq_domain:
freq_domain_video.export(
output_dir / f'{pathstem}',
suffix=True,
fps=20,
)
if config.frequency_masking.output_result:
freq_filtered_video.export(
(output_dir / f'{pathstem}'),
suffix=True,
fps=20,
)

normalized_video = NamedFrame(
name="Normalized",
video_frame=output_frames)
min_proj_frame = NamedFrame(
name="Min Proj",
static_frame=minimum_projection)
subtract_video = NamedFrame(
name="Subtracted",
video_frame=subtract_minimum)

if config.interactive_display.enable:
videos = [
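
The end of `denoise` builds a minimum projection from the processed frames and subtracts it from each frame before normalization. A small NumPy sketch of the assumed behavior of `get_minimum_projection` and the subtraction step (illustrative only, not the library implementation):

```python
# Illustrative NumPy sketch of the minimum-projection subtraction in denoise().
# The per-pixel minimum acts as a static-background estimate; subtracting it cannot
# underflow because every pixel is >= its own minimum across frames.
import numpy as np

output_frames = [np.random.randint(0, 255, (4, 4), dtype=np.uint8) for _ in range(5)]

minimum_projection = np.min(np.stack(output_frames), axis=0)
subtract_minimum = [frame - minimum_projection for frame in output_frames]
```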
