Skip to content

Commit

Permalink
Include chunk shape as a parameter in stream resource for HDF dataset (
Browse files Browse the repository at this point in the history
…#544)

* Adding record for num frames in chunk along with chunk_size field in desc

* Attributes are saved all in a single chunk

* Update tests to account for chunk_size datakey parameter

* Chunk size should be in sres not desc

* Move chunk size to sres parameters

* Refactor tests to reflect changes

* Chunk size can be an int or None

Co-authored-by: Eugene <ymatviych@bnl.gov>

* Update chunk size signal to non-zero in one of the AD test sets

* Use correct chunk size for PandA, make sure we use chunk size auto

* Add comment on chunk size

* Make chunk_size a tuple that explicitly describes all chunk dims

* Make sure chunk size is tuple even with one dim, update tests, simplify ad standard det tests

* Make chunk_size always tuple of int, default to empty tuple

* Use readback value to avoid disconnect between actual value and signal get

* Follow import convention for tests

* Make use of slicing for detector name in ad_standard_det_factory clearer

* Rename chunk size to chunk shape

* Add space for linting

* Fix test

* Fix merge conflict

* Simplifying ad standard det factory fixture

* Fix unawaited task issue

* kinetix fixture doesn't need to be async

---------

Co-authored-by: Eugene <ymatviych@bnl.gov>
  • Loading branch information
jwlodek and genematx committed Sep 19, 2024
1 parent 0b6ab21 commit 35c07fe
Show file tree
Hide file tree
Showing 11 changed files with 112 additions and 60 deletions.
3 changes: 3 additions & 0 deletions src/ophyd_async/core/_hdf_dataset.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,8 @@ class HDFDataset:
dtype_numpy: str = ""
multiplier: int = 1
swmr: bool = False
# Represents explicit chunk size written to disk.
chunk_shape: tuple[int, ...] = ()


SLICE_NAME = "AD_HDF5_SWMR_SLICE"
Expand Down Expand Up @@ -66,6 +68,7 @@ def __init__(
"dataset": ds.dataset,
"swmr": ds.swmr,
"multiplier": ds.multiplier,
"chunk_shape": ds.chunk_shape,
},
uid=None,
validate=True,
Expand Down
2 changes: 2 additions & 0 deletions src/ophyd_async/epics/adcore/_core_io.py
Original file line number Diff line number Diff line change
Expand Up @@ -135,4 +135,6 @@ def __init__(self, prefix: str, name="") -> None:
self.array_size0 = epics_signal_r(int, prefix + "ArraySize0")
self.array_size1 = epics_signal_r(int, prefix + "ArraySize1")
self.create_directory = epics_signal_rw(int, prefix + "CreateDirectory")
self.num_frames_chunks = epics_signal_r(int, prefix + "NumFramesChunks_RBV")
self.chunk_size_auto = epics_signal_rw_rbv(bool, prefix + "ChunkSizeAuto")
super().__init__(prefix, name)
10 changes: 10 additions & 0 deletions src/ophyd_async/epics/adcore/_hdf_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -56,6 +56,9 @@ async def open(self, multiplier: int = 1) -> dict[str, DataKey]:
# when directory path PV is processed.
await self.hdf.create_directory.set(info.create_dir_depth)

# Make sure we are using chunk auto-sizing
await asyncio.gather(self.hdf.chunk_size_auto.set(True))

await asyncio.gather(
self.hdf.num_extra_dims.set(0),
self.hdf.lazy_open.set(True),
Expand Down Expand Up @@ -84,6 +87,9 @@ async def open(self, multiplier: int = 1) -> dict[str, DataKey]:
self._multiplier = multiplier
outer_shape = (multiplier,) if multiplier > 1 else ()

# Determine number of frames that will be saved per HDF chunk
frames_per_chunk = await self.hdf.num_frames_chunks.get_value()

# Add the main data
self._datasets = [
HDFDataset(
Expand All @@ -92,6 +98,7 @@ async def open(self, multiplier: int = 1) -> dict[str, DataKey]:
shape=detector_shape,
dtype_numpy=np_dtype,
multiplier=multiplier,
chunk_shape=(frames_per_chunk, *detector_shape),
)
]
# And all the scalar datasets
Expand All @@ -118,6 +125,9 @@ async def open(self, multiplier: int = 1) -> dict[str, DataKey]:
(),
np_datatype,
multiplier,
# NDAttributes appear to always be configured with
# this chunk size
chunk_shape=(16384,),
)
)

Expand Down
6 changes: 5 additions & 1 deletion src/ophyd_async/fastcs/panda/_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -105,7 +105,11 @@ async def _update_datasets(self) -> None:

capture_table = await self.panda_data_block.datasets.get_value()
self._datasets = [
HDFDataset(dataset_name, "/" + dataset_name, [1], multiplier=1)
# TODO: Update chunk size to read signal once available in IOC
# Currently PandA IOC sets chunk size to 1024 points per chunk
HDFDataset(
dataset_name, "/" + dataset_name, [1], multiplier=1, chunk_shape=(1024,)
)
for dataset_name in capture_table["name"]
]

Expand Down
29 changes: 11 additions & 18 deletions tests/epics/adaravis/test_aravis.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,9 @@
import re

import pytest
from bluesky.run_engine import RunEngine

from ophyd_async.core import (
DetectorTrigger,
DeviceCollector,
PathProvider,
TriggerInfo,
set_mock_value,
Expand All @@ -14,14 +12,8 @@


@pytest.fixture
async def test_adaravis(
RE: RunEngine,
static_path_provider: PathProvider,
) -> adaravis.AravisDetector:
async with DeviceCollector(mock=True):
test_adaravis = adaravis.AravisDetector("ADARAVIS:", static_path_provider)

return test_adaravis
def test_adaravis(ad_standard_det_factory) -> adaravis.AravisDetector:
return ad_standard_det_factory(adaravis.AravisDetector)


@pytest.mark.parametrize("exposure_time", [0.0, 0.1, 1.0, 10.0, 100.0])
Expand Down Expand Up @@ -80,7 +72,7 @@ def test_gpio_pin_limited(test_adaravis: adaravis.AravisDetector):


async def test_hints_from_hdf_writer(test_adaravis: adaravis.AravisDetector):
assert test_adaravis.hints == {"fields": ["test_adaravis"]}
assert test_adaravis.hints == {"fields": ["test_adaravis1"]}


async def test_can_read(test_adaravis: adaravis.AravisDetector):
Expand All @@ -98,9 +90,9 @@ async def test_decribe_describes_writer_dataset(
await test_adaravis.stage()
await test_adaravis.prepare(one_shot_trigger_info)
assert await test_adaravis.describe() == {
"test_adaravis": {
"source": "mock+ca://ADARAVIS:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_adaravis1": {
"source": "mock+ca://ARAVIS1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand All @@ -125,12 +117,13 @@ async def test_can_collect(
assert docs[0][0] == "stream_resource"
stream_resource = docs[0][1]
sr_uid = stream_resource["uid"]
assert stream_resource["data_key"] == "test_adaravis"
assert stream_resource["data_key"] == "test_adaravis1"
assert stream_resource["uri"] == "file://localhost" + str(full_file_name)
assert stream_resource["parameters"] == {
"dataset": "/entry/data/data",
"swmr": False,
"multiplier": 1,
"chunk_shape": (1, 10, 10),
}
assert docs[1][0] == "stream_datum"
stream_datum = docs[1][1]
Expand All @@ -148,9 +141,9 @@ async def test_can_decribe_collect(
await test_adaravis.stage()
await test_adaravis.prepare(one_shot_trigger_info)
assert (await test_adaravis.describe_collect()) == {
"test_adaravis": {
"source": "mock+ca://ADARAVIS:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_adaravis1": {
"source": "mock+ca://ARAVIS1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand Down
3 changes: 3 additions & 0 deletions tests/epics/adcore/test_writers.py
Original file line number Diff line number Diff line change
Expand Up @@ -46,6 +46,9 @@ async def hdf_writer_with_stats(
hdf = adcore.NDFileHDFIO("HDF:")
stats = adcore.NDPluginStatsIO("FOO:")

# Set number of frames per chunk to something reasonable
set_mock_value(hdf.num_frames_chunks, 2)

return adcore.ADHDFWriter(
hdf,
static_path_provider,
Expand Down
32 changes: 13 additions & 19 deletions tests/epics/adkinetix/test_kinetix.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,18 @@
import pytest
from bluesky.run_engine import RunEngine

from ophyd_async.core import (
DetectorTrigger,
DeviceCollector,
StaticPathProvider,
set_mock_value,
)
from ophyd_async.core._detector import TriggerInfo
from ophyd_async.epics import adkinetix
from ophyd_async.epics.adkinetix._kinetix_io import KinetixTriggerMode


@pytest.fixture
async def test_adkinetix(
RE: RunEngine,
static_path_provider: StaticPathProvider,
) -> adkinetix.KinetixDetector:
async with DeviceCollector(mock=True):
test_adkinetix = adkinetix.KinetixDetector("KINETIX:", static_path_provider)

return test_adkinetix
def test_adkinetix(ad_standard_det_factory):
return ad_standard_det_factory(adkinetix.KinetixDetector)


async def test_get_deadtime(
Expand All @@ -30,7 +23,7 @@ async def test_get_deadtime(


async def test_trigger_modes(test_adkinetix: adkinetix.KinetixDetector):
set_mock_value(test_adkinetix.drv.trigger_mode, "Internal")
set_mock_value(test_adkinetix.drv.trigger_mode, KinetixTriggerMode.internal)

async def setup_trigger_mode(trig_mode: DetectorTrigger):
await test_adkinetix.controller.prepare(
Expand Down Expand Up @@ -58,7 +51,7 @@ async def setup_trigger_mode(trig_mode: DetectorTrigger):


async def test_hints_from_hdf_writer(test_adkinetix: adkinetix.KinetixDetector):
assert test_adkinetix.hints == {"fields": ["test_adkinetix"]}
assert test_adkinetix.hints == {"fields": ["test_adkinetix1"]}


async def test_can_read(test_adkinetix: adkinetix.KinetixDetector):
Expand All @@ -76,9 +69,9 @@ async def test_decribe_describes_writer_dataset(
await test_adkinetix.stage()
await test_adkinetix.prepare(one_shot_trigger_info)
assert await test_adkinetix.describe() == {
"test_adkinetix": {
"source": "mock+ca://KINETIX:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_adkinetix1": {
"source": "mock+ca://KINETIX1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand All @@ -103,12 +96,13 @@ async def test_can_collect(
assert docs[0][0] == "stream_resource"
stream_resource = docs[0][1]
sr_uid = stream_resource["uid"]
assert stream_resource["data_key"] == "test_adkinetix"
assert stream_resource["data_key"] == "test_adkinetix1"
assert stream_resource["uri"] == "file://localhost" + str(full_file_name)
assert stream_resource["parameters"] == {
"dataset": "/entry/data/data",
"swmr": False,
"multiplier": 1,
"chunk_shape": (1, 10, 10),
}
assert docs[1][0] == "stream_datum"
stream_datum = docs[1][1]
Expand All @@ -126,9 +120,9 @@ async def test_can_decribe_collect(
await test_adkinetix.stage()
await test_adkinetix.prepare(one_shot_trigger_info)
assert (await test_adkinetix.describe_collect()) == {
"test_adkinetix": {
"source": "mock+ca://KINETIX:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_adkinetix1": {
"source": "mock+ca://KINETIX1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand Down
40 changes: 19 additions & 21 deletions tests/epics/advimba/test_vimba.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,22 @@
import pytest
from bluesky.run_engine import RunEngine

from ophyd_async.core import (
DetectorTrigger,
DeviceCollector,
PathProvider,
set_mock_value,
)
from ophyd_async.core._detector import TriggerInfo
from ophyd_async.epics import advimba
from ophyd_async.epics.advimba._vimba_io import (
VimbaExposeOutMode,
VimbaOnOff,
VimbaTriggerSource,
)


@pytest.fixture
async def test_advimba(
RE: RunEngine,
static_path_provider: PathProvider,
) -> advimba.VimbaDetector:
async with DeviceCollector(mock=True):
test_advimba = advimba.VimbaDetector("VIMBA:", static_path_provider)

return test_advimba
def test_advimba(ad_standard_det_factory) -> advimba.VimbaDetector:
return ad_standard_det_factory(advimba.VimbaDetector)


async def test_get_deadtime(
Expand All @@ -30,9 +27,9 @@ async def test_get_deadtime(


async def test_arming_trig_modes(test_advimba: advimba.VimbaDetector):
set_mock_value(test_advimba.drv.trigger_source, "Freerun")
set_mock_value(test_advimba.drv.trigger_mode, "Off")
set_mock_value(test_advimba.drv.exposure_mode, "Timed")
set_mock_value(test_advimba.drv.trigger_source, VimbaTriggerSource.freerun)
set_mock_value(test_advimba.drv.trigger_mode, VimbaOnOff.off)
set_mock_value(test_advimba.drv.exposure_mode, VimbaExposeOutMode.timed)

async def setup_trigger_mode(trig_mode: DetectorTrigger):
await test_advimba.controller.prepare(TriggerInfo(number=1, trigger=trig_mode))
Expand Down Expand Up @@ -68,7 +65,7 @@ async def setup_trigger_mode(trig_mode: DetectorTrigger):


async def test_hints_from_hdf_writer(test_advimba: advimba.VimbaDetector):
assert test_advimba.hints == {"fields": ["test_advimba"]}
assert test_advimba.hints == {"fields": ["test_advimba1"]}


async def test_can_read(test_advimba: advimba.VimbaDetector):
Expand All @@ -86,9 +83,9 @@ async def test_decribe_describes_writer_dataset(
await test_advimba.stage()
await test_advimba.prepare(one_shot_trigger_info)
assert await test_advimba.describe() == {
"test_advimba": {
"source": "mock+ca://VIMBA:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_advimba1": {
"source": "mock+ca://VIMBA1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand All @@ -113,12 +110,13 @@ async def test_can_collect(
assert docs[0][0] == "stream_resource"
stream_resource = docs[0][1]
sr_uid = stream_resource["uid"]
assert stream_resource["data_key"] == "test_advimba"
assert stream_resource["data_key"] == "test_advimba1"
assert stream_resource["uri"] == "file://localhost" + str(full_file_name)
assert stream_resource["parameters"] == {
"dataset": "/entry/data/data",
"swmr": False,
"multiplier": 1,
"chunk_shape": (1, 10, 10),
}
assert docs[1][0] == "stream_datum"
stream_datum = docs[1][1]
Expand All @@ -136,9 +134,9 @@ async def test_can_decribe_collect(
await test_advimba.stage()
await test_advimba.prepare(one_shot_trigger_info)
assert (await test_advimba.describe_collect()) == {
"test_advimba": {
"source": "mock+ca://VIMBA:HDF1:FullFileName_RBV",
"shape": (0, 0),
"test_advimba1": {
"source": "mock+ca://VIMBA1:HDF1:FullFileName_RBV",
"shape": (10, 10),
"dtype": "array",
"dtype_numpy": "|i1",
"external": "STREAM:",
Expand Down
38 changes: 38 additions & 0 deletions tests/epics/conftest.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,38 @@
from collections.abc import Callable

import pytest
from bluesky.run_engine import RunEngine

from ophyd_async.core._detector import StandardDetector
from ophyd_async.core._device import DeviceCollector
from ophyd_async.core._mock_signal_utils import set_mock_value


@pytest.fixture
def ad_standard_det_factory(
    RE: RunEngine,
    static_path_provider,
) -> Callable:
    """Return a factory that builds a mocked areaDetector standard detector.

    The factory derives the PV prefix and device name from the detector
    class name, instantiates the detector against the mock backend, and
    seeds the HDF chunking and frame-size mock signals with small,
    predictable values so writer tests have a known geometry.
    """

    def generate_ad_standard_det(
        ad_standard_detector_class, number=1
    ) -> StandardDetector:
        # Derive a short name from the class, e.g. "KinetixDetector" -> "Kinetix".
        base_name = ad_standard_detector_class.__name__.removesuffix("Detector")

        prefix = f"{base_name.upper()}{number}:"
        device_name = f"test_ad{base_name.lower()}{number}"
        with DeviceCollector(mock=True):
            detector = ad_standard_detector_class(
                prefix,
                static_path_provider,
                name=device_name,
            )

        # Give the mocked chunking/frame-size signals reasonable values:
        # one frame per chunk, 10x10 frames.
        set_mock_value(detector.hdf.num_frames_chunks, 1)
        set_mock_value(detector.drv.array_size_x, 10)
        set_mock_value(detector.drv.array_size_y, 10)

        return detector

    return generate_ad_standard_det
Loading

0 comments on commit 35c07fe

Please sign in to comment.