docs changes
ZohebShaikh committed Sep 11, 2024
1 parent 97e8fd0 commit 6d13ddd
Showing 2 changed files with 143 additions and 127 deletions.
25 changes: 16 additions & 9 deletions src/ophyd_async/core/_detector.py
@@ -79,16 +79,23 @@ def get_deadtime(self, exposure: float | None) -> float:

    @abstractmethod
    async def prepare(self, trigger_info: TriggerInfo):
-       """, do all necessary steps to prepare detector for triggers.
+       """
+       Do all necessary steps to prepare the detector for triggers.

        Args:
-           num: Expected number of frames
-           trigger: Type of trigger for which to prepare the detector. Defaults to
-               DetectorTrigger.internal.
-           exposure: Exposure time with which to set up the detector. Defaults to None
-               if not applicable or the detector is expected to use its previously-set
-               exposure time.
+           trigger_info: A Pydantic model containing:
+               number: Expected number of frames.
+               trigger: Type of trigger for which to prepare the detector.
+                   Defaults to DetectorTrigger.internal.
+               livetime: Livetime / exposure time with which to set up the
+                   detector. Defaults to None if not applicable or the detector
+                   is expected to use its previously-set exposure time.
+               deadtime: The minimum deadtime between triggers. Defaults to None.
+               multiplier: The number of triggers grouped into a single
+                   StreamDatum index.
        """
        ...
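
For context, a minimal usage sketch of the model the new docstring describes; the helper name prepare_for_internal_triggers, the detector instance det, and the specific field values are illustrative assumptions, not part of this commit:

    from ophyd_async.core import DetectorTrigger, StandardDetector, TriggerInfo

    async def prepare_for_internal_triggers(det: StandardDetector) -> None:
        # Build the Pydantic model documented above; det is a hypothetical
        # connected StandardDetector instance.
        trigger_info = TriggerInfo(
            number=10,                         # expected number of frames
            trigger=DetectorTrigger.internal,  # detector generates its own triggers
            livetime=0.1,                      # exposure time per frame, in seconds
            deadtime=0.001,                    # minimum gap between triggers
            multiplier=1,                      # one trigger per StreamDatum index
        )
        await det.prepare(trigger_info)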

@abstractmethod
async def arm(self) -> AsyncStatus:
@@ -310,7 +317,6 @@ async def kickoff(self):
    @WatchableAsyncStatus.wrap
    async def complete(self):
        assert self._arm_status, "Prepare not run"
-       await self._arm_status
        assert self._trigger_info
        async for index in self.writer.observe_indices_written(
            self._trigger_info.frame_timeout
@@ -331,6 +337,7 @@ async def complete(self):
            )
            if index >= self._trigger_info.number:
                break
+       self._arm_status = None

    async def describe_collect(self) -> Dict[str, DataKey]:
        return self._describe
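
A rough sketch of the arm/complete lifecycle this change affects; the helper name fly and the detector instance det are illustrative assumptions (det is assumed to be a StandardDetector already prepared with a TriggerInfo):

    from ophyd_async.core import StandardDetector

    async def fly(det: StandardDetector) -> None:
        # kickoff() arms the detector and stores the arm status internally.
        await det.kickoff()
        # complete() watches the writer's indices until trigger_info.number
        # frames have been written; after this commit it clears the stored
        # arm status at the end instead of awaiting it up front.
        await det.complete()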
245 changes: 127 additions & 118 deletions tests/fastcs/panda/test_hdf_panda.py
@@ -1,7 +1,11 @@
import os
from typing import Dict
from unittest.mock import ANY

import bluesky.plan_stubs as bps
import numpy as np
import pytest
from bluesky import RunEngine

from ophyd_async.core import (
Device,
@@ -11,11 +15,17 @@
callback_on_mock_put,
set_mock_value,
)
from ophyd_async.core._flyer import StandardFlyer
from ophyd_async.core._signal import assert_emitted
from ophyd_async.fastcs.panda import (
DatasetTable,
HDFPanda,
PandaHdf5DatasetType,
)
from ophyd_async.fastcs.panda._trigger import StaticSeqTableTriggerLogic
from ophyd_async.plan_stubs._fly import (
prepare_static_seq_table_flyer_and_detectors_with_same_trigger,
)


@pytest.fixture
@@ -64,121 +74,120 @@ async def test_hdf_panda_passes_blocks_to_controller(mock_hdf_panda: HDFPanda):
assert mock_hdf_panda.controller.pcap is mock_hdf_panda.pcap


[Deleted in this commit: 118 lines holding the same test commented out line-by-line; it is identical to the re-enabled version below apart from the leading "# " markers and minor line wrapping.]
async def test_hdf_panda_hardware_triggered_flyable(
RE: RunEngine,
mock_hdf_panda,
tmp_path,
):
docs = {}

def append_and_print(name, doc):
if name not in docs:
docs[name] = []
docs[name] += [doc]

RE.subscribe(append_and_print)

shutter_time = 0.004
exposure = 1

trigger_logic = StaticSeqTableTriggerLogic(mock_hdf_panda.seq[1])
flyer = StandardFlyer(trigger_logic, [], name="flyer")

def flying_plan():
yield from bps.stage_all(mock_hdf_panda, flyer)

yield from prepare_static_seq_table_flyer_and_detectors_with_same_trigger(
flyer,
[mock_hdf_panda],
number_of_frames=1,
exposure=exposure,
shutter_time=shutter_time,
)

yield from bps.open_run()
yield from bps.declare_stream(mock_hdf_panda, name="main_stream", collect=True)

set_mock_value(flyer.trigger_logic.seq.active, 1)

yield from bps.kickoff(flyer, wait=True)
yield from bps.kickoff(mock_hdf_panda)

yield from bps.complete(flyer, wait=False, group="complete")
yield from bps.complete(mock_hdf_panda, wait=False, group="complete")

        # Manually increment the index as if a frame was taken
set_mock_value(mock_hdf_panda.data.num_captured, 1)
set_mock_value(flyer.trigger_logic.seq.active, 0)

done = False
while not done:
try:
yield from bps.wait(group="complete", timeout=0.5)
except TimeoutError:
pass
else:
done = True
yield from bps.collect(
mock_hdf_panda,
return_payload=False,
name="main_stream",
)
yield from bps.wait(group="complete")
yield from bps.close_run()

yield from bps.unstage_all(flyer, mock_hdf_panda)
yield from bps.wait_for([lambda: mock_hdf_panda.controller.disarm()])

# fly scan
RE(flying_plan())

assert_emitted(
docs, start=1, descriptor=1, stream_resource=2, stream_datum=2, stop=1
)

# test descriptor
    data_key_names: list[str] = docs["descriptor"][0]["object_keys"]["panda"]
assert data_key_names == ["x", "y"]
for data_key_name in data_key_names:
assert (
docs["descriptor"][0]["data_keys"][data_key_name]["source"]
== "mock+soft://panda-data-hdf_directory"
)

# test stream resources
for dataset_name, stream_resource, data_key_name in zip(
("x", "y"), docs["stream_resource"], data_key_names
):

def assert_resource_document():
assert stream_resource == {
"run_start": docs["start"][0]["uid"],
"uid": ANY,
"data_key": data_key_name,
"mimetype": "application/x-hdf5",
"uri": "file://localhost" + str(tmp_path / "test-panda.h5"),
"parameters": {
"dataset": f"/{dataset_name}",
"swmr": False,
"multiplier": 1,
},
}
assert "test-panda.h5" in stream_resource["uri"]

assert_resource_document()

# test stream datum
for stream_datum in docs["stream_datum"]:
assert stream_datum["descriptor"] == docs["descriptor"][0]["uid"]
assert stream_datum["seq_nums"] == {
"start": 1,
"stop": 2,
}
assert stream_datum["indices"] == {
"start": 0,
"stop": 1,
}
assert stream_datum["stream_resource"] in [
sd["uid"].split("/")[0] for sd in docs["stream_datum"]
]
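
To exercise the re-enabled test on its own, a standard pytest invocation should work (assuming the repository's dev dependencies are installed):

    pytest tests/fastcs/panda/test_hdf_panda.py -k hdf_panda_hardware_triggered_flyable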
