Skip to content

Commit

Permalink
Only pass data block to init of PandAHDFWriter instead of entire PandA device (bluesky#512)
Browse files Browse the repository at this point in the history

* Update to hdf_panda and panda writer to only need data block instead of common panda blocks

* Resolve issues with tests that appeared due to changing init signature for PandAHDFWriter
  • Loading branch information
jwlodek authored Aug 9, 2024
1 parent d1c4e3c commit e4fb849
Show file tree
Hide file tree
Showing 3 changed files with 32 additions and 32 deletions.
2 changes: 1 addition & 1 deletion src/ophyd_async/fastcs/panda/_hdf_panda.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ def __init__(
prefix=prefix,
path_provider=path_provider,
name_provider=lambda: name,
panda_device=self,
panda_data_block=self.data,
)
super().__init__(
controller=controller,
Expand Down
34 changes: 17 additions & 17 deletions src/ophyd_async/fastcs/panda/_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -16,7 +16,7 @@
wait_for_value,
)

from ._block import CommonPandaBlocks
from ._block import DataBlock


class PandaHDFWriter(DetectorWriter):
Expand All @@ -27,9 +27,9 @@ def __init__(
prefix: str,
path_provider: PathProvider,
name_provider: NameProvider,
panda_device: CommonPandaBlocks,
panda_data_block: DataBlock,
) -> None:
self.panda_device = panda_device
self.panda_data_block = panda_data_block
self._prefix = prefix
self._path_provider = path_provider
self._name_provider = name_provider
Expand All @@ -42,23 +42,23 @@ async def open(self, multiplier: int = 1) -> Dict[str, DataKey]:
"""Retrieve and get descriptor of all PandA signals marked for capture"""

# Ensure flushes are immediate
await self.panda_device.data.flush_period.set(0)
await self.panda_data_block.flush_period.set(0)

self._file = None
info = self._path_provider(device_name=self.panda_device.name)
info = self._path_provider(device_name=self._name_provider())
# Set the initial values
await asyncio.gather(
self.panda_device.data.hdf_directory.set(info.directory_path),
self.panda_device.data.hdf_file_name.set(
self.panda_data_block.hdf_directory.set(info.directory_path),
self.panda_data_block.hdf_file_name.set(
f"{info.filename}.h5",
),
self.panda_device.data.num_capture.set(0),
self.panda_data_block.num_capture.set(0),
# TODO: Set create_dir_depth once available
# https://github.com/bluesky/ophyd-async/issues/317
)

# Wait for it to start, stashing the status that tells us when it finishes
await self.panda_device.data.capture.set(True)
await self.panda_data_block.capture.set(True)
if multiplier > 1:
raise ValueError(
"All PandA datasets should be scalar, multiplier should be 1"
Expand All @@ -74,7 +74,7 @@ async def _describe(self) -> Dict[str, DataKey]:
await self._update_datasets()
describe = {
ds.data_key: DataKey(
source=self.panda_device.data.hdf_directory.source,
source=self.panda_data_block.hdf_directory.source,
shape=ds.shape,
dtype="array" if ds.shape != [1] else "number",
dtype_numpy="<f8", # PandA data should always be written as Float64
Expand All @@ -90,7 +90,7 @@ async def _update_datasets(self) -> None:
representation of datasets that the panda will write.
"""

capture_table = await self.panda_device.data.datasets.get_value()
capture_table = await self.panda_data_block.datasets.get_value()
self._datasets = [
HDFDataset(dataset_name, "/" + dataset_name, [1], multiplier=1)
for dataset_name in capture_table["name"]
Expand All @@ -106,18 +106,18 @@ def matcher(value: int) -> bool:

matcher.__name__ = f"index_at_least_{index}"
await wait_for_value(
self.panda_device.data.num_captured, matcher, timeout=timeout
self.panda_data_block.num_captured, matcher, timeout=timeout
)

async def get_indices_written(self) -> int:
return await self.panda_device.data.num_captured.get_value()
return await self.panda_data_block.num_captured.get_value()

async def observe_indices_written(
self, timeout=DEFAULT_TIMEOUT
) -> AsyncGenerator[int, None]:
"""Wait until a specific index is ready to be collected"""
async for num_captured in observe_value(
self.panda_device.data.num_captured, timeout
self.panda_data_block.num_captured, timeout
):
yield num_captured // self._multiplier

Expand All @@ -128,8 +128,8 @@ async def collect_stream_docs(
if indices_written:
if not self._file:
self._file = HDFFile(
Path(await self.panda_device.data.hdf_directory.get_value())
/ Path(await self.panda_device.data.hdf_file_name.get_value()),
Path(await self.panda_data_block.hdf_directory.get_value())
/ Path(await self.panda_data_block.hdf_file_name.get_value()),
self._datasets,
)
for doc in self._file.stream_resources():
Expand All @@ -139,6 +139,6 @@ async def collect_stream_docs(

# Could put this function as default for StandardDetector
async def close(self):
await self.panda_device.data.capture.set(
await self.panda_data_block.capture.set(
False, wait=True, timeout=DEFAULT_TIMEOUT
)
28 changes: 14 additions & 14 deletions tests/fastcs/panda/test_writer.py
Original file line number Diff line number Diff line change
Expand Up @@ -103,8 +103,8 @@ async def mock_writer(tmp_path, mock_panda) -> PandaHDFWriter:
writer = PandaHDFWriter(
prefix="TEST-PANDA",
path_provider=dp,
name_provider=lambda: "test-panda",
panda_device=mock_panda,
name_provider=lambda: mock_panda.name,
panda_data_block=mock_panda.data,
)

return writer
Expand All @@ -114,9 +114,9 @@ async def mock_writer(tmp_path, mock_panda) -> PandaHDFWriter:
async def test_open_returns_correct_descriptors(
mock_writer: PandaHDFWriter, table: DatasetTable
):
assert hasattr(mock_writer.panda_device, "data")
assert hasattr(mock_writer, "panda_data_block")
set_mock_value(
mock_writer.panda_device.data.datasets,
mock_writer.panda_data_block.datasets,
table,
)
description = await mock_writer.open() # to make capturing status not time out
Expand All @@ -126,7 +126,7 @@ async def test_open_returns_correct_descriptors(
):
assert key == expected_key
assert entry == {
"source": mock_writer.panda_device.data.hdf_directory.source,
"source": mock_writer.panda_data_block.hdf_directory.source,
"shape": [
1,
],
Expand All @@ -138,16 +138,16 @@ async def test_open_returns_correct_descriptors(

async def test_open_close_sets_capture(mock_writer: PandaHDFWriter):
assert isinstance(await mock_writer.open(), dict)
assert await mock_writer.panda_device.data.capture.get_value()
assert await mock_writer.panda_data_block.capture.get_value()
await mock_writer.close()
assert not await mock_writer.panda_device.data.capture.get_value()
assert not await mock_writer.panda_data_block.capture.get_value()


async def test_open_sets_file_path_and_name(mock_writer: PandaHDFWriter, tmp_path):
await mock_writer.open()
path = await mock_writer.panda_device.data.hdf_directory.get_value()
assert path == tmp_path / mock_writer.panda_device.name
name = await mock_writer.panda_device.data.hdf_file_name.get_value()
path = await mock_writer.panda_data_block.hdf_directory.get_value()
assert path == tmp_path / mock_writer._name_provider()
name = await mock_writer.panda_data_block.hdf_file_name.get_value()
assert name == "data.h5"


Expand All @@ -158,16 +158,16 @@ async def test_open_errors_when_multiplier_not_one(mock_writer: PandaHDFWriter):

async def test_get_indices_written(mock_writer: PandaHDFWriter):
await mock_writer.open()
set_mock_value(mock_writer.panda_device.data.num_captured, 4)
set_mock_value(mock_writer.panda_data_block.num_captured, 4)
written = await mock_writer.get_indices_written()
assert written == 4


async def test_wait_for_index(mock_writer: PandaHDFWriter):
await mock_writer.open()
set_mock_value(mock_writer.panda_device.data.num_captured, 3)
set_mock_value(mock_writer.panda_data_block.num_captured, 3)
await mock_writer.wait_for_index(3, timeout=1)
set_mock_value(mock_writer.panda_device.data.num_captured, 2)
set_mock_value(mock_writer.panda_data_block.num_captured, 2)
with pytest.raises(TimeoutError):
await mock_writer.wait_for_index(3, timeout=0.1)

Expand All @@ -179,7 +179,7 @@ async def test_collect_stream_docs(
table: DatasetTable,
):
# Give the mock writer datasets
set_mock_value(mock_writer.panda_device.data.datasets, table)
set_mock_value(mock_writer.panda_data_block.datasets, table)

await mock_writer.open()

Expand Down

0 comments on commit e4fb849

Please sign in to comment.