diff --git a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_session.py b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_2p_only_session.py similarity index 67% rename from src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_session.py rename to src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_2p_only_session.py index e377eda..d8f9293 100644 --- a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_session.py +++ b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_2p_only_session.py @@ -4,8 +4,9 @@ from typing import Union from neuroconv.utils import load_dict_from_file, dict_deep_update from higley_lab_to_nwb.benisty_2024 import Benisty2024NWBConverter +from higley_lab_to_nwb.lohani_2022.interfaces.lohani_2022_spike2signals_interface import get_streams import os - +import glob def _get_sampling_frequency_and_image_size(folder_path: Union[str, Path]): from roiextractors.extractors.tiffimagingextractors.scanimagetiff_utils import ( @@ -27,7 +28,6 @@ def _get_sampling_frequency_and_image_size(folder_path: Union[str, Path]): image_size = [_num_rows, _num_columns] return parsed_metadata["sampling_frequency"], image_size - def session_to_nwb( folder_path: Union[str, Path], output_dir_path: Union[str, Path], session_id: str, stub_test: bool = False ): @@ -42,6 +42,45 @@ def session_to_nwb( source_data = dict() conversion_options = dict() + search_pattern = "_".join(session_id.split("_")[:2]) + + # Add Analog signals from Spike2 + file_path = glob.glob(os.path.join(folder_path, f"{search_pattern}*.smrx"))[0] + stream_ids, stream_names = get_streams(file_path=file_path) + + # Define each smrx signal name + TTLsignals_name_map = { + stream_ids[stream_names == "galvo"][0]: "TTLSignal2PExcitation", + stream_ids[stream_names == "pupilcam"][0]: "TTLSignalPupilCamera", + } + behavioral_name_map = { + stream_ids[stream_names == "wheel"][0]: "WheelSignal", + } + + source_data.update( + dict( + Spike2Signals=dict( + file_path=file_path, + 
ttl_stream_ids_to_names_map=TTLsignals_name_map, + behavioral_stream_ids_to_names_map=behavioral_name_map, + ) + ) + ) + conversion_options.update(dict(Spike2Signals=dict(stub_test=stub_test))) + + if "vis_stim" in session_id: + csv_file_path = glob.glob(os.path.join(folder_path, f"{search_pattern}*.csv"))[0] + source_data.update( + dict( + VisualStimulusInterface=dict( + spike2_file_path=file_path, + csv_file_path=csv_file_path, + stream_id=stream_ids[stream_names == "Vis"][0], + ) + ) + ) + conversion_options.update(dict(VisualStimulusInterface=dict(stub_test=stub_test))) + # Add 2p Imaging imaging_path = folder_path / "tiff" source_data.update(dict(TwoPhotonImaging=dict(folder_path=str(imaging_path), file_pattern="*.tif"))) @@ -84,14 +123,28 @@ def session_to_nwb( ) ) + # Add Behavioral Video Recording + avi_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*.avi")) + video_file_path = avi_files[0] + source_data.update(dict(Video=dict(file_paths=[video_file_path], verbose=False))) + conversion_options.update(dict(Video=dict(stub_test=stub_test))) + + # Add Facemap output + # mat_files = glob.glob(os.path.join(folder_path, f"{search_pattern}*_proc.mat")) + # mat_file_path = mat_files[0] + # source_data.update( + # dict( + # FacemapInterface=dict(mat_file_path=str(mat_file_path), video_file_path=str(video_file_path), verbose=False) + # ) + # ) + # Add ophys metadata - ophys_metadata_path = Path(__file__).parent / "metadata" / "benisty_2024_ophys_metadata.yaml" + ophys_metadata_path = Path(__file__).parent / "metadata" / "benisty_2024_ophys_2p_only_metadata.yaml" ophys_metadata = load_dict_from_file(ophys_metadata_path) converter = Benisty2024NWBConverter(source_data=source_data, ophys_metadata=ophys_metadata) # Add datetime to conversion - metadata = converter.get_metadata() subject_id = session_id.split("_")[1] metadata["Subject"].update(subject_id=subject_id) diff --git a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_all_sessions.py
b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_all_sessions.py index ef39390..5482681 100644 --- a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_all_sessions.py +++ b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_convert_all_sessions.py @@ -1,9 +1,8 @@ """Primary script to run to convert an entire session for of data using the NWBConverter.""" from pathlib import Path -from typing import Union import os -from .benisty_2024_convert_session import session_to_nwb +from .benisty_2024_convert_2p_only_session import session_to_nwb diff --git a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_nwbconverter.py b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_nwbconverter.py index fc176fb..e91f9ad 100644 --- a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_nwbconverter.py +++ b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_nwbconverter.py @@ -5,6 +5,12 @@ from neuroconv.utils import DeepDict from neuroconv.datainterfaces import ScanImageMultiFileImagingInterface, Suite2pSegmentationInterface +from higley_lab_to_nwb.lohani_2022.interfaces import ( + Lohani2022Spike2SignalsInterface, + Lohani2022VisualStimulusInterface, +) +from neuroconv.datainterfaces import VideoInterface, FacemapInterface + from .interfaces import Benisty2024CidanSegmentationInterface from higley_lab_to_nwb.lohani_2022.interfaces import Lohani2022Spike2SignalsInterface class Benisty2024NWBConverter(NWBConverter): @@ -13,13 +19,16 @@ class Benisty2024NWBConverter(NWBConverter): data_interface_classes = dict( TwoPhotonImaging=ScanImageMultiFileImagingInterface, Suite2pSegmentation=Suite2pSegmentationInterface, - CIDANSegmentation=Benisty2024CidanSegmentationInterface, Spike2Signals=Lohani2022Spike2SignalsInterface, + CIDANSegmentation=Benisty2024CidanSegmentationInterface, + Video=VideoInterface, + FacemapInterface=FacemapInterface, + VisualStimulusInterface=Lohani2022VisualStimulusInterface, ) - def __init__(self, source_data: Dict[str, dict],ophys_metadata: Dict[str, dict], 
verbose: bool = True): + def __init__(self, source_data: Dict[str, dict], ophys_metadata: Dict[str, dict], verbose: bool = True): super().__init__(source_data, verbose) - self.ophys_metadata=ophys_metadata + self.ophys_metadata = ophys_metadata def get_metadata(self) -> DeepDict: metadata = super().get_metadata() @@ -38,7 +47,7 @@ def get_metadata(self) -> DeepDict: metadata["Ophys"]["ImagingPlane"] = self.ophys_metadata["Ophys"]["ImagingPlane"] return metadata - + def temporally_align_data_interfaces(self): ttlsignal_interface = self.data_interface_objects["Spike2Signals"] # Synch imaging @@ -54,4 +63,20 @@ def temporally_align_data_interfaces(self): ) ttl_times = ttlsignal_interface.get_event_times_from_ttl(stream_id=stream_id) imaging_interface.set_aligned_starting_time(ttl_times[0]) - segmentation_interface.set_aligned_starting_time(ttl_times[0]) \ No newline at end of file + segmentation_interface.set_aligned_starting_time(ttl_times[0]) + + # Synch behaviour + video_interface = self.data_interface_objects["Video"] + # facemap_interface = self.data_interface_objects["FacemapInterface"] + video_interface._timestamps = video_interface.get_timestamps() + stream_id = next( + ( + stream_id + for stream_id, stream_name in ttlsignal_interface.ttl_stream_ids_to_names_map.items() + if stream_name == "TTLSignalPupilCamera" + ), + None, + ) + ttl_times = ttlsignal_interface.get_event_times_from_ttl(stream_id=stream_id) + video_interface.set_aligned_starting_time(ttl_times[0]) + # facemap_interface.set_aligned_starting_time(ttl_times[0]) diff --git a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_requirements.txt b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_requirements.txt index 26a5e6e..3ea173a 100644 --- a/src/higley_lab_to_nwb/benisty_2024/benisty_2024_requirements.txt +++ b/src/higley_lab_to_nwb/benisty_2024/benisty_2024_requirements.txt @@ -1,4 +1,5 @@ nwb-conversion-tools==0.11.1 # Example of specific pinned dependecy some-extra-package==1.11.3 # Example of 
another extra package that's necessary for the current conversion roiextractors -neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@facemap \ No newline at end of file +neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@facemap +ndx-facemap-motionsvd @ git+https://github.com/catalystneuro/ndx-facemap-motionsvd.git@main \ No newline at end of file diff --git a/src/higley_lab_to_nwb/benisty_2024/metadata/benisty_2024_ophys_metadata.yaml b/src/higley_lab_to_nwb/benisty_2024/metadata/benisty_2024_ophys_2p_only_metadata.yaml similarity index 100% rename from src/higley_lab_to_nwb/benisty_2024/metadata/benisty_2024_ophys_metadata.yaml rename to src/higley_lab_to_nwb/benisty_2024/metadata/benisty_2024_ophys_2p_only_metadata.yaml diff --git a/src/higley_lab_to_nwb/benisty_2024/utils/lohani_2022_utils.py b/src/higley_lab_to_nwb/benisty_2024/utils/lohani_2022_utils.py deleted file mode 100644 index ed526f6..0000000 --- a/src/higley_lab_to_nwb/benisty_2024/utils/lohani_2022_utils.py +++ /dev/null @@ -1,46 +0,0 @@ -import tifffile -from natsort import natsorted -import numpy as np -from datetime import datetime -from zoneinfo import ZoneInfo - -def read_session_start_time(folder_path): - tiff_file_paths = natsorted(folder_path.glob("*.tif")) - with tifffile.TiffFile(tiff_file_paths[0]) as tif: - metadata = tif.pages[0].tags['ImageDescription'].value - - lines = metadata.split('\r\n') - date_time_str = lines[1] - # Assuming the timezone is always 'Eastern Standard Time' - date_time_obj = datetime.strptime(date_time_str[:-len(' Eastern Standard Time')], '%a, %d %b %Y %H:%M:%S') - date_time_obj = date_time_obj.replace(tzinfo=ZoneInfo('US/Eastern')) - - return date_time_obj - -def get_tiff_file_paths_sorted_by_channel(folder_path: str, start_frame_index: int = 0, stub_test: bool = False): - - tiff_file_paths = natsorted(folder_path.glob("*.tif")) - if stub_test: - tiff_file_paths = tiff_file_paths[:100] # for testing - selected_tiff_file_paths = 
tiff_file_paths[start_frame_index::3] - - return selected_tiff_file_paths - - -def create_tiff_stack(folder_path: str, output_file_path: str, start_frame_index: int = 0, frame_side: str = "left", stub_test: bool = False): - - selected_tiff_file_paths = get_tiff_file_paths_sorted_by_channel( - folder_path=folder_path, start_frame_index=start_frame_index, stub_test = stub_test - ) - frames = [tifffile.imread(file_path) for file_path in selected_tiff_file_paths] - - if frame_side == "left": - stack = np.stack([frame[:, :512].transpose(1,0) for frame in frames], axis=0) - elif frame_side == "right": - stack = np.stack([frame[:, 512:].transpose(1,0) for frame in frames], axis=0) - else: - raise ValueError("frame_side must be either 'right' or 'left'") - - tifffile.imwrite(output_file_path, stack) - - return \ No newline at end of file