From 444221e976b3ecffd8ef5a73b91cae69fe5421a3 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 5 Nov 2024 21:11:48 -0600 Subject: [PATCH 01/20] update deeplabcut to ndx-pose 0.2 --- pyproject.toml | 2 +- .../behavior/deeplabcut/_dlc_utils.py | 93 +++++++++++-------- .../deeplabcut/deeplabcutdatainterface.py | 14 ++- .../behavior/test_behavior_interfaces.py | 32 +++++-- .../behavior/test_lightningpose_converter.py | 9 ++ 5 files changed, 102 insertions(+), 48 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index a83380467..b3f7c780b 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -117,7 +117,7 @@ sleap = [ "sleap-io>=0.0.2; python_version>='3.9'", ] deeplabcut = [ - "ndx-pose==0.1.1", + "ndx-pose>=0.2", "tables; platform_system != 'Darwin'", "tables>=3.10.1; platform_system == 'Darwin' and python_version >= '3.10'", ] diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py index 9e368fb39..8f3fa4224 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py @@ -1,4 +1,3 @@ -import importlib import pickle import warnings from pathlib import Path @@ -93,7 +92,7 @@ def _get_cv2_timestamps(file_path: Union[Path, str]): return timestamps -def _get_movie_timestamps(movie_file, VARIABILITYBOUND=1000, infer_timestamps=True): +def _get_video_timestamps(movie_file, VARIABILITYBOUND=1000, infer_timestamps=True): """ Return numpy array of the timestamps for a video. 
@@ -251,21 +250,6 @@ def _get_video_info_from_config_file(config_file_path: Path, vidname: str): return video_file_path, image_shape -def _get_pes_args( - *, - h5file: Path, - individual_name: str, -): - h5file = Path(h5file) - - _, scorer = h5file.stem.split("DLC") - scorer = "DLC" + scorer - - df = _ensure_individuals_in_header(pd.read_hdf(h5file), individual_name) - - return scorer, df - - def _write_pes_to_nwbfile( nwbfile, animal, @@ -278,13 +262,52 @@ def _write_pes_to_nwbfile( exclude_nans, pose_estimation_container_kwargs: Optional[dict] = None, ): - - from ndx_pose import PoseEstimation, PoseEstimationSeries + """ + Updated version of _write_pes_to_nwbfile to work with ndx-pose v0.2.0+ + """ + from ndx_pose import PoseEstimation, PoseEstimationSeries, Skeleton, Skeletons + from pynwb.file import Subject pose_estimation_container_kwargs = pose_estimation_container_kwargs or dict() + pose_estimation_name = pose_estimation_container_kwargs.get("name", "PoseEstimationDeepLabCut") + + # Create a subject if it doesn't exist + if nwbfile.subject is None: + subject = Subject(subject_id=animal) + nwbfile.subject = subject + else: + subject = nwbfile.subject + + # Create skeleton from the keypoints + keypoints = df_animal.columns.get_level_values("bodyparts").unique() + animal = animal if animal else "" + subject = subject if animal == subject.subject_id else None + skeleton_name = f"Skeleton{pose_estimation_name}_{animal.capitalize()}" + skeleton = Skeleton( + name=skeleton_name, + nodes=list(keypoints), + edges=np.array(paf_graph) if paf_graph else None, # Convert paf_graph to numpy array + subject=subject, + ) + + # Create Skeletons container + if "behavior" not in nwbfile.processing: + behavior_processing_module = nwbfile.create_processing_module( + name="behavior", description="processed behavioral data" + ) + skeletons = Skeletons(skeletons=[skeleton]) + behavior_processing_module.add(skeletons) + else: + behavior_processing_module = 
nwbfile.processing["behavior"] + if "Skeletons" not in behavior_processing_module.data_interfaces: + skeletons = Skeletons(skeletons=[skeleton]) + behavior_processing_module.add(skeletons) + else: + skeletons = behavior_processing_module["Skeletons"] + skeletons.add_skeletons(skeleton) pose_estimation_series = [] - for keypoint in df_animal.columns.get_level_values("bodyparts").unique(): + for keypoint in keypoints: data = df_animal.xs(keypoint, level="bodyparts", axis=1).to_numpy() if exclude_nans: @@ -306,34 +329,30 @@ def _write_pes_to_nwbfile( ) pose_estimation_series.append(pes) - deeplabcut_version = None - is_deeplabcut_installed = importlib.util.find_spec(name="deeplabcut") is not None - if is_deeplabcut_installed: - deeplabcut_version = importlib.metadata.version(distribution_name="deeplabcut") + camera_name = pose_estimation_name + if camera_name not in nwbfile.devices: + camera = nwbfile.create_device( + name=camera_name, + description="Camera used for behavioral recording and pose estimation.", + ) + else: + camera = nwbfile.devices[camera_name] - # TODO, taken from the original implementation, improve it if the video is passed + # Create PoseEstimation container with updated arguments dimensions = [list(map(int, image_shape.split(",")))[1::2]] pose_estimation_default_kwargs = dict( pose_estimation_series=pose_estimation_series, description="2D keypoint coordinates estimated using DeepLabCut.", - original_videos=[video_file_path], + original_videos=[video_file_path] if video_file_path else None, dimensions=dimensions, + devices=[camera], scorer=scorer, source_software="DeepLabCut", - source_software_version=deeplabcut_version, - nodes=[pes.name for pes in pose_estimation_series], - edges=paf_graph if paf_graph else None, - **pose_estimation_container_kwargs, + skeleton=skeleton, ) pose_estimation_default_kwargs.update(pose_estimation_container_kwargs) pose_estimation_container = PoseEstimation(**pose_estimation_default_kwargs) - if "behavior" in 
nwbfile.processing: # TODO: replace with get_module - behavior_processing_module = nwbfile.processing["behavior"] - else: - behavior_processing_module = nwbfile.create_processing_module( - name="behavior", description="processed behavioral data" - ) behavior_processing_module.add(pose_estimation_container) return nwbfile @@ -400,7 +419,7 @@ def add_subject_to_nwbfile( if video_file_path is None: timestamps = df.index.tolist() # setting timestamps to dummy else: - timestamps = _get_movie_timestamps(video_file_path, infer_timestamps=True) + timestamps = _get_video_timestamps(video_file_path, infer_timestamps=True) # Fetch the corresponding metadata pickle file, we extract the edges graph from here # TODO: This is the original implementation way to extract the file name but looks very brittle. Improve it diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index 21b054e85..a69db1396 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -12,7 +12,7 @@ class DeepLabCutInterface(BaseTemporalAlignmentInterface): """Data interface for DeepLabCut datasets.""" display_name = "DeepLabCut" - keywords = ("DLC",) + keywords = ("DLC", "DeepLabCut", "pose estimation", "behavior") associated_suffixes = (".h5",) info = "Interface for handling data from DeepLabCut." @@ -47,6 +47,9 @@ def __init__( verbose: bool, default: True controls verbosity. 
""" + # Fail quick if the user doesn't have the required dependencies + from ndx_pose import PoseEstimation, PoseEstimationSeries # noqa F401 + from ._dlc_utils import _read_config file_path = Path(file_path) @@ -58,6 +61,8 @@ def __init__( self.config_dict = _read_config(config_file_path=config_file_path) self.subject_name = subject_name self.verbose = verbose + self.pose_estimation_container_kwargs = dict() + super().__init__(file_path=file_path, config_file_path=config_file_path) def get_metadata(self): @@ -97,7 +102,7 @@ def add_to_nwbfile( self, nwbfile: NWBFile, metadata: Optional[dict] = None, - container_name: str = "PoseEstimation", + container_name: str = "PoseEstimationDeepLabCut", ): """ Conversion from DLC output files to nwb. Derived from dlc2nwb library. @@ -108,14 +113,17 @@ def add_to_nwbfile( nwb file to which the recording information is to be added metadata: dict metadata info for constructing the nwb file (optional). + container_name: str, default: "PoseEstimationDeepLabCut" + name of the PoseEstimation container in the nwb """ from ._dlc_utils import add_subject_to_nwbfile + self.pose_estimation_container_kwargs["name"] = container_name add_subject_to_nwbfile( nwbfile=nwbfile, h5file=str(self.source_data["file_path"]), individual_name=self.subject_name, config_file=self.source_data["config_file_path"], timestamps=self._timestamps, - pose_estimation_container_kwargs=dict(name=container_name), + pose_estimation_container_kwargs=self.pose_estimation_container_kwargs, ) diff --git a/tests/test_on_data/behavior/test_behavior_interfaces.py b/tests/test_on_data/behavior/test_behavior_interfaces.py index 8e3e01d61..7d5e6ee51 100644 --- a/tests/test_on_data/behavior/test_behavior_interfaces.py +++ b/tests/test_on_data/behavior/test_behavior_interfaces.py @@ -41,7 +41,16 @@ except ImportError: from setup_paths import BEHAVIOR_DATA_PATH, OUTPUT_PATH +from importlib.metadata import version +from packaging import version as version_parse + 
+ndx_pose_version = version("ndx-pose") + + +@pytest.mark.skipif( + version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" +) class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -155,6 +164,9 @@ def check_read_nwb(self, nwbfile_path: str): assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) +@pytest.mark.skipif( + version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" +) class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -357,7 +369,7 @@ def check_renaming_instance(self, nwbfile_path: str): with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: nwbfile = io.read() assert "behavior" in nwbfile.processing - assert "PoseEstimation" not in nwbfile.processing["behavior"].data_interfaces + assert "PoseEstimationDeepLabCut" not in nwbfile.processing["behavior"].data_interfaces assert custom_container_name in nwbfile.processing["behavior"].data_interfaces def check_read_nwb(self, nwbfile_path: str): @@ -365,9 +377,11 @@ def check_read_nwb(self, nwbfile_path: str): nwbfile = io.read() assert "behavior" in nwbfile.processing processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimation" in processing_module_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces - pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] 
expected_pose_estimation_series_are_in_nwb_file = [ @@ -395,9 +409,11 @@ def check_read_nwb(self, nwbfile_path: str): nwbfile = io.read() assert "behavior" in nwbfile.processing processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimation" in processing_module_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces - pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] expected_pose_estimation_series_are_in_nwb_file = [ @@ -441,9 +457,11 @@ def check_custom_timestamps(self, nwbfile_path: str): nwbfile = io.read() assert "behavior" in nwbfile.processing processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimation" in processing_module_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces - pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series for pose_estimation in pose_estimation_series_in_nwb.values(): pose_timestamps = pose_estimation.timestamps diff --git a/tests/test_on_data/behavior/test_lightningpose_converter.py b/tests/test_on_data/behavior/test_lightningpose_converter.py index 4d0f8ab89..9aad9f131 100644 --- a/tests/test_on_data/behavior/test_lightningpose_converter.py +++ b/tests/test_on_data/behavior/test_lightningpose_converter.py @@ -1,11 +1,15 @@ import shutil import tempfile from datetime import datetime +from importlib.metadata import version from pathlib import Path from warnings import warn +import pytest from hdmf.testing import TestCase from ndx_pose import PoseEstimation +from packaging 
import version +from packaging import version as version_parse from pynwb import NWBHDF5IO from pynwb.image import ImageSeries @@ -16,7 +20,12 @@ from ..setup_paths import BEHAVIOR_DATA_PATH +ndx_pose_version = version("ndx-pose") + +@pytest.mark.skipif( + version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" +) class TestLightningPoseConverter(TestCase): @classmethod def setUpClass(cls) -> None: From 0cf99e73a63c2c8b69609dea4dd053435e762fe6 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 5 Nov 2024 21:12:18 -0600 Subject: [PATCH 02/20] relax contrain --- pyproject.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pyproject.toml b/pyproject.toml index b3f7c780b..b125b38b8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -127,7 +127,7 @@ video = [ "opencv-python-headless>=4.8.1.78", ] lightningpose = [ - "ndx-pose==0.1.1", + "ndx-pose>=0.1.1", "neuroconv[video]", ] medpc = [ From c979f808365b7d4b8bd4eeb18078389b56b25f99 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 5 Nov 2024 21:13:49 -0600 Subject: [PATCH 03/20] soft bound on lighting interface --- .../behavior/lightningpose/lightningposedatainterface.py | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index f103b7c9a..dcb0a05d7 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -80,10 +80,19 @@ def __init__( verbose : bool, default: True controls verbosity. ``True`` by default. 
""" + from importlib.metadata import version + + from packaging import version as version_parse + from neuroconv.datainterfaces.behavior.video.video_utils import ( VideoCaptureContext, ) + ndx_pose_version = version("ndx-pose") + + if version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2.0"): + raise ImportError("The ndx-pose version must be less than 0.2.0.") + self._vc = VideoCaptureContext self.file_path = Path(file_path) From 50ecd760e4d878dc672832d48098d7a1725ae581 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 5 Nov 2024 21:20:07 -0600 Subject: [PATCH 04/20] changelog --- CHANGELOG.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/CHANGELOG.md b/CHANGELOG.md index fa679434a..eea373f3f 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -7,6 +7,8 @@ ## Features ## Improvements +* Use the latest version of ndx-pose for `DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) + # v0.6.5 (November 1, 2024) From 9301d7f809f104735848744c5179ef4097dd4e92 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Fri, 20 Dec 2024 13:06:34 -0600 Subject: [PATCH 05/20] changelog --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index eea642cc6..f03048573 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -5,9 +5,11 @@ ## Bug Fixes ## Features +* Use the latest version of ndx-pose for `DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) ## Improvements + # v0.6.9 (Upcoming) Small fixes should be here. @@ -47,7 +49,6 @@ Small fixes should be here. 
*`SpikeGLXNIDQInterface` now handdles digital demuxed channels (`XD0`) [#1152](https://github.com/catalystneuro/neuroconv/pull/1152) ## Improvements -* Use the latest version of ndx-pose for `DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) * Use mixing tests for ecephy's mocks [PR #1136](https://github.com/catalystneuro/neuroconv/pull/1136) * Use pytest format for dandi tests to avoid window permission error on teardown [PR #1151](https://github.com/catalystneuro/neuroconv/pull/1151) * Added many docstrings for public functions [PR #1063](https://github.com/catalystneuro/neuroconv/pull/1063) From 8046d0d0a125f58175422da450bb062d9f4f1cfd Mon Sep 17 00:00:00 2001 From: Paul Adkisson Date: Tue, 14 Jan 2025 05:07:13 +1100 Subject: [PATCH 06/20] Update ndx-pose for Lightning pose (#1170) --- pyproject.toml | 2 +- .../lightningposedatainterface.py | 52 ++++++++++++------- .../behavior/test_behavior_interfaces.py | 9 +--- .../behavior/test_lightningpose_converter.py | 10 +--- 4 files changed, 36 insertions(+), 37 deletions(-) diff --git a/pyproject.toml b/pyproject.toml index de1dd693f..c13dda138 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -128,7 +128,7 @@ video = [ "opencv-python-headless>=4.8.1.78", ] lightningpose = [ - "ndx-pose>=0.1.1", + "ndx-pose>=0.2", "neuroconv[video]", ] medpc = [ diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index 0ec17c810..31d5810d8 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -40,9 +40,10 @@ def get_metadata_schema(self) -> dict: description=dict(type="string"), scorer=dict(type="string"), source_software=dict(type="string", default="LightningPose"), + camera_name=dict(type="string", default="CameraPoseEstimation"), ), 
patternProperties={ - "^(?!(name|description|scorer|source_software)$)[a-zA-Z0-9_]+$": dict( + "^(?!(name|description|scorer|source_software|camera_name)$)[a-zA-Z0-9_]+$": dict( title="PoseEstimationSeries", type="object", properties=dict(name=dict(type="string"), description=dict(type="string")), @@ -80,22 +81,15 @@ def __init__( verbose : bool, default: True controls verbosity. ``True`` by default. """ - from importlib.metadata import version # This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created # For more detail, see https://github.com/rly/ndx-pose/issues/36 import ndx_pose # noqa: F401 - from packaging import version as version_parse from neuroconv.datainterfaces.behavior.video.video_utils import ( VideoCaptureContext, ) - ndx_pose_version = version("ndx-pose") - - if version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2.0"): - raise ImportError("The ndx-pose version must be less than 0.2.0.") - self._vc = VideoCaptureContext self.file_path = Path(file_path) @@ -170,6 +164,7 @@ def get_metadata(self) -> DeepDict: description="Contains the pose estimation series for each keypoint.", scorer=self.scorer_name, source_software="LightningPose", + camera_name="CameraPoseEstimation", ) for keypoint_name in self.keypoint_names: keypoint_name_without_spaces = keypoint_name.replace(" ", "") @@ -206,7 +201,7 @@ def add_to_nwbfile( The description of how the confidence was computed, e.g., 'Softmax output of the deep neural network'. 
stub_test : bool, default: False """ - from ndx_pose import PoseEstimation, PoseEstimationSeries + from ndx_pose import PoseEstimation, PoseEstimationSeries, Skeleton, Skeletons metadata_copy = deepcopy(metadata) @@ -223,15 +218,14 @@ def add_to_nwbfile( original_video_name = str(self.original_video_file_path) else: original_video_name = metadata_copy["Behavior"]["Videos"][0]["name"] - - pose_estimation_kwargs = dict( - name=pose_estimation_metadata["name"], - description=pose_estimation_metadata["description"], - source_software=pose_estimation_metadata["source_software"], - scorer=pose_estimation_metadata["scorer"], - original_videos=[original_video_name], - dimensions=[self.dimension], - ) + camera_name = pose_estimation_metadata["camera_name"] + if camera_name in nwbfile.devices: + camera = nwbfile.devices[camera_name] + else: + camera = nwbfile.create_device( + name=camera_name, + description="Camera used for behavioral recording and pose estimation.", + ) pose_estimation_data = self.pose_estimation_data if not stub_test else self.pose_estimation_data.head(n=10) timestamps = self.get_timestamps(stub_test=stub_test) @@ -263,8 +257,28 @@ def add_to_nwbfile( pose_estimation_series.append(PoseEstimationSeries(**pose_estimation_series_kwargs)) - pose_estimation_kwargs.update( + # Add Skeleton(s) + nodes = [keypoint_name.replace(" ", "") for keypoint_name in self.keypoint_names] + subject = nwbfile.subject if nwbfile.subject is not None else None + name = f"Skeleton{pose_estimation_name}" + skeleton = Skeleton(name=name, nodes=nodes, subject=subject) + if "Skeletons" in behavior.data_interfaces: + skeletons = behavior.data_interfaces["Skeletons"] + skeletons.add_skeletons(skeleton) + else: + skeletons = Skeletons(skeletons=[skeleton]) + behavior.add(skeletons) + + pose_estimation_kwargs = dict( + name=pose_estimation_metadata["name"], + description=pose_estimation_metadata["description"], + source_software=pose_estimation_metadata["source_software"], + 
scorer=pose_estimation_metadata["scorer"], + original_videos=[original_video_name], + dimensions=[self.dimension], pose_estimation_series=pose_estimation_series, + devices=[camera], + skeleton=skeleton, ) if self.source_data["labeled_video_file_path"]: diff --git a/tests/test_on_data/behavior/test_behavior_interfaces.py b/tests/test_on_data/behavior/test_behavior_interfaces.py index 00fcf7c5d..fbf007a2c 100644 --- a/tests/test_on_data/behavior/test_behavior_interfaces.py +++ b/tests/test_on_data/behavior/test_behavior_interfaces.py @@ -42,14 +42,9 @@ from importlib.metadata import version -from packaging import version as version_parse - ndx_pose_version = version("ndx-pose") -@pytest.mark.skipif( - version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" -) class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -94,6 +89,7 @@ def setup_metadata(self, request): description="Contains the pose estimation series for each keypoint.", scorer="heatmap_tracker", source_software="LightningPose", + camera_name="CameraPoseEstimation", ) ) cls.expected_metadata[cls.pose_estimation_name].update( @@ -165,9 +161,6 @@ def check_read_nwb(self, nwbfile_path: str): assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) -@pytest.mark.skipif( - version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" -) class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( diff --git a/tests/test_on_data/behavior/test_lightningpose_converter.py b/tests/test_on_data/behavior/test_lightningpose_converter.py index ebf2f59f5..e72a3f687 100644 --- a/tests/test_on_data/behavior/test_lightningpose_converter.py +++ 
b/tests/test_on_data/behavior/test_lightningpose_converter.py @@ -1,14 +1,10 @@ import shutil import tempfile from datetime import datetime -from importlib.metadata import version from pathlib import Path from warnings import warn -import pytest from hdmf.testing import TestCase -from packaging import version -from packaging import version as version_parse from pynwb import NWBHDF5IO from pynwb.image import ImageSeries @@ -19,12 +15,7 @@ from ..setup_paths import BEHAVIOR_DATA_PATH -ndx_pose_version = version("ndx-pose") - -@pytest.mark.skipif( - version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2"), reason="ndx_pose version is smaller than 0.2" -) class TestLightningPoseConverter(TestCase): @classmethod def setUpClass(cls) -> None: @@ -73,6 +64,7 @@ def setUpClass(cls) -> None: description="Contains the pose estimation series for each keypoint.", scorer="heatmap_tracker", source_software="LightningPose", + camera_name="CameraPoseEstimation", ) cls.pose_estimation_metadata.update( From abc999373a49a6474ff03d683f6d4279c41047f9 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Mon, 20 Jan 2025 15:58:29 -0600 Subject: [PATCH 07/20] changelog correction --- CHANGELOG.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 27025125e..931517f11 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,11 +6,12 @@ ## Features * Added `metadata` and `conversion_options` as arguments to `NWBConverter.temporally_align_data_interfaces` [PR #1162](https://github.com/catalystneuro/neuroconv/pull/1162) +* Use the latest version of ndx-pose for `DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) ## Improvements -# v0.6.7 (January 20, 2024) +# v0.6.7 (January 20, 2025) ## Deprecations From 6d765a34a2f711a4afda80a9b4afc4bc1766c928 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Mon, 20 Jan 2025 16:32:45 -0600 Subject: [PATCH 08/20] fix tests --- 
.../deeplabcut/deeplabcutdatainterface.py | 2 + .../behavior/sleap/sleapdatainterface.py | 13 + .../behavior/test_behavior_interfaces.py | 532 ----------------- .../test_pose_estimation_interfaces.py | 565 ++++++++++++++++++ 4 files changed, 580 insertions(+), 532 deletions(-) create mode 100644 tests/test_on_data/behavior/test_pose_estimation_interfaces.py diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index e42926c4d..ea9a7bab3 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -120,6 +120,8 @@ def add_to_nwbfile( """ from ._dlc_utils import _add_subject_to_nwbfile + self.pose_estimation_container_kwargs["name"] = container_name + _add_subject_to_nwbfile( nwbfile=nwbfile, file_path=str(self.source_data["file_path"]), diff --git a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index 713b21c98..0841754bb 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -49,6 +49,19 @@ def __init__( frames_per_second : float, optional The frames per second (fps) or sampling rate of the video. 
""" + + # This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created + # For more detail, see https://github.com/rly/ndx-pose/issues/36 + from importlib.metadata import version + + import ndx_pose # noqa: F401 + from packaging import version as version_parse + + ndx_pose_version = version("ndx-pose") + + if version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2.0"): + raise ImportError("The ndx-pose version must be less than 0.2.0.") + self.file_path = Path(file_path) self.sleap_io = get_package(package_name="sleap_io") self.video_file_path = video_file_path diff --git a/tests/test_on_data/behavior/test_behavior_interfaces.py b/tests/test_on_data/behavior/test_behavior_interfaces.py index fbf007a2c..2115e6dd4 100644 --- a/tests/test_on_data/behavior/test_behavior_interfaces.py +++ b/tests/test_on_data/behavior/test_behavior_interfaces.py @@ -1,30 +1,23 @@ -import sys import unittest from datetime import datetime, timezone from pathlib import Path import numpy as np -import pandas as pd import pytest -import sleap_io from hdmf.testing import TestCase from natsort import natsorted from ndx_miniscope import Miniscope from ndx_miniscope.utils import get_timestamps from numpy.testing import assert_array_equal -from parameterized import param, parameterized from pynwb import NWBHDF5IO from pynwb.behavior import Position, SpatialSeries from neuroconv import NWBConverter from neuroconv.datainterfaces import ( - DeepLabCutInterface, FicTracDataInterface, - LightningPoseDataInterface, MedPCInterface, MiniscopeBehaviorInterface, NeuralynxNvtInterface, - SLEAPInterface, VideoInterface, ) from neuroconv.tools.testing.data_interface_mixins import ( @@ -33,154 +26,12 @@ TemporalAlignmentMixin, VideoInterfaceMixin, ) -from neuroconv.utils import DeepDict try: from ..setup_paths import BEHAVIOR_DATA_PATH, OPHYS_DATA_PATH, OUTPUT_PATH except ImportError: from setup_paths import BEHAVIOR_DATA_PATH, OUTPUT_PATH -from 
importlib.metadata import version - -ndx_pose_version = version("ndx-pose") - - -class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): - data_interface_cls = LightningPoseDataInterface - interface_kwargs = dict( - file_path=str(BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.csv"), - original_video_file_path=str( - BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.mp4" - ), - ) - conversion_options = dict(reference_frame="(0,0) corresponds to the top left corner of the video.") - save_directory = OUTPUT_PATH - - @pytest.fixture(scope="class", autouse=True) - def setup_metadata(self, request): - - cls = request.cls - - cls.pose_estimation_name = "PoseEstimation" - cls.original_video_height = 406 - cls.original_video_width = 396 - cls.expected_keypoint_names = [ - "paw1LH_top", - "paw2LF_top", - "paw3RF_top", - "paw4RH_top", - "tailBase_top", - "tailMid_top", - "nose_top", - "obs_top", - "paw1LH_bot", - "paw2LF_bot", - "paw3RF_bot", - "paw4RH_bot", - "tailBase_bot", - "tailMid_bot", - "nose_bot", - "obsHigh_bot", - "obsLow_bot", - ] - cls.expected_metadata = DeepDict( - PoseEstimation=dict( - name=cls.pose_estimation_name, - description="Contains the pose estimation series for each keypoint.", - scorer="heatmap_tracker", - source_software="LightningPose", - camera_name="CameraPoseEstimation", - ) - ) - cls.expected_metadata[cls.pose_estimation_name].update( - { - keypoint_name: dict( - name=f"PoseEstimationSeries{keypoint_name}", - description=f"The estimated position (x, y) of {keypoint_name} over time.", - ) - for keypoint_name in cls.expected_keypoint_names - } - ) - - cls.test_data = pd.read_csv(cls.interface_kwargs["file_path"], header=[0, 1, 2])["heatmap_tracker"] - - def check_extracted_metadata(self, metadata: dict): - assert metadata["NWBFile"]["session_start_time"] == datetime(2023, 11, 9, 10, 14, 37, 0) - assert self.pose_estimation_name in 
metadata["Behavior"] - assert metadata["Behavior"][self.pose_estimation_name] == self.expected_metadata[self.pose_estimation_name] - - def check_read_nwb(self, nwbfile_path: str): - from ndx_pose import PoseEstimation, PoseEstimationSeries - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - - # Replacing assertIn with pytest-style assert - assert "behavior" in nwbfile.processing - assert self.pose_estimation_name in nwbfile.processing["behavior"].data_interfaces - - pose_estimation_container = nwbfile.processing["behavior"].data_interfaces[self.pose_estimation_name] - - # Replacing assertIsInstance with pytest-style assert - assert isinstance(pose_estimation_container, PoseEstimation) - - pose_estimation_metadata = self.expected_metadata[self.pose_estimation_name] - - # Replacing assertEqual with pytest-style assert - assert pose_estimation_container.description == pose_estimation_metadata["description"] - assert pose_estimation_container.scorer == pose_estimation_metadata["scorer"] - assert pose_estimation_container.source_software == pose_estimation_metadata["source_software"] - - # Using numpy's assert_array_equal - assert_array_equal( - pose_estimation_container.dimensions[:], [[self.original_video_height, self.original_video_width]] - ) - - # Replacing assertEqual with pytest-style assert - assert len(pose_estimation_container.pose_estimation_series) == len(self.expected_keypoint_names) - - for keypoint_name in self.expected_keypoint_names: - series_metadata = pose_estimation_metadata[keypoint_name] - - # Replacing assertIn with pytest-style assert - assert series_metadata["name"] in pose_estimation_container.pose_estimation_series - - pose_estimation_series = pose_estimation_container.pose_estimation_series[series_metadata["name"]] - - # Replacing assertIsInstance with pytest-style assert - assert isinstance(pose_estimation_series, PoseEstimationSeries) - - # Replacing assertEqual with pytest-style assert - 
assert pose_estimation_series.unit == "px" - assert pose_estimation_series.description == series_metadata["description"] - assert pose_estimation_series.reference_frame == self.conversion_options["reference_frame"] - - test_data = self.test_data[keypoint_name] - - # Using numpy's assert_array_equal - assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) - - -class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): - data_interface_cls = LightningPoseDataInterface - interface_kwargs = dict( - file_path=str(BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.csv"), - original_video_file_path=str( - BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.mp4" - ), - ) - - conversion_options = dict(stub_test=True) - save_directory = OUTPUT_PATH - - def check_read_nwb(self, nwbfile_path: str): - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - pose_estimation_container = nwbfile.processing["behavior"].data_interfaces["PoseEstimation"] - for pose_estimation_series in pose_estimation_container.pose_estimation_series.values(): - assert pose_estimation_series.data.shape[0] == 10 - assert pose_estimation_series.confidence.shape[0] == 10 - class TestFicTracDataInterface(DataInterfaceTestMixin): data_interface_cls = FicTracDataInterface @@ -326,274 +177,6 @@ class TestFicTracDataInterfaceTiming(TemporalAlignmentMixin): save_directory = OUTPUT_PATH -from platform import python_version - -from packaging import version - -python_version = version.parse(python_version()) -from sys import platform - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", -) -class TestDeepLabCutInterface(DataInterfaceTestMixin): - data_interface_cls = DeepLabCutInterface - interface_kwargs = dict( - file_path=str( - 
BEHAVIOR_DATA_PATH - / "DLC" - / "open_field_without_video" - / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - ), - config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), - subject_name="ind1", - ) - save_directory = OUTPUT_PATH - - def run_custom_checks(self): - self.check_renaming_instance(nwbfile_path=self.nwbfile_path) - - def check_renaming_instance(self, nwbfile_path: str): - custom_container_name = "TestPoseEstimation" - - metadata = self.interface.get_metadata() - metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - - self.interface.run_conversion( - nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata, container_name=custom_container_name - ) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "behavior" in nwbfile.processing - assert "PoseEstimationDeepLabCut" not in nwbfile.processing["behavior"].data_interfaces - assert custom_container_name in nwbfile.processing["behavior"].data_interfaces - - def check_read_nwb(self, nwbfile_path: str): - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "behavior" in nwbfile.processing - processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimationDeepLabCut" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces[ - "PoseEstimationDeepLabCut" - ].pose_estimation_series - expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] - - expected_pose_estimation_series_are_in_nwb_file = [ - pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series - ] - - assert all(expected_pose_estimation_series_are_in_nwb_file) - - -@pytest.fixture -def clean_pose_extension_import(): - modules_to_remove = [m for m in sys.modules if m.startswith("ndx_pose")] - for module in 
modules_to_remove: - del sys.modules[module] - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", -) -def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): - """ - Test that the DeepLabCutInterface writes correctly without importing the ndx-pose extension. - See issues: - https://github.com/catalystneuro/neuroconv/issues/1114 - https://github.com/rly/ndx-pose/issues/36 - - """ - - interface_kwargs = dict( - file_path=str( - BEHAVIOR_DATA_PATH - / "DLC" - / "open_field_without_video" - / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - ), - config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), - ) - - interface = DeepLabCutInterface(**interface_kwargs) - metadata = interface.get_metadata() - metadata["NWBFile"]["session_start_time"] = datetime(2023, 7, 24, 9, 30, 55, 440600, tzinfo=timezone.utc) - - nwbfile_path = tmp_path / "test.nwb" - interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) - with NWBHDF5IO(path=nwbfile_path, mode="r") as io: - read_nwbfile = io.read() - pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimation"] - - assert len(pose_estimation_container.fields) > 0 - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", -) -class TestDeepLabCutInterfaceNoConfigFile(DataInterfaceTestMixin): - data_interface_cls = DeepLabCutInterface - interface_kwargs = dict( - file_path=str( - BEHAVIOR_DATA_PATH - / "DLC" - / "open_field_without_video" - / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - ), - config_file_path=None, - subject_name="ind1", - ) - save_directory = OUTPUT_PATH - - def check_read_nwb(self, nwbfile_path: str): - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = 
io.read() - assert "behavior" in nwbfile.processing - processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimationDeepLabCut" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces[ - "PoseEstimationDeepLabCut" - ].pose_estimation_series - expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] - - expected_pose_estimation_series_are_in_nwb_file = [ - pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series - ] - - assert all(expected_pose_estimation_series_are_in_nwb_file) - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", -) -class TestDeepLabCutInterfaceSetTimestamps(DataInterfaceTestMixin): - data_interface_cls = DeepLabCutInterface - interface_kwargs = dict( - file_path=str( - BEHAVIOR_DATA_PATH - / "DLC" - / "open_field_without_video" - / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - ), - config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), - subject_name="ind1", - ) - - save_directory = OUTPUT_PATH - - def run_custom_checks(self): - self.check_custom_timestamps(nwbfile_path=self.nwbfile_path) - - def check_custom_timestamps(self, nwbfile_path: str): - custom_timestamps = np.concatenate( - (np.linspace(10, 110, 1000), np.linspace(150, 250, 1000), np.linspace(300, 400, 330)) - ) - - metadata = self.interface.get_metadata() - metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - - self.interface.set_aligned_timestamps(custom_timestamps) - assert len(self.interface._timestamps) == 2330 - - self.interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "behavior" in 
nwbfile.processing - processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimationDeepLabCut" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces[ - "PoseEstimationDeepLabCut" - ].pose_estimation_series - - for pose_estimation in pose_estimation_series_in_nwb.values(): - pose_timestamps = pose_estimation.timestamps - np.testing.assert_array_equal(pose_timestamps, custom_timestamps) - - # This was tested in the other test - def check_read_nwb(self, nwbfile_path: str): - pass - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", -) -class TestDeepLabCutInterfaceFromCSV(DataInterfaceTestMixin): - data_interface_cls = DeepLabCutInterface - interface_kwargs = dict( - file_path=str( - BEHAVIOR_DATA_PATH - / "DLC" - / "SL18_csv" - / "SL18_D19_S01_F01_BOX_SLP_20230503_112642.1DLC_resnet50_SubLearnSleepBoxRedLightJun26shuffle1_100000_stubbed.csv" - ), - config_file_path=None, - subject_name="SL18", - ) - save_directory = OUTPUT_PATH - - def check_read_nwb(self, nwbfile_path: str): - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "behavior" in nwbfile.processing - processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimation" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series - expected_pose_estimation_series = ["SL18_redled", "SL18_shoulder", "SL18_haunch", "SL18_baseoftail"] - - expected_pose_estimation_series_are_in_nwb_file = [ - pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series - ] - - assert all(expected_pose_estimation_series_are_in_nwb_file) - - -class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): - data_interface_cls = 
SLEAPInterface - interface_kwargs = dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), - video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), - ) - save_directory = OUTPUT_PATH - - def check_read_nwb(self, nwbfile_path: str): # This is currently structured to be file-specific - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "SLEAP_VIDEO_000_20190128_113421" in nwbfile.processing - processing_module_interfaces = nwbfile.processing["SLEAP_VIDEO_000_20190128_113421"].data_interfaces - assert "track=track_0" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces["track=track_0"].pose_estimation_series - expected_pose_estimation_series = [ - "abdomen", - "eyeL", - "eyeR", - "forelegL4", - "forelegR4", - "head", - "hindlegL4", - "hindlegR4", - "midlegL4", - "midlegR4", - "thorax", - "wingL", - "wingR", - ] - - assert set(pose_estimation_series_in_nwb) == set(expected_pose_estimation_series) - - class TestMiniscopeInterface(DataInterfaceTestMixin): data_interface_cls = MiniscopeBehaviorInterface interface_kwargs = dict(folder_path=str(OPHYS_DATA_PATH / "imaging_datasets" / "Miniscope" / "C6-J588_Disc5")) @@ -673,121 +256,6 @@ def check_metadata(self): assert metadata["NWBFile"]["session_start_time"] == datetime(2023, 5, 15, 10, 35, 29) -class CustomTestSLEAPInterface(TestCase): - savedir = OUTPUT_PATH - - @parameterized.expand( - [ - param( - data_interface=SLEAPInterface, - interface_kwargs=dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), - ), - ) - ] - ) - def test_sleap_to_nwb_interface(self, data_interface, interface_kwargs): - nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") - - interface = SLEAPInterface(**interface_kwargs) - metadata = interface.get_metadata() - 
metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) - - slp_predictions_path = interface_kwargs["file_path"] - labels = sleap_io.load_slp(slp_predictions_path) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - # Test matching number of processing modules - number_of_videos = len(labels.videos) - assert len(nwbfile.processing) == number_of_videos - - # Test processing module naming as video - processing_module_name = "SLEAP_VIDEO_000_20190128_113421" - assert processing_module_name in nwbfile.processing - - # For this case we have as many containers as tracks - # Each track usually represents a subject - processing_module = nwbfile.processing[processing_module_name] - processing_module_interfaces = processing_module.data_interfaces - assert len(processing_module_interfaces) == len(labels.tracks) - - # Test name of PoseEstimation containers - extracted_container_names = processing_module_interfaces.keys() - for track in labels.tracks: - expected_track_name = f"track={track.name}" - assert expected_track_name in extracted_container_names - - # Test one PoseEstimation container - container_name = f"track={track.name}" - pose_estimation_container = processing_module_interfaces[container_name] - # Test that the skeleton nodes are store as nodes in containers - expected_node_names = [node.name for node in labels.skeletons[0]] - assert expected_node_names == list(pose_estimation_container.nodes[:]) - - # Test that each PoseEstimationSeries is named as a node - for node_name in pose_estimation_container.nodes[:]: - assert node_name in pose_estimation_container.pose_estimation_series - - @parameterized.expand( - [ - param( - data_interface=SLEAPInterface, - interface_kwargs=dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.slp"), - video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / 
"melanogaster_courtship.mp4"), - ), - ) - ] - ) - def test_sleap_interface_timestamps_propagation(self, data_interface, interface_kwargs): - nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") - - interface = SLEAPInterface(**interface_kwargs) - metadata = interface.get_metadata() - metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) - - slp_predictions_path = interface_kwargs["file_path"] - labels = sleap_io.load_slp(slp_predictions_path) - - from neuroconv.datainterfaces.behavior.sleap.sleap_utils import ( - extract_timestamps, - ) - - expected_timestamps = set(extract_timestamps(interface_kwargs["video_file_path"])) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - # Test matching number of processing modules - number_of_videos = len(labels.videos) - assert len(nwbfile.processing) == number_of_videos - - # Test processing module naming as video - video_name = Path(labels.videos[0].filename).stem - processing_module_name = f"SLEAP_VIDEO_000_{video_name}" - - # For this case we have as many containers as tracks - processing_module_interfaces = nwbfile.processing[processing_module_name].data_interfaces - - extracted_container_names = processing_module_interfaces.keys() - for track in labels.tracks: - expected_track_name = f"track={track.name}" - assert expected_track_name in extracted_container_names - - container_name = f"track={track.name}" - pose_estimation_container = processing_module_interfaces[container_name] - - # Test that each PoseEstimationSeries is named as a node - for node_name in pose_estimation_container.nodes[:]: - pose_estimation_series = pose_estimation_container.pose_estimation_series[node_name] - extracted_timestamps = pose_estimation_series.timestamps[:] - - # Some frames do not have predictions associated with them, so we test for sub-set - assert 
set(extracted_timestamps).issubset(expected_timestamps) - - class TestVideoInterface(VideoInterfaceMixin): data_interface_cls = VideoInterface save_directory = OUTPUT_PATH diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py new file mode 100644 index 000000000..b0e6447e0 --- /dev/null +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -0,0 +1,565 @@ +import sys +import unittest +from datetime import datetime, timezone +from pathlib import Path + +import numpy as np +import pandas as pd +import pytest +import sleap_io +from hdmf.testing import TestCase +from numpy.testing import assert_array_equal +from parameterized import param, parameterized +from pynwb import NWBHDF5IO + +from neuroconv.datainterfaces import ( + DeepLabCutInterface, + LightningPoseDataInterface, + SLEAPInterface, +) +from neuroconv.tools.testing.data_interface_mixins import ( + DataInterfaceTestMixin, + TemporalAlignmentMixin, +) +from neuroconv.utils import DeepDict + +try: + from ..setup_paths import BEHAVIOR_DATA_PATH, OUTPUT_PATH +except ImportError: + from setup_paths import BEHAVIOR_DATA_PATH, OUTPUT_PATH + +from importlib.metadata import version as importlib_version + +from packaging import version + +ndx_pose_version = version.parse(importlib_version("ndx-pose")) + + +from platform import python_version + +from packaging import version + +python_version = version.parse(python_version()) +from sys import platform + + +class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): + data_interface_cls = LightningPoseDataInterface + interface_kwargs = dict( + file_path=str(BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.csv"), + original_video_file_path=str( + BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.mp4" + ), + ) + conversion_options = dict(reference_frame="(0,0) 
corresponds to the top left corner of the video.") + save_directory = OUTPUT_PATH + + @pytest.fixture(scope="class", autouse=True) + def setup_metadata(self, request): + + cls = request.cls + + cls.pose_estimation_name = "PoseEstimation" + cls.original_video_height = 406 + cls.original_video_width = 396 + cls.expected_keypoint_names = [ + "paw1LH_top", + "paw2LF_top", + "paw3RF_top", + "paw4RH_top", + "tailBase_top", + "tailMid_top", + "nose_top", + "obs_top", + "paw1LH_bot", + "paw2LF_bot", + "paw3RF_bot", + "paw4RH_bot", + "tailBase_bot", + "tailMid_bot", + "nose_bot", + "obsHigh_bot", + "obsLow_bot", + ] + cls.expected_metadata = DeepDict( + PoseEstimation=dict( + name=cls.pose_estimation_name, + description="Contains the pose estimation series for each keypoint.", + scorer="heatmap_tracker", + source_software="LightningPose", + camera_name="CameraPoseEstimation", + ) + ) + cls.expected_metadata[cls.pose_estimation_name].update( + { + keypoint_name: dict( + name=f"PoseEstimationSeries{keypoint_name}", + description=f"The estimated position (x, y) of {keypoint_name} over time.", + ) + for keypoint_name in cls.expected_keypoint_names + } + ) + + cls.test_data = pd.read_csv(cls.interface_kwargs["file_path"], header=[0, 1, 2])["heatmap_tracker"] + + def check_extracted_metadata(self, metadata: dict): + assert metadata["NWBFile"]["session_start_time"] == datetime(2023, 11, 9, 10, 14, 37, 0) + assert self.pose_estimation_name in metadata["Behavior"] + assert metadata["Behavior"][self.pose_estimation_name] == self.expected_metadata[self.pose_estimation_name] + + def check_read_nwb(self, nwbfile_path: str): + from ndx_pose import PoseEstimation, PoseEstimationSeries + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + + # Replacing assertIn with pytest-style assert + assert "behavior" in nwbfile.processing + assert self.pose_estimation_name in nwbfile.processing["behavior"].data_interfaces + + pose_estimation_container = 
nwbfile.processing["behavior"].data_interfaces[self.pose_estimation_name] + + # Replacing assertIsInstance with pytest-style assert + assert isinstance(pose_estimation_container, PoseEstimation) + + pose_estimation_metadata = self.expected_metadata[self.pose_estimation_name] + + # Replacing assertEqual with pytest-style assert + assert pose_estimation_container.description == pose_estimation_metadata["description"] + assert pose_estimation_container.scorer == pose_estimation_metadata["scorer"] + assert pose_estimation_container.source_software == pose_estimation_metadata["source_software"] + + # Using numpy's assert_array_equal + assert_array_equal( + pose_estimation_container.dimensions[:], [[self.original_video_height, self.original_video_width]] + ) + + # Replacing assertEqual with pytest-style assert + assert len(pose_estimation_container.pose_estimation_series) == len(self.expected_keypoint_names) + + for keypoint_name in self.expected_keypoint_names: + series_metadata = pose_estimation_metadata[keypoint_name] + + # Replacing assertIn with pytest-style assert + assert series_metadata["name"] in pose_estimation_container.pose_estimation_series + + pose_estimation_series = pose_estimation_container.pose_estimation_series[series_metadata["name"]] + + # Replacing assertIsInstance with pytest-style assert + assert isinstance(pose_estimation_series, PoseEstimationSeries) + + # Replacing assertEqual with pytest-style assert + assert pose_estimation_series.unit == "px" + assert pose_estimation_series.description == series_metadata["description"] + assert pose_estimation_series.reference_frame == self.conversion_options["reference_frame"] + + test_data = self.test_data[keypoint_name] + + # Using numpy's assert_array_equal + assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) + + +class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): + data_interface_cls = LightningPoseDataInterface + 
interface_kwargs = dict( + file_path=str(BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.csv"), + original_video_file_path=str( + BEHAVIOR_DATA_PATH / "lightningpose" / "outputs/2023-11-09/10-14-37/video_preds/test_vid.mp4" + ), + ) + + conversion_options = dict(stub_test=True) + save_directory = OUTPUT_PATH + + def check_read_nwb(self, nwbfile_path: str): + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + pose_estimation_container = nwbfile.processing["behavior"].data_interfaces["PoseEstimation"] + for pose_estimation_series in pose_estimation_container.pose_estimation_series.values(): + assert pose_estimation_series.data.shape[0] == 10 + assert pose_estimation_series.confidence.shape[0] == 10 + + +@pytest.mark.skipif( + platform == "darwin" and python_version < version.parse("3.10"), + reason="interface not supported on macOS with Python < 3.10", +) +class TestDeepLabCutInterface(DataInterfaceTestMixin): + data_interface_cls = DeepLabCutInterface + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "open_field_without_video" + / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + ), + config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), + subject_name="ind1", + ) + save_directory = OUTPUT_PATH + + def run_custom_checks(self): + self.check_renaming_instance(nwbfile_path=self.nwbfile_path) + + def check_renaming_instance(self, nwbfile_path: str): + custom_container_name = "TestPoseEstimation" + + metadata = self.interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + + self.interface.run_conversion( + nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata, container_name=custom_container_name + ) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "behavior" in nwbfile.processing + assert 
custom_container_name in nwbfile.processing["behavior"].data_interfaces + + def check_read_nwb(self, nwbfile_path: str): + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "behavior" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series + expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] + + expected_pose_estimation_series_are_in_nwb_file = [ + pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series + ] + + assert all(expected_pose_estimation_series_are_in_nwb_file) + + +@pytest.fixture +def clean_pose_extension_import(): + modules_to_remove = [m for m in sys.modules if m.startswith("ndx_pose")] + for module in modules_to_remove: + del sys.modules[module] + + +@pytest.mark.skipif( + platform == "darwin" and python_version < version.parse("3.10"), + reason="interface not supported on macOS with Python < 3.10", +) +def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): + """ + Test that the DeepLabCutInterface writes correctly without importing the ndx-pose extension. 
+ See issues: + https://github.com/catalystneuro/neuroconv/issues/1114 + https://github.com/rly/ndx-pose/issues/36 + + """ + + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "open_field_without_video" + / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + ), + config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), + ) + + interface = DeepLabCutInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"]["session_start_time"] = datetime(2023, 7, 24, 9, 30, 55, 440600, tzinfo=timezone.utc) + + nwbfile_path = tmp_path / "test.nwb" + interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) + with NWBHDF5IO(path=nwbfile_path, mode="r") as io: + read_nwbfile = io.read() + pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimation"] + + assert len(pose_estimation_container.fields) > 0 + + +@pytest.mark.skipif( + platform == "darwin" and python_version < version.parse("3.10"), + reason="interface not supported on macOS with Python < 3.10", +) +class TestDeepLabCutInterfaceNoConfigFile(DataInterfaceTestMixin): + data_interface_cls = DeepLabCutInterface + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "open_field_without_video" + / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + ), + config_file_path=None, + subject_name="ind1", + ) + save_directory = OUTPUT_PATH + + def check_read_nwb(self, nwbfile_path: str): + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "behavior" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series + expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", 
"ind1_snout", "ind1_tailbase"] + + expected_pose_estimation_series_are_in_nwb_file = [ + pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series + ] + + assert all(expected_pose_estimation_series_are_in_nwb_file) + + +@pytest.mark.skipif( + platform == "darwin" and python_version < version.parse("3.10"), + reason="interface not supported on macOS with Python < 3.10", +) +class TestDeepLabCutInterfaceSetTimestamps(DataInterfaceTestMixin): + data_interface_cls = DeepLabCutInterface + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "open_field_without_video" + / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + ), + config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), + subject_name="ind1", + ) + + save_directory = OUTPUT_PATH + + def run_custom_checks(self): + self.check_custom_timestamps(nwbfile_path=self.nwbfile_path) + + def check_custom_timestamps(self, nwbfile_path: str): + custom_timestamps = np.concatenate( + (np.linspace(10, 110, 1000), np.linspace(150, 250, 1000), np.linspace(300, 400, 330)) + ) + + metadata = self.interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + + self.interface.set_aligned_timestamps(custom_timestamps) + assert len(self.interface._timestamps) == 2330 + + self.interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "behavior" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series + + for pose_estimation in pose_estimation_series_in_nwb.values(): + pose_timestamps = pose_estimation.timestamps + 
np.testing.assert_array_equal(pose_timestamps, custom_timestamps) + + # This was tested in the other test + def check_read_nwb(self, nwbfile_path: str): + pass + + +@pytest.mark.skipif( + platform == "darwin" and python_version < version.parse("3.10"), + reason="interface not supported on macOS with Python < 3.10", +) +class TestDeepLabCutInterfaceFromCSV(DataInterfaceTestMixin): + data_interface_cls = DeepLabCutInterface + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "SL18_csv" + / "SL18_D19_S01_F01_BOX_SLP_20230503_112642.1DLC_resnet50_SubLearnSleepBoxRedLightJun26shuffle1_100000_stubbed.csv" + ), + config_file_path=None, + subject_name="SL18", + ) + save_directory = OUTPUT_PATH + + def check_read_nwb(self, nwbfile_path: str): + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "behavior" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces + assert "PoseEstimation" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series + expected_pose_estimation_series = ["SL18_redled", "SL18_shoulder", "SL18_haunch", "SL18_baseoftail"] + + expected_pose_estimation_series_are_in_nwb_file = [ + pose_estimation in pose_estimation_series_in_nwb for pose_estimation in expected_pose_estimation_series + ] + + assert all(expected_pose_estimation_series_are_in_nwb_file) + + +@pytest.mark.skipif( + ndx_pose_version < version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" +) +class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): + + data_interface_cls = SLEAPInterface + interface_kwargs = dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), + video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), + ) + save_directory = OUTPUT_PATH + + def 
check_read_nwb(self, nwbfile_path: str): # This is currently structured to be file-specific + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "SLEAP_VIDEO_000_20190128_113421" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["SLEAP_VIDEO_000_20190128_113421"].data_interfaces + assert "track=track_0" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces["track=track_0"].pose_estimation_series + expected_pose_estimation_series = [ + "abdomen", + "eyeL", + "eyeR", + "forelegL4", + "forelegR4", + "head", + "hindlegL4", + "hindlegR4", + "midlegL4", + "midlegR4", + "thorax", + "wingL", + "wingR", + ] + + assert set(pose_estimation_series_in_nwb) == set(expected_pose_estimation_series) + + +@pytest.mark.skipif( + ndx_pose_version < version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" +) +class CustomTestSLEAPInterface(TestCase): + savedir = OUTPUT_PATH + + @parameterized.expand( + [ + param( + data_interface=SLEAPInterface, + interface_kwargs=dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), + ), + ) + ] + ) + def test_sleap_to_nwb_interface(self, data_interface, interface_kwargs): + nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") + + interface = SLEAPInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) + + slp_predictions_path = interface_kwargs["file_path"] + labels = sleap_io.load_slp(slp_predictions_path) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + # Test matching number of processing modules + number_of_videos = len(labels.videos) + assert len(nwbfile.processing) == number_of_videos + + # Test processing module 
naming as video + processing_module_name = "SLEAP_VIDEO_000_20190128_113421" + assert processing_module_name in nwbfile.processing + + # For this case we have as many containers as tracks + # Each track usually represents a subject + processing_module = nwbfile.processing[processing_module_name] + processing_module_interfaces = processing_module.data_interfaces + assert len(processing_module_interfaces) == len(labels.tracks) + + # Test name of PoseEstimation containers + extracted_container_names = processing_module_interfaces.keys() + for track in labels.tracks: + expected_track_name = f"track={track.name}" + assert expected_track_name in extracted_container_names + + # Test one PoseEstimation container + container_name = f"track={track.name}" + pose_estimation_container = processing_module_interfaces[container_name] + # Test that the skeleton nodes are store as nodes in containers + expected_node_names = [node.name for node in labels.skeletons[0]] + assert expected_node_names == list(pose_estimation_container.nodes[:]) + + # Test that each PoseEstimationSeries is named as a node + for node_name in pose_estimation_container.nodes[:]: + assert node_name in pose_estimation_container.pose_estimation_series + + @parameterized.expand( + [ + param( + data_interface=SLEAPInterface, + interface_kwargs=dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.slp"), + video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), + ), + ) + ] + ) + def test_sleap_interface_timestamps_propagation(self, data_interface, interface_kwargs): + nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") + + interface = SLEAPInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) + + slp_predictions_path = interface_kwargs["file_path"] + labels = 
sleap_io.load_slp(slp_predictions_path) + + from neuroconv.datainterfaces.behavior.sleap.sleap_utils import ( + extract_timestamps, + ) + + expected_timestamps = set(extract_timestamps(interface_kwargs["video_file_path"])) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + # Test matching number of processing modules + number_of_videos = len(labels.videos) + assert len(nwbfile.processing) == number_of_videos + + # Test processing module naming as video + video_name = Path(labels.videos[0].filename).stem + processing_module_name = f"SLEAP_VIDEO_000_{video_name}" + + # For this case we have as many containers as tracks + processing_module_interfaces = nwbfile.processing[processing_module_name].data_interfaces + + extracted_container_names = processing_module_interfaces.keys() + for track in labels.tracks: + expected_track_name = f"track={track.name}" + assert expected_track_name in extracted_container_names + + container_name = f"track={track.name}" + pose_estimation_container = processing_module_interfaces[container_name] + + # Test that each PoseEstimationSeries is named as a node + for node_name in pose_estimation_container.nodes[:]: + pose_estimation_series = pose_estimation_container.pose_estimation_series[node_name] + extracted_timestamps = pose_estimation_series.timestamps[:] + + # Some frames do not have predictions associated with them, so we test for sub-set + assert set(extracted_timestamps).issubset(expected_timestamps) + + +if __name__ == "__main__": + unittest.main() From 816f27bf5a18b2404b5130cfeff3707b64d3da26 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Mon, 20 Jan 2025 19:28:01 -0600 Subject: [PATCH 09/20] fix pose estimation test --- .../behavior/test_pose_estimation_interfaces.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index 
b0e6447e0..4c210c91b 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -392,9 +392,11 @@ def check_read_nwb(self, nwbfile_path: str): nwbfile = io.read() assert "behavior" in nwbfile.processing processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces - assert "PoseEstimation" in processing_module_interfaces + assert "PoseEstimationDeepLabCut" in processing_module_interfaces - pose_estimation_series_in_nwb = processing_module_interfaces["PoseEstimation"].pose_estimation_series + pose_estimation_series_in_nwb = processing_module_interfaces[ + "PoseEstimationDeepLabCut" + ].pose_estimation_series expected_pose_estimation_series = ["SL18_redled", "SL18_shoulder", "SL18_haunch", "SL18_baseoftail"] expected_pose_estimation_series_are_in_nwb_file = [ From 887428de15ff826f4d9a9934d02703c16edc1d84 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 08:49:37 -0600 Subject: [PATCH 10/20] modify doctests --- .../combinations/ecephys_pose_estimation.rst | 14 +++++++------- docs/conversion_examples_gallery/conftest.py | 8 +++++++- .../behavior/test_pose_estimation_interfaces.py | 2 +- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst b/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst index 270431c05..37da0da9e 100644 --- a/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst +++ b/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst @@ -2,10 +2,10 @@ Electrophysiology and Behavior ------------------------------ This example showcases a conversion that combines two modalities of data electrophysiology and behavior in the form of pose estimation. 
-For this specific example were are combining a OpenEphys recording with KiloSort sorting results and PoseEstimation from sleap using the +For this specific example were are combining a OpenEphys recording with KiloSort sorting results and PoseEstimation from DeepLabCut using the :py:class:`~neuroconv.datainterfaces.ecephys.blackrock.blackrockdatainterface.BlackrockRecordingInterface`, :py:class:`~neuroconv.datainterfaces.ecephys.kilosort.kilosortdatainterface.KiloSortSortingInterface`, and -:py:class:`~neuroconv.datainterfaces.behavior.sleap.sleapdatainterface.SLEAPInterface`. classes. +:py:class:`~neuroconv.datainterfaces.behavior.deeplabcut.deeplabcutdatainterface.DeepLabCutInterface`. classes. .. code-block:: python @@ -14,7 +14,7 @@ For this specific example were are combining a OpenEphys recording with KiloSort >>> from zoneinfo import ZoneInfo >>> from pathlib import Path >>> from neuroconv import ConverterPipe - >>> from neuroconv.datainterfaces import BlackrockRecordingInterface, KiloSortSortingInterface, SLEAPInterface + >>> from neuroconv.datainterfaces import BlackrockRecordingInterface, KiloSortSortingInterface, DeepLabCutInterface >>> >>> >>> file_path = f"{ECEPHY_DATA_PATH}/blackrock/FileSpec2.3001.ns5" @@ -25,13 +25,13 @@ For this specific example were are combining a OpenEphys recording with KiloSort >>> # Change the folder_path to the location of the data in your system >>> interface_kilosort = KiloSortSortingInterface(folder_path=folder_path, verbose=False) >>> - >>> # Change the file_path so it points to the slp file in your system - >>> file_path = BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp" - >>> interface_sleap = SLEAPInterface(file_path=file_path, verbose=False) + >>> # Change the file_path and config_file_path to point to your DeepLabCut files + >>> file_path = BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + >>> interface_dlc = 
DeepLabCutInterface(file_path=file_path, verbose=False) >>> >>> # Now that we have defined the two interfaces we pass them to the ConverterPipe which will coordinate the >>> # concurrent conversion of the data - >>> converter = ConverterPipe(data_interfaces=[interface_blackrock, interface_kilosort, interface_sleap], verbose=False) + >>> converter = ConverterPipe(data_interfaces=[interface_blackrock, interface_kilosort, interface_dlc], verbose=False) >>> >>> # Extract what metadata we can from the source files >>> metadata = converter.get_metadata() diff --git a/docs/conversion_examples_gallery/conftest.py b/docs/conversion_examples_gallery/conftest.py index 6618d6d52..99ad152b3 100644 --- a/docs/conversion_examples_gallery/conftest.py +++ b/docs/conversion_examples_gallery/conftest.py @@ -1,4 +1,5 @@ import platform +from importlib.metadata import version as importlib_version from pathlib import Path import pytest @@ -29,9 +30,14 @@ def add_data_space(doctest_namespace, tmp_path): # Hook to conditionally skip doctests in deeplabcut.rst for Python 3.9 on macOS (Darwin) def pytest_runtest_setup(item): if isinstance(item, pytest.DoctestItem): - # Check if we are running the doctest from deeplabcut.rst test_file = Path(item.fspath) + # Check if we are running the doctest from deeplabcut.rst if test_file.name == "deeplabcut.rst": # Check if Python version is 3.9 and platform is Darwin (macOS) if version.parse(python_version) < version.parse("3.10") and os == "Darwin": pytest.skip("Skipping doctests for deeplabcut.rst on Python 3.9 and macOS") + # Check if we are running the doctest from sleap.rst + elif test_file.name == "sleap.rst": + ndx_pose_version = version.parse(importlib_version("ndx-pose")) + if ndx_pose_version >= version.parse("2.0.0"): + pytest.skip("Skipping doctests for sleap.rst: only run when ndx-pose version < 2.0.0") diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py 
b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index 4c210c91b..047e8a57e 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -273,7 +273,7 @@ def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) with NWBHDF5IO(path=nwbfile_path, mode="r") as io: read_nwbfile = io.read() - pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimation"] + pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimationDeepLabCut"] assert len(pose_estimation_container.fields) > 0 From a08ddea6208e785e38a5fe5d65a3bd58b912fb5b Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 08:55:58 -0600 Subject: [PATCH 11/20] add the rest of the testing --- .github/workflows/testing.yml | 5 ++++ .../test_pose_estimation_interfaces.py | 26 ++++++++++--------- 2 files changed, 19 insertions(+), 12 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index d8c5bb9fd..dc5625ec9 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -101,6 +101,11 @@ jobs: - name: Install full requirements run: pip install .[full] + - name: Run Sleap Tests until sleap.io adds support for ndx-pose > 2.0 + run : | + pip install ndx-pose==0.1.1 + pytest tests/test_on_data/behavior/test_pose_estimation_interfaces.py + - name: Run full pytest with coverage run: pytest -vv -rsx -n auto --dist loadscope --cov=neuroconv --cov-report xml:./codecov.xml - name: Upload full coverage to Codecov diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index 047e8a57e..f893cf527 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ 
b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -43,6 +43,7 @@ from sys import platform +@pytest.mark.skipif(ndx_pose_version < version.parse("2.0.0"), reason="Interface requires ndx-pose version >= 2.0.0") class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -159,6 +160,7 @@ def check_read_nwb(self, nwbfile_path: str): assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) +@pytest.mark.skipif(ndx_pose_version < version.parse("2.0.0"), reason="Interface requires ndx-pose version >= 2.0.0") class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -181,8 +183,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), + reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterface(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -243,8 +245,8 @@ def clean_pose_extension_import(): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), + reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", ) def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): """ @@ -279,8 +281,8 @@ def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp @pytest.mark.skipif( - 
platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), + reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceNoConfigFile(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -316,8 +318,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), + reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceSetTimestamps(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -370,8 +372,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10"), - reason="interface not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), + reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceFromCSV(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -407,7 +409,7 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - ndx_pose_version < version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" + ndx_pose_version >= version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" ) class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): @@ -446,7 +448,7 @@ def check_read_nwb(self, nwbfile_path: str): # This is currently structured 
to @pytest.mark.skipif( - ndx_pose_version < version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" + ndx_pose_version >= version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" ) class CustomTestSLEAPInterface(TestCase): savedir = OUTPUT_PATH From d069e5eb14f23bf83e789b2f627a5fa16726a574 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 09:09:10 -0600 Subject: [PATCH 12/20] add links to removal --- docs/conversion_examples_gallery/conftest.py | 1 + .../behavior/deeplabcut/deeplabcutdatainterface.py | 13 ++++++++++++- .../lightningpose/lightningposedatainterface.py | 11 +++++++++++ .../behavior/sleap/sleapdatainterface.py | 9 +++++++-- .../behavior/test_pose_estimation_interfaces.py | 10 +++------- 5 files changed, 34 insertions(+), 10 deletions(-) diff --git a/docs/conversion_examples_gallery/conftest.py b/docs/conversion_examples_gallery/conftest.py index 99ad152b3..ececada61 100644 --- a/docs/conversion_examples_gallery/conftest.py +++ b/docs/conversion_examples_gallery/conftest.py @@ -37,6 +37,7 @@ def pytest_runtest_setup(item): if version.parse(python_version) < version.parse("3.10") and os == "Darwin": pytest.skip("Skipping doctests for deeplabcut.rst on Python 3.9 and macOS") # Check if we are running the doctest from sleap.rst + # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 elif test_file.name == "sleap.rst": ndx_pose_version = version.parse(importlib_version("ndx-pose")) if ndx_pose_version >= version.parse("2.0.0"): diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index ea9a7bab3..6abea7372 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -48,7 +48,18 @@ def __init__( Controls verbosity. 
""" # This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created - from ndx_pose import PoseEstimation, PoseEstimationSeries # noqa: F401 + from importlib.metadata import version + + import ndx_pose # noqa: F401 + from packaging import version as version_parse + + ndx_pose_version = version("ndx-pose") + if version_parse.parse(ndx_pose_version) < version_parse.parse("2.0.0"): + raise ImportError( + "DeepLabCut interface requires ndx-pose version 2.0.0 or later. " + f"Found version {ndx_pose_version}. Please upgrade: " + "pip install 'ndx-pose>=2.0.0'" + ) from ._dlc_utils import _read_config diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index 31d5810d8..70dd05e77 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -84,7 +84,18 @@ def __init__( # This import is to assure that the ndx_pose is in the global namespace when an pynwb.io object is created # For more detail, see https://github.com/rly/ndx-pose/issues/36 + from importlib.metadata import version + import ndx_pose # noqa: F401 + from packaging import version as version_parse + + ndx_pose_version = version("ndx-pose") + if version_parse.parse(ndx_pose_version) < version_parse.parse("2.0.0"): + raise ImportError( + "LightningPose interface requires ndx-pose version 2.0.0 or later. " + f"Found version {ndx_pose_version}. 
Please upgrade: " + "pip install 'ndx-pose>=2.0.0'" + ) from neuroconv.datainterfaces.behavior.video.video_utils import ( VideoCaptureContext, diff --git a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index 0841754bb..6cbb1ce54 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -59,8 +59,13 @@ def __init__( ndx_pose_version = version("ndx-pose") - if version_parse.parse(ndx_pose_version) >= version_parse.parse("0.2.0"): - raise ImportError("The ndx-pose version must be less than 0.2.0.") + # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 + if version_parse.parse(ndx_pose_version) != version_parse.parse("0.1.1"): + raise ImportError( + "SLEAP interface requires ndx-pose version 0.1.1. " + f"Found version {ndx_pose_version}. Please install the required version: " + "pip install 'ndx-pose==0.1.1'" + ) self.file_path = Path(file_path) self.sleap_io = get_package(package_name="sleap_io") diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index f893cf527..c0a471141 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -29,18 +29,14 @@ from setup_paths import BEHAVIOR_DATA_PATH, OUTPUT_PATH from importlib.metadata import version as importlib_version - -from packaging import version - -ndx_pose_version = version.parse(importlib_version("ndx-pose")) - - from platform import python_version +from sys import platform from packaging import version python_version = version.parse(python_version()) -from sys import platform +# TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 +ndx_pose_version = version.parse(importlib_version("ndx-pose")) 
@pytest.mark.skipif(ndx_pose_version < version.parse("2.0.0"), reason="Interface requires ndx-pose version >= 2.0.0") From a48c9e6cfe09bd44190cbd5630bd5cb6f3e40202 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 09:16:34 -0600 Subject: [PATCH 13/20] restore sleap io test --- .github/workflows/testing.yml | 1 + .../combinations/ecephys_pose_estimation.rst | 14 +++++++------- docs/conversion_examples_gallery/conftest.py | 4 ++-- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index dc5625ec9..9d7db280c 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -101,6 +101,7 @@ jobs: - name: Install full requirements run: pip install .[full] + # TODO: remove this setp after this is merged https://github.com/talmolab/sleap-io/pull/143 - name: Run Sleap Tests until sleap.io adds support for ndx-pose > 2.0 run : | pip install ndx-pose==0.1.1 diff --git a/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst b/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst index 37da0da9e..270431c05 100644 --- a/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst +++ b/docs/conversion_examples_gallery/combinations/ecephys_pose_estimation.rst @@ -2,10 +2,10 @@ Electrophysiology and Behavior ------------------------------ This example showcases a conversion that combines two modalities of data electrophysiology and behavior in the form of pose estimation. 
-For this specific example were are combining a OpenEphys recording with KiloSort sorting results and PoseEstimation from DeepLabCut using the +For this specific example were are combining a OpenEphys recording with KiloSort sorting results and PoseEstimation from sleap using the :py:class:`~neuroconv.datainterfaces.ecephys.blackrock.blackrockdatainterface.BlackrockRecordingInterface`, :py:class:`~neuroconv.datainterfaces.ecephys.kilosort.kilosortdatainterface.KiloSortSortingInterface`, and -:py:class:`~neuroconv.datainterfaces.behavior.deeplabcut.deeplabcutdatainterface.DeepLabCutInterface`. classes. +:py:class:`~neuroconv.datainterfaces.behavior.sleap.sleapdatainterface.SLEAPInterface`. classes. .. code-block:: python @@ -14,7 +14,7 @@ For this specific example were are combining a OpenEphys recording with KiloSort >>> from zoneinfo import ZoneInfo >>> from pathlib import Path >>> from neuroconv import ConverterPipe - >>> from neuroconv.datainterfaces import BlackrockRecordingInterface, KiloSortSortingInterface, DeepLabCutInterface + >>> from neuroconv.datainterfaces import BlackrockRecordingInterface, KiloSortSortingInterface, SLEAPInterface >>> >>> >>> file_path = f"{ECEPHY_DATA_PATH}/blackrock/FileSpec2.3001.ns5" @@ -25,13 +25,13 @@ For this specific example were are combining a OpenEphys recording with KiloSort >>> # Change the folder_path to the location of the data in your system >>> interface_kilosort = KiloSortSortingInterface(folder_path=folder_path, verbose=False) >>> - >>> # Change the file_path and config_file_path to point to your DeepLabCut files - >>> file_path = BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - >>> interface_dlc = DeepLabCutInterface(file_path=file_path, verbose=False) + >>> # Change the file_path so it points to the slp file in your system + >>> file_path = BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp" + >>> interface_sleap = 
SLEAPInterface(file_path=file_path, verbose=False) >>> >>> # Now that we have defined the two interfaces we pass them to the ConverterPipe which will coordinate the >>> # concurrent conversion of the data - >>> converter = ConverterPipe(data_interfaces=[interface_blackrock, interface_kilosort, interface_dlc], verbose=False) + >>> converter = ConverterPipe(data_interfaces=[interface_blackrock, interface_kilosort, interface_sleap], verbose=False) >>> >>> # Extract what metadata we can from the source files >>> metadata = converter.get_metadata() diff --git a/docs/conversion_examples_gallery/conftest.py b/docs/conversion_examples_gallery/conftest.py index ececada61..7cffc2132 100644 --- a/docs/conversion_examples_gallery/conftest.py +++ b/docs/conversion_examples_gallery/conftest.py @@ -38,7 +38,7 @@ def pytest_runtest_setup(item): pytest.skip("Skipping doctests for deeplabcut.rst on Python 3.9 and macOS") # Check if we are running the doctest from sleap.rst # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 - elif test_file.name == "sleap.rst": + elif test_file.name == "ecephys_pose_estimation.rst": ndx_pose_version = version.parse(importlib_version("ndx-pose")) if ndx_pose_version >= version.parse("2.0.0"): - pytest.skip("Skipping doctests for sleap.rst: only run when ndx-pose version < 2.0.0") + pytest.skip("Skipping doctests because sleeps only run when ndx-pose version < 2.0.0") From 71cf0ad709414b68bbb9dc6ed62a6e752b44cc27 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 10:08:46 -0600 Subject: [PATCH 14/20] testing --- .github/workflows/testing.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml index 9d7db280c..644329cbd 100644 --- a/.github/workflows/testing.yml +++ b/.github/workflows/testing.yml @@ -98,15 +98,15 @@ jobs: s3-gin-bucket: ${{ secrets.S3_GIN_BUCKET }} os: ${{ matrix.os }} - - name: Install full requirements 
- run: pip install .[full] - # TODO: remove this setp after this is merged https://github.com/talmolab/sleap-io/pull/143 - name: Run Sleap Tests until sleap.io adds support for ndx-pose > 2.0 run : | pip install ndx-pose==0.1.1 pytest tests/test_on_data/behavior/test_pose_estimation_interfaces.py + - name: Install full requirements + run: pip install .[full] + - name: Run full pytest with coverage run: pytest -vv -rsx -n auto --dist loadscope --cov=neuroconv --cov-report xml:./codecov.xml - name: Upload full coverage to Codecov From 62eb86469f07873f42d2fcbebc2d7ae22e8a4db3 Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 11:13:34 -0600 Subject: [PATCH 15/20] fix version --- .../deeplabcut/deeplabcutdatainterface.py | 6 ++-- .../lightningposedatainterface.py | 6 ++-- .../test_pose_estimation_interfaces.py | 28 +++++++++---------- 3 files changed, 20 insertions(+), 20 deletions(-) diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py index 6abea7372..b81cb3197 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/deeplabcutdatainterface.py @@ -54,11 +54,11 @@ def __init__( from packaging import version as version_parse ndx_pose_version = version("ndx-pose") - if version_parse.parse(ndx_pose_version) < version_parse.parse("2.0.0"): + if version_parse.parse(ndx_pose_version) < version_parse.parse("0.2.0"): raise ImportError( - "DeepLabCut interface requires ndx-pose version 2.0.0 or later. " + "DeepLabCut interface requires ndx-pose version 0.2.0 or later. " f"Found version {ndx_pose_version}. 
Please upgrade: " - "pip install 'ndx-pose>=2.0.0'" + "pip install 'ndx-pose>=0.2.0'" ) from ._dlc_utils import _read_config diff --git a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py index 70dd05e77..f3021a16b 100644 --- a/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/lightningpose/lightningposedatainterface.py @@ -90,11 +90,11 @@ def __init__( from packaging import version as version_parse ndx_pose_version = version("ndx-pose") - if version_parse.parse(ndx_pose_version) < version_parse.parse("2.0.0"): + if version_parse.parse(ndx_pose_version) < version_parse.parse("0.2.0"): raise ImportError( - "LightningPose interface requires ndx-pose version 2.0.0 or later. " + "LightningPose interface requires ndx-pose version 0.2.0 or later. " f"Found version {ndx_pose_version}. Please upgrade: " - "pip install 'ndx-pose>=2.0.0'" + "pip install 'ndx-pose>=0.2.0'" ) from neuroconv.datainterfaces.behavior.video.video_utils import ( diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index c0a471141..ef985b6db 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -39,7 +39,7 @@ ndx_pose_version = version.parse(importlib_version("ndx-pose")) -@pytest.mark.skipif(ndx_pose_version < version.parse("2.0.0"), reason="Interface requires ndx-pose version >= 2.0.0") +@pytest.mark.skipif(ndx_pose_version < version.parse("0.2.0"), reason="Interface requires ndx-pose version >= 0.2.0") class TestLightningPoseDataInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -156,7 +156,7 @@ def check_read_nwb(self, nwbfile_path: str): 
assert_array_equal(pose_estimation_series.data[:], test_data[["x", "y"]].values) -@pytest.mark.skipif(ndx_pose_version < version.parse("2.0.0"), reason="Interface requires ndx-pose version >= 2.0.0") +@pytest.mark.skipif(ndx_pose_version < version.parse("0.2.0"), reason="Interface requires ndx-pose version >= 0.2.0") class TestLightningPoseDataInterfaceWithStubTest(DataInterfaceTestMixin, TemporalAlignmentMixin): data_interface_cls = LightningPoseDataInterface interface_kwargs = dict( @@ -179,8 +179,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), - reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterface(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -241,8 +241,8 @@ def clean_pose_extension_import(): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), - reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): """ @@ -277,8 +277,8 @@ def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), - reason="Interface requires ndx-pose version >= 2.0.0 and 
not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceNoConfigFile(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -314,8 +314,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), - reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceSetTimestamps(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -368,8 +368,8 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("2.0.0"), - reason="Interface requires ndx-pose version >= 2.0.0 and not supported on macOS with Python < 3.10", + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) class TestDeepLabCutInterfaceFromCSV(DataInterfaceTestMixin): data_interface_cls = DeepLabCutInterface @@ -405,7 +405,7 @@ def check_read_nwb(self, nwbfile_path: str): @pytest.mark.skipif( - ndx_pose_version >= version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" + ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" ) class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): @@ -444,7 
+444,7 @@ def check_read_nwb(self, nwbfile_path: str): # This is currently structured to @pytest.mark.skipif( - ndx_pose_version >= version.parse("2.0.0"), reason="SLEAPInterface requires ndx-pose version < 2.0.0" + ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" ) class CustomTestSLEAPInterface(TestCase): savedir = OUTPUT_PATH From 9bdbbfe3d0f1cf19829c16a057526d49bae9877a Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Tue, 21 Jan 2025 13:03:14 -0600 Subject: [PATCH 16/20] skip doctests --- docs/conversion_examples_gallery/conftest.py | 8 ++++---- .../datainterfaces/behavior/sleap/sleapdatainterface.py | 2 +- .../behavior/test_pose_estimation_interfaces.py | 2 +- 3 files changed, 6 insertions(+), 6 deletions(-) diff --git a/docs/conversion_examples_gallery/conftest.py b/docs/conversion_examples_gallery/conftest.py index 7cffc2132..775eb4f9d 100644 --- a/docs/conversion_examples_gallery/conftest.py +++ b/docs/conversion_examples_gallery/conftest.py @@ -37,8 +37,8 @@ def pytest_runtest_setup(item): if version.parse(python_version) < version.parse("3.10") and os == "Darwin": pytest.skip("Skipping doctests for deeplabcut.rst on Python 3.9 and macOS") # Check if we are running the doctest from sleap.rst - # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 - elif test_file.name == "ecephys_pose_estimation.rst": + # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 and released + elif test_file.name in ["ecephys_pose_estimation.rst", "sleap.rst"]: ndx_pose_version = version.parse(importlib_version("ndx-pose")) - if ndx_pose_version >= version.parse("2.0.0"): - pytest.skip("Skipping doctests because sleeps only run when ndx-pose version < 2.0.0") + if ndx_pose_version >= version.parse("0.2.0"): + pytest.skip("Skipping doctests because SLEAP doctests only run when ndx-pose version < 0.2.0") diff --git
a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py index 6cbb1ce54..343a38e55 100644 --- a/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py +++ b/src/neuroconv/datainterfaces/behavior/sleap/sleapdatainterface.py @@ -59,7 +59,7 @@ def __init__( ndx_pose_version = version("ndx-pose") - # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 + # TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 and released if version_parse.parse(ndx_pose_version) != version_parse.parse("0.1.1"): raise ImportError( "SLEAP interface requires ndx-pose version 0.1.1. " diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index ef985b6db..4ead0426b 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -35,7 +35,7 @@ from packaging import version python_version = version.parse(python_version()) -# TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 +# TODO: remove after this is merged https://github.com/talmolab/sleap-io/pull/143 and released ndx_pose_version = version.parse(importlib_version("ndx-pose")) From 814a2487964a1d3d4cdd9eae102495fe4009fd8c Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 23 Jan 2025 11:03:10 -0600 Subject: [PATCH 17/20] Update CHANGELOG.md Co-authored-by: Paul Adkisson --- CHANGELOG.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 5660707c4..6df9fd842 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -6,7 +6,7 @@ ## Features * Added `metadata` and `conversion_options` as arguments to `NWBConverter.temporally_align_data_interfaces` [PR #1162](https://github.com/catalystneuro/neuroconv/pull/1162) -* Use the latest version of ndx-pose for 
`DeepLabCutInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) +* Use the latest version of ndx-pose for `DeepLabCutInterface` and `LightningPoseDataInterface` [PR #1128](https://github.com/catalystneuro/neuroconv/pull/1128) ## Improvements * Interfaces and converters now have `verbose=False` by default [PR #1153](https://github.com/catalystneuro/neuroconv/pull/1153) From f37e09bf417f8fee6e5052b995b51505935d3c7e Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 23 Jan 2025 11:12:59 -0600 Subject: [PATCH 18/20] use get_module --- .../behavior/deeplabcut/_dlc_utils.py | 18 ++++++------------ 1 file changed, 6 insertions(+), 12 deletions(-) diff --git a/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py b/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py index bf087afda..1c64c59a9 100644 --- a/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py +++ b/src/neuroconv/datainterfaces/behavior/deeplabcut/_dlc_utils.py @@ -10,6 +10,8 @@ from pynwb import NWBFile from ruamel.yaml import YAML +from ....tools import get_module + def _read_config(config_file_path: FilePath) -> dict: """ @@ -290,21 +292,13 @@ def _write_pes_to_nwbfile( subject=subject, ) - # Create Skeletons container - if "behavior" not in nwbfile.processing: - behavior_processing_module = nwbfile.create_processing_module( - name="behavior", description="processed behavioral data" - ) + behavior_processing_module = get_module(nwbfile=nwbfile, name="behavior", description="processed behavioral data") + if "Skeletons" not in behavior_processing_module.data_interfaces: skeletons = Skeletons(skeletons=[skeleton]) behavior_processing_module.add(skeletons) else: - behavior_processing_module = nwbfile.processing["behavior"] - if "Skeletons" not in behavior_processing_module.data_interfaces: - skeletons = Skeletons(skeletons=[skeleton]) - behavior_processing_module.add(skeletons) - else: - skeletons = behavior_processing_module["Skeletons"] - 
skeletons.add_skeletons(skeleton) + skeletons = behavior_processing_module["Skeletons"] + skeletons.add_skeletons(skeleton) pose_estimation_series = [] for keypoint in keypoints: From 7e7cf055df41a837ece21800c7be21a88ed047cd Mon Sep 17 00:00:00 2001 From: Heberto Mayorquin Date: Thu, 23 Jan 2025 11:39:16 -0600 Subject: [PATCH 19/20] add skeleton to dlc --- src/neuroconv/basedatainterface.py | 4 +- .../test_pose_estimation_interfaces.py | 388 +++++++++--------- 2 files changed, 194 insertions(+), 198 deletions(-) diff --git a/src/neuroconv/basedatainterface.py b/src/neuroconv/basedatainterface.py index 64af908e3..95b80f6d7 100644 --- a/src/neuroconv/basedatainterface.py +++ b/src/neuroconv/basedatainterface.py @@ -212,9 +212,7 @@ def run_conversion( @staticmethod def get_default_backend_configuration( nwbfile: NWBFile, - # TODO: when all H5DataIO prewraps are gone, introduce Zarr safely - # backend: Union[Literal["hdf5", "zarr"]], - backend: Literal["hdf5"] = "hdf5", + backend: Literal["hdf5", "zarr"] = "hdf5", ) -> Union[HDF5BackendConfiguration, ZarrBackendConfiguration]: """ Fill and return a default backend configuration to serve as a starting point for further customization. 
diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index 4ead0426b..b4ab26c52 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -1,5 +1,4 @@ import sys -import unittest from datetime import datetime, timezone from pathlib import Path @@ -178,6 +177,163 @@ def check_read_nwb(self, nwbfile_path: str): assert pose_estimation_series.confidence.shape[0] == 10 +@pytest.mark.skipif( + ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" +) +class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): + + data_interface_cls = SLEAPInterface + interface_kwargs = dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), + video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), + ) + save_directory = OUTPUT_PATH + + def check_read_nwb(self, nwbfile_path: str): # This is currently structured to be file-specific + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + assert "SLEAP_VIDEO_000_20190128_113421" in nwbfile.processing + processing_module_interfaces = nwbfile.processing["SLEAP_VIDEO_000_20190128_113421"].data_interfaces + assert "track=track_0" in processing_module_interfaces + + pose_estimation_series_in_nwb = processing_module_interfaces["track=track_0"].pose_estimation_series + expected_pose_estimation_series = [ + "abdomen", + "eyeL", + "eyeR", + "forelegL4", + "forelegR4", + "head", + "hindlegL4", + "hindlegR4", + "midlegL4", + "midlegR4", + "thorax", + "wingL", + "wingR", + ] + + assert set(pose_estimation_series_in_nwb) == set(expected_pose_estimation_series) + + +@pytest.mark.skipif( + ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" +) +class 
CustomTestSLEAPInterface(TestCase): + savedir = OUTPUT_PATH + + @parameterized.expand( + [ + param( + data_interface=SLEAPInterface, + interface_kwargs=dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), + ), + ) + ] + ) + def test_sleap_to_nwb_interface(self, data_interface, interface_kwargs): + nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") + + interface = SLEAPInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) + + slp_predictions_path = interface_kwargs["file_path"] + labels = sleap_io.load_slp(slp_predictions_path) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + # Test matching number of processing modules + number_of_videos = len(labels.videos) + assert len(nwbfile.processing) == number_of_videos + + # Test processing module naming as video + processing_module_name = "SLEAP_VIDEO_000_20190128_113421" + assert processing_module_name in nwbfile.processing + + # For this case we have as many containers as tracks + # Each track usually represents a subject + processing_module = nwbfile.processing[processing_module_name] + processing_module_interfaces = processing_module.data_interfaces + assert len(processing_module_interfaces) == len(labels.tracks) + + # Test name of PoseEstimation containers + extracted_container_names = processing_module_interfaces.keys() + for track in labels.tracks: + expected_track_name = f"track={track.name}" + assert expected_track_name in extracted_container_names + + # Test one PoseEstimation container + container_name = f"track={track.name}" + pose_estimation_container = processing_module_interfaces[container_name] + # Test that the skeleton nodes are store as nodes in containers + expected_node_names = [node.name for node in 
labels.skeletons[0]] + assert expected_node_names == list(pose_estimation_container.nodes[:]) + + # Test that each PoseEstimationSeries is named as a node + for node_name in pose_estimation_container.nodes[:]: + assert node_name in pose_estimation_container.pose_estimation_series + + @parameterized.expand( + [ + param( + data_interface=SLEAPInterface, + interface_kwargs=dict( + file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.slp"), + video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), + ), + ) + ] + ) + def test_sleap_interface_timestamps_propagation(self, data_interface, interface_kwargs): + nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") + + interface = SLEAPInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) + interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) + + slp_predictions_path = interface_kwargs["file_path"] + labels = sleap_io.load_slp(slp_predictions_path) + + from neuroconv.datainterfaces.behavior.sleap.sleap_utils import ( + extract_timestamps, + ) + + expected_timestamps = set(extract_timestamps(interface_kwargs["video_file_path"])) + + with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: + nwbfile = io.read() + # Test matching number of processing modules + number_of_videos = len(labels.videos) + assert len(nwbfile.processing) == number_of_videos + + # Test processing module naming as video + video_name = Path(labels.videos[0].filename).stem + processing_module_name = f"SLEAP_VIDEO_000_{video_name}" + + # For this case we have as many containers as tracks + processing_module_interfaces = nwbfile.processing[processing_module_name].data_interfaces + + extracted_container_names = processing_module_interfaces.keys() + for track in labels.tracks: + expected_track_name = f"track={track.name}" + assert expected_track_name in 
extracted_container_names + + container_name = f"track={track.name}" + pose_estimation_container = processing_module_interfaces[container_name] + + # Test that each PoseEstimationSeries is named as a node + for node_name in pose_estimation_container.nodes[:]: + pose_estimation_series = pose_estimation_container.pose_estimation_series[node_name] + extracted_timestamps = pose_estimation_series.timestamps[:] + + # Some frames do not have predictions associated with them, so we test for sub-set + assert set(extracted_timestamps).issubset(expected_timestamps) + + @pytest.mark.skipif( platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", @@ -220,10 +376,10 @@ def check_read_nwb(self, nwbfile_path: str): assert "behavior" in nwbfile.processing processing_module_interfaces = nwbfile.processing["behavior"].data_interfaces assert "PoseEstimationDeepLabCut" in processing_module_interfaces + assert "Skeletons" in processing_module_interfaces - pose_estimation_series_in_nwb = processing_module_interfaces[ - "PoseEstimationDeepLabCut" - ].pose_estimation_series + pose_estimation_container = processing_module_interfaces["PoseEstimationDeepLabCut"] + pose_estimation_series_in_nwb = pose_estimation_container.pose_estimation_series expected_pose_estimation_series = ["ind1_leftear", "ind1_rightear", "ind1_snout", "ind1_tailbase"] expected_pose_estimation_series_are_in_nwb_file = [ @@ -232,48 +388,8 @@ def check_read_nwb(self, nwbfile_path: str): assert all(expected_pose_estimation_series_are_in_nwb_file) - -@pytest.fixture -def clean_pose_extension_import(): - modules_to_remove = [m for m in sys.modules if m.startswith("ndx_pose")] - for module in modules_to_remove: - del sys.modules[module] - - -@pytest.mark.skipif( - platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), - 
reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", -) -def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): - """ - Test that the DeepLabCutInterface writes correctly without importing the ndx-pose extension. - See issues: - https://github.com/catalystneuro/neuroconv/issues/1114 - https://github.com/rly/ndx-pose/issues/36 - - """ - - interface_kwargs = dict( - file_path=str( - BEHAVIOR_DATA_PATH - / "DLC" - / "open_field_without_video" - / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" - ), - config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), - ) - - interface = DeepLabCutInterface(**interface_kwargs) - metadata = interface.get_metadata() - metadata["NWBFile"]["session_start_time"] = datetime(2023, 7, 24, 9, 30, 55, 440600, tzinfo=timezone.utc) - - nwbfile_path = tmp_path / "test.nwb" - interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) - with NWBHDF5IO(path=nwbfile_path, mode="r") as io: - read_nwbfile = io.read() - pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimationDeepLabCut"] - - assert len(pose_estimation_container.fields) > 0 + skeleton = pose_estimation_container.skeleton + assert skeleton.nodes[:].tolist() == ["snout", "leftear", "rightear", "tailbase"] @pytest.mark.skipif( @@ -404,162 +520,44 @@ def check_read_nwb(self, nwbfile_path: str): assert all(expected_pose_estimation_series_are_in_nwb_file) -@pytest.mark.skipif( - ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" -) -class TestSLEAPInterface(DataInterfaceTestMixin, TemporalAlignmentMixin): - - data_interface_cls = SLEAPInterface - interface_kwargs = dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), - video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), - ) - 
save_directory = OUTPUT_PATH - - def check_read_nwb(self, nwbfile_path: str): # This is currently structured to be file-specific - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - assert "SLEAP_VIDEO_000_20190128_113421" in nwbfile.processing - processing_module_interfaces = nwbfile.processing["SLEAP_VIDEO_000_20190128_113421"].data_interfaces - assert "track=track_0" in processing_module_interfaces - - pose_estimation_series_in_nwb = processing_module_interfaces["track=track_0"].pose_estimation_series - expected_pose_estimation_series = [ - "abdomen", - "eyeL", - "eyeR", - "forelegL4", - "forelegR4", - "head", - "hindlegL4", - "hindlegR4", - "midlegL4", - "midlegR4", - "thorax", - "wingL", - "wingR", - ] - - assert set(pose_estimation_series_in_nwb) == set(expected_pose_estimation_series) +@pytest.fixture +def clean_pose_extension_import(): + modules_to_remove = [m for m in sys.modules if m.startswith("ndx_pose")] + for module in modules_to_remove: + del sys.modules[module] @pytest.mark.skipif( - ndx_pose_version >= version.parse("0.2.0"), reason="SLEAPInterface requires ndx-pose version < 0.2.0" + platform == "darwin" and python_version < version.parse("3.10") or ndx_pose_version < version.parse("0.2.0"), + reason="Interface requires ndx-pose version >= 0.2.0 and not supported on macOS with Python < 3.10", ) -class CustomTestSLEAPInterface(TestCase): - savedir = OUTPUT_PATH - - @parameterized.expand( - [ - param( - data_interface=SLEAPInterface, - interface_kwargs=dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "predictions_1.2.7_provenance_and_tracking.slp"), - ), - ) - ] - ) - def test_sleap_to_nwb_interface(self, data_interface, interface_kwargs): - nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") - - interface = SLEAPInterface(**interface_kwargs) - metadata = interface.get_metadata() - metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - 
interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) - - slp_predictions_path = interface_kwargs["file_path"] - labels = sleap_io.load_slp(slp_predictions_path) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - # Test matching number of processing modules - number_of_videos = len(labels.videos) - assert len(nwbfile.processing) == number_of_videos - - # Test processing module naming as video - processing_module_name = "SLEAP_VIDEO_000_20190128_113421" - assert processing_module_name in nwbfile.processing - - # For this case we have as many containers as tracks - # Each track usually represents a subject - processing_module = nwbfile.processing[processing_module_name] - processing_module_interfaces = processing_module.data_interfaces - assert len(processing_module_interfaces) == len(labels.tracks) - - # Test name of PoseEstimation containers - extracted_container_names = processing_module_interfaces.keys() - for track in labels.tracks: - expected_track_name = f"track={track.name}" - assert expected_track_name in extracted_container_names - - # Test one PoseEstimation container - container_name = f"track={track.name}" - pose_estimation_container = processing_module_interfaces[container_name] - # Test that the skeleton nodes are store as nodes in containers - expected_node_names = [node.name for node in labels.skeletons[0]] - assert expected_node_names == list(pose_estimation_container.nodes[:]) +def test_deep_lab_cut_import_pose_extension_bug(clean_pose_extension_import, tmp_path): + """ + Test that the DeepLabCutInterface writes correctly without importing the ndx-pose extension. 
+ See issues: + https://github.com/catalystneuro/neuroconv/issues/1114 + https://github.com/rly/ndx-pose/issues/36 - # Test that each PoseEstimationSeries is named as a node - for node_name in pose_estimation_container.nodes[:]: - assert node_name in pose_estimation_container.pose_estimation_series + """ - @parameterized.expand( - [ - param( - data_interface=SLEAPInterface, - interface_kwargs=dict( - file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.slp"), - video_file_path=str(BEHAVIOR_DATA_PATH / "sleap" / "melanogaster_courtship.mp4"), - ), - ) - ] + interface_kwargs = dict( + file_path=str( + BEHAVIOR_DATA_PATH + / "DLC" + / "open_field_without_video" + / "m3v1mp4DLC_resnet50_openfieldAug20shuffle1_30000.h5" + ), + config_file_path=str(BEHAVIOR_DATA_PATH / "DLC" / "open_field_without_video" / "config.yaml"), ) - def test_sleap_interface_timestamps_propagation(self, data_interface, interface_kwargs): - nwbfile_path = str(self.savedir / f"{data_interface.__name__}.nwb") - interface = SLEAPInterface(**interface_kwargs) - metadata = interface.get_metadata() - metadata["NWBFile"].update(session_start_time=datetime.now().astimezone()) - interface.run_conversion(nwbfile_path=nwbfile_path, overwrite=True, metadata=metadata) - - slp_predictions_path = interface_kwargs["file_path"] - labels = sleap_io.load_slp(slp_predictions_path) - - from neuroconv.datainterfaces.behavior.sleap.sleap_utils import ( - extract_timestamps, - ) - - expected_timestamps = set(extract_timestamps(interface_kwargs["video_file_path"])) - - with NWBHDF5IO(path=nwbfile_path, mode="r", load_namespaces=True) as io: - nwbfile = io.read() - # Test matching number of processing modules - number_of_videos = len(labels.videos) - assert len(nwbfile.processing) == number_of_videos - - # Test processing module naming as video - video_name = Path(labels.videos[0].filename).stem - processing_module_name = f"SLEAP_VIDEO_000_{video_name}" - - # For this case we have as many containers as 
tracks - processing_module_interfaces = nwbfile.processing[processing_module_name].data_interfaces - - extracted_container_names = processing_module_interfaces.keys() - for track in labels.tracks: - expected_track_name = f"track={track.name}" - assert expected_track_name in extracted_container_names - - container_name = f"track={track.name}" - pose_estimation_container = processing_module_interfaces[container_name] - - # Test that each PoseEstimationSeries is named as a node - for node_name in pose_estimation_container.nodes[:]: - pose_estimation_series = pose_estimation_container.pose_estimation_series[node_name] - extracted_timestamps = pose_estimation_series.timestamps[:] - - # Some frames do not have predictions associated with them, so we test for sub-set - assert set(extracted_timestamps).issubset(expected_timestamps) + interface = DeepLabCutInterface(**interface_kwargs) + metadata = interface.get_metadata() + metadata["NWBFile"]["session_start_time"] = datetime(2023, 7, 24, 9, 30, 55, 440600, tzinfo=timezone.utc) + nwbfile_path = tmp_path / "test.nwb" + interface.run_conversion(nwbfile_path=nwbfile_path, metadata=metadata, overwrite=True) + with NWBHDF5IO(path=nwbfile_path, mode="r") as io: + read_nwbfile = io.read() + pose_estimation_container = read_nwbfile.processing["behavior"]["PoseEstimationDeepLabCut"] -if __name__ == "__main__": - unittest.main() + assert len(pose_estimation_container.fields) > 0 From 901bc35dbbabff61b1bd147e6c4b188c2d8a0b41 Mon Sep 17 00:00:00 2001 From: pauladkisson Date: Thu, 23 Jan 2025 09:51:51 -0800 Subject: [PATCH 20/20] added assertions for skeletons to lightning --- tests/test_on_data/behavior/test_pose_estimation_interfaces.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py index b4ab26c52..5dbaa4633 100644 --- a/tests/test_on_data/behavior/test_pose_estimation_interfaces.py +++ 
b/tests/test_on_data/behavior/test_pose_estimation_interfaces.py @@ -112,6 +112,7 @@ def check_read_nwb(self, nwbfile_path: str): # Replacing assertIn with pytest-style assert assert "behavior" in nwbfile.processing assert self.pose_estimation_name in nwbfile.processing["behavior"].data_interfaces + assert "Skeletons" in nwbfile.processing["behavior"].data_interfaces pose_estimation_container = nwbfile.processing["behavior"].data_interfaces[self.pose_estimation_name] @@ -133,6 +134,8 @@ def check_read_nwb(self, nwbfile_path: str): # Replacing assertEqual with pytest-style assert assert len(pose_estimation_container.pose_estimation_series) == len(self.expected_keypoint_names) + assert pose_estimation_container.skeleton.nodes[:].tolist() == self.expected_keypoint_names + for keypoint_name in self.expected_keypoint_names: series_metadata = pose_estimation_metadata[keypoint_name]