# ndx-depth-moseq Extension for NWB
ndx-depth-moseq is a standardized format for storing the output of [depth-moseq](https://dattalab.github.io/moseq2-website/index.html), an automatic motion sequencing algorithm, in NWB. Currently, this extension only supports the output of depth-moseq-extract, but will be extended as needed to cover the other types of depth-moseq outputs.
This extension consists of 3 new neurodata types:
- `DepthImageSeries`, which is a simple extension of `pynwb.image.ImageSeries` for depth video with a constant reference depth.
- `MoSeqExtractParameterGroup`, which stores all the various parameters from the depth-moseq-extract algorithm.
- `MoSeqExtractGroup`, which stores all the relevant depth-moseq outputs, including the `DepthImageSeries` and `MoSeqExtractParameterGroup`, as well as various native neurodata types such as `Position`.
## Installation
```
pip install ndx-depth-moseq
```
## Usage
```python
"""Example of usage with mock data."""
from datetime import datetime
from pytz import timezone
import numpy as np
from pynwb.image import GrayscaleImage, ImageMaskSeries
from pynwb import NWBFile, TimeSeries
from pynwb.behavior import (
CompassDirection,
Position,
SpatialSeries,
)
from ndx_depth_moseq import DepthImageSeries, MoSeqExtractGroup, MoSeqExtractParameterGroup
# Define mock data (this will be replaced with the actual data)
version = "0.1.0"
num_frames = 10
num_rows = 512
num_cols = 424

# Video-shaped arrays: one frame per timestamp, each frame num_rows x num_cols.
video_shape = (num_frames, num_rows, num_cols)
frame_shape = (num_rows, num_cols)
processed_depth_video = np.zeros(video_shape)
loglikelihood_video = np.zeros(video_shape)
timestamps = np.arange(num_frames)
background = np.zeros(frame_shape)
is_flipped = np.zeros(num_frames, dtype=bool)
roi = np.zeros(frame_shape)
true_depth = 1.0

# Per-frame kinematic traces.  The pixel-size traces start at 1 (not 0) so the
# px->mm conversion later in the example does not divide by zero.
kinematic_var_names = [
    'centroid_x_mm', 'centroid_y_mm', 'height_ave_mm', 'angle',
    'velocity_2d_mm', 'velocity_3d_mm', 'velocity_theta',
    'length_mm', 'width_mm', 'area_px', 'width_px', 'length_px',
]
kinematic_vars = {
    name: (np.ones(num_frames) if name in ('length_px', 'width_px') else np.zeros(num_frames))
    for name in kinematic_var_names
}
# MoSeq-extract configuration values.  Numeric entries are numpy scalars
# (np.int64 / np.float64), matching the dtypes of the original example.
parameters = {
    'angle_hampel_sig': np.int64(3),
    'angle_hampel_span': np.int64(5),
    'bg_roi_depth_range_min': np.int64(0),
    'bg_roi_depth_range_max': np.int64(1000),
    'bg_roi_dilate_x': np.int64(10),
    'bg_roi_dilate_y': np.int64(10),
    'bg_roi_fill_holes': True,
    'bg_roi_gradient_filter': True,
    'bg_roi_gradient_kernel': np.int64(5),
    'bg_roi_gradient_threshold': np.int64(10),
    'bg_roi_index': np.int64(0),
    'bg_roi_shape': 'ellipse',
    'bg_roi_weight_area': np.float64(0.5),
    'bg_roi_weight_extent': np.float64(0.5),
    'bg_roi_weight_dist': np.float64(0.5),
    'cable_filter_iters': np.int64(5),
    'cable_filter_shape': 'ellipse',
    'cable_filter_size_x': np.int64(5),
    'cable_filter_size_y': np.int64(5),
    'centroid_hampel_sig': np.int64(3),
    'centroid_hampel_span': np.int64(5),
    'chunk_overlap': np.int64(0),
    'chunk_size': np.int64(100),
    'compress': False,
    'compress_chunk_size': np.int64(100),
    'compress_threads': np.int64(1),
    'config_file': 'config.yaml',
    'crop_size_width': np.int64(512),
    'crop_size_height': np.int64(424),
    'flip_classifier': 'flip_classifier.pkl',
    'flip_classifier_smoothing': np.int64(5),
    'fps': np.int64(30),
    'frame_dtype': 'uint16',
    'frame_trim_beginning': np.int64(0),
    'frame_trim_end': np.int64(0),
    'max_height': np.int64(1000),
    'min_height': np.int64(0),
    'model_smoothing_clips_x': np.int64(5),
    'model_smoothing_clips_y': np.int64(5),
    'spatial_filter_size': np.int64(5),
    'tail_filter_iters': np.int64(5),
    'tail_filter_shape': 'ellipse',
    'tail_filter_size_x': np.int64(5),
    'tail_filter_size_y': np.int64(5),
    'temporal_filter_size': np.int64(5),
    'tracking_model_init': 'mean',
    'tracking_model_ll_clip': np.int64(5),
    'tracking_model_ll_threshold': np.int64(5),
    'tracking_model_mask_threshold': np.int64(5),
    'tracking_model_segment': True,
    'use_plane_bground': True,
    'use_tracking_model': True,
    'write_movie': False,
}
# Create the NWB file
nwbfile = NWBFile(
    session_description="session_description",
    identifier="identifier",
    session_start_time=datetime.now(timezone("US/Pacific")),
)

# Add Imaging Data
# Device record for the depth camera that acquired the video.
kinect = nwbfile.create_device(name="kinect", manufacturer="Microsoft", description="Microsoft Kinect 2")
# Per-frame flip-classifier output.  Its `timestamps` object is reused by
# every series below so they all share one common time base.
flipped_series = TimeSeries(
    name="flipped_series",
    data=is_flipped,
    unit="a.u.",
    timestamps=timestamps,
    description="Boolean array indicating whether the image was flipped left/right",
)
# Extracted depth video, stored with the extension's DepthImageSeries type,
# which adds a constant reference depth (distant_depth) to ImageSeries.
# (Rebinds the name from the raw array to the NWB object.)
processed_depth_video = DepthImageSeries(
    name="processed_depth_video",
    data=processed_depth_video,
    unit="millimeters",
    format="raw",
    timestamps=flipped_series.timestamps,
    description="3D array of depth frames (nframes x w x h, in mm)",
    distant_depth=true_depth,
    device=kinect,
)
# Tracking-model log-likelihood video, linked to the depth video it masks.
loglikelihood_video = ImageMaskSeries(
    name="loglikelihood_video",
    data=loglikelihood_video,
    masked_imageseries=processed_depth_video,
    unit="a.u.",
    format="raw",
    timestamps=flipped_series.timestamps,
    description="Log-likelihood values from the tracking model (nframes x w x h)",
    device=kinect,
)
# Static images computed during extraction: arena background and ROI.
background = GrayscaleImage(
    name="background",
    data=background,
    description="Computed background image.",
)
roi = GrayscaleImage(
    name="roi",
    data=roi,
    description="Computed region of interest.",
)
# Add Position Data
# Stack centroid x/y and average height column-wise into an (nframes, 3) array.
position_data = np.vstack(
    (kinematic_vars["centroid_x_mm"], kinematic_vars["centroid_y_mm"], kinematic_vars["height_ave_mm"])
).T
position_series = SpatialSeries(
    name="position",
    description="Position (x, y, height) in an open field.",
    data=position_data,
    timestamps=flipped_series.timestamps,
    reference_frame="top left",
    unit="mm",
)
position = Position(spatial_series=position_series, name="position")

# Add Compass Direction Data
# Heading angle wrapped in a CompassDirection container.
heading_2d_series = SpatialSeries(
    name="heading_2d",
    description="Head orientation.",
    data=kinematic_vars["angle"],
    timestamps=flipped_series.timestamps,
    reference_frame="top left",
    unit="radians",
)
heading_2d = CompassDirection(spatial_series=heading_2d_series, name="heading_2d")

# Add speed/velocity data
speed_2d = TimeSeries(
    name="speed_2d",
    description="2D speed (mm / frame), note that missing frames are not accounted for",
    data=kinematic_vars["velocity_2d_mm"],
    timestamps=flipped_series.timestamps,
    unit="mm/frame",
)
speed_3d = TimeSeries(
    name="speed_3d",
    description="3D speed (mm / frame), note that missing frames are not accounted for",
    data=kinematic_vars["velocity_3d_mm"],
    timestamps=flipped_series.timestamps,
    unit="mm/frame",
)
angular_velocity_2d = TimeSeries(
    name="angular_velocity_2d",
    description="Angular component of velocity (arctan(vel_x, vel_y))",
    data=kinematic_vars["velocity_theta"],
    timestamps=flipped_series.timestamps,
    unit="radians/frame",
)

# Add length/width/area data
length = TimeSeries(
    name="length",
    description="Length of mouse (mm)",
    data=kinematic_vars["length_mm"],
    timestamps=flipped_series.timestamps,
    unit="mm",
)
width = TimeSeries(
    name="width",
    description="Width of mouse (mm)",
    data=kinematic_vars["width_mm"],
    timestamps=flipped_series.timestamps,
    unit="mm",
)
# Per-frame px->mm scale factors from the paired pixel/mm measurements, used
# to convert the pixel-wise area to mm^2.  (The mock data sets the *_px
# traces to 1 so these element-wise divisions are safe.)
width_px_to_mm = kinematic_vars["width_mm"] / kinematic_vars["width_px"]
length_px_to_mm = kinematic_vars["length_mm"] / kinematic_vars["length_px"]
area_px_to_mm2 = width_px_to_mm * length_px_to_mm
area_mm2 = kinematic_vars["area_px"] * area_px_to_mm2
area = TimeSeries(
    name="area",
    description="Pixel-wise area of mouse (mm^2)",
    data=area_mm2,
    timestamps=flipped_series.timestamps,
    unit="mm^2",
)
# Add Parameters
# Wrap the raw parameter dict in the extension's parameter-group type.
# (Rebinds `parameters` from the plain dict to the NWB object.)
parameters = MoSeqExtractParameterGroup(name="parameters", **parameters)

# Add MoseqExtractGroup
# Single container bundling every depth-moseq-extract output defined above.
moseq_extract_group = MoSeqExtractGroup(
    name="moseq_extract_group",
    version=version,
    parameters=parameters,
    background=background,
    processed_depth_video=processed_depth_video,
    loglikelihood_video=loglikelihood_video,
    roi=roi,
    flipped_series=flipped_series,
    depth_camera=kinect,
    position=position,
    heading_2d=heading_2d,
    speed_2d=speed_2d,
    speed_3d=speed_3d,
    angular_velocity_2d=angular_velocity_2d,
    length=length,
    width=width,
    area=area,
)
# Add data into a behavioral processing module
behavior_module = nwbfile.create_processing_module(
    name="behavior",
    description="Processed behavioral data from MoSeq",
)
behavior_module.add(moseq_extract_group)
```
---
This extension was created using [ndx-template](https://github.com/nwb-extensions/ndx-template).
Raw data
{
"_id": null,
"home_page": null,
"name": "ndx-depth-moseq",
"maintainer": null,
"docs_url": null,
"requires_python": null,
"maintainer_email": null,
"keywords": "NeurodataWithoutBorders, NWB, nwb-extension, ndx-extension",
"author": "Paul Adkisson",
"author_email": "paul.wesley.adkisson@gmail.com",
"download_url": "https://files.pythonhosted.org/packages/92/10/3fdc7ad86a7512e8bc104234407239551b75f3845896f8e700781ef43a62/ndx-depth-moseq-0.1.2.tar.gz",
"platform": null,
"description": "# ndx-depth-moseq Extension for NWB\n\nndx-depth-moseq is a standardized format for storing the output of [depth-moseq](https://dattalab.github.io/moseq2-website/index.html), an automatic motion sequencing algorithm, in NWB. Currently, this extension only supports the output of depth-moseq-extract, but will be extended as needed to cover the other types of depth-moseq outputs.\n\nThis extension consists of 3 new neurodata types:\n\n- `DepthImageSeries`, which is a simple extension of `pynwb.image.ImageSeries` for depth video with a constant reference depth.\n- `MoSeqExtractParameterGroup`, which stores all the various parameters from the depth-moseq-extract algorithm.\n- `MoSeqExtractGroup`, which stores all the relevant depth-moseq outputs including the `DepthImageSeries`, `MoSeqExtractParameterGroup`, as well as various native neurodata types such as the `Position`.\n\n## Installation\n```\npip install ndx-depth-moseq\n```\n\n## Usage\n\n```python\n\"\"\"Example of usage with mock data.\"\"\"\nfrom datetime import datetime\nfrom pytz import timezone\nimport numpy as np\nfrom pynwb.image import GrayscaleImage, ImageMaskSeries\nfrom pynwb import NWBFile, TimeSeries\nfrom pynwb.behavior import (\n CompassDirection,\n Position,\n SpatialSeries,\n)\nfrom ndx_depth_moseq import DepthImageSeries, MoSeqExtractGroup, MoSeqExtractParameterGroup\n\n# Define mock data (this will be replaced with the actual data) \nversion = \"0.1.0\"\nnum_frames = 10\nnum_rows = 512\nnum_cols = 424\nprocessed_depth_video = np.zeros((num_frames, num_rows, num_cols))\nloglikelihood_video = np.zeros((num_frames, num_rows, num_cols))\ntimestamps = np.arange(num_frames)\nbackground = np.zeros((num_rows, num_cols))\nis_flipped = np.zeros(num_frames, dtype=bool)\nroi = np.zeros((num_rows, num_cols))\ntrue_depth = 1.0\nkinematic_var_names = ['centroid_x_mm', 'centroid_y_mm', 'height_ave_mm', 'angle', 'velocity_2d_mm', 'velocity_3d_mm', 'velocity_theta', 'length_mm', 'width_mm', 
'area_px', 'width_px', 'length_px']\nkinematic_vars = {k: np.zeros(num_frames) for k in kinematic_var_names}\nkinematic_vars['length_px'] += 1\nkinematic_vars['width_px'] += 1\nparameters = {\n 'angle_hampel_sig': np.array([3], dtype=np.int64)[0],\n 'angle_hampel_span': np.array([5], dtype=np.int64)[0],\n 'bg_roi_depth_range_min': np.array([0], dtype=np.int64)[0],\n 'bg_roi_depth_range_max': np.array([1000], dtype=np.int64)[0],\n 'bg_roi_dilate_x': np.array([10], dtype=np.int64)[0],\n 'bg_roi_dilate_y': np.array([10], dtype=np.int64)[0],\n 'bg_roi_fill_holes': True,\n 'bg_roi_gradient_filter': True,\n 'bg_roi_gradient_kernel': np.array([5], dtype=np.int64)[0],\n 'bg_roi_gradient_threshold': np.array([10], dtype=np.int64)[0],\n 'bg_roi_index': np.array([0], dtype=np.int64)[0],\n 'bg_roi_shape': 'ellipse',\n 'bg_roi_weight_area': np.array([0.5], dtype=np.float64)[0],\n 'bg_roi_weight_extent': np.array([0.5], dtype=np.float64)[0],\n 'bg_roi_weight_dist': np.array([0.5], dtype=np.float64)[0],\n 'cable_filter_iters': np.array([5], dtype=np.int64)[0],\n 'cable_filter_shape': 'ellipse',\n 'cable_filter_size_x': np.array([5], dtype=np.int64)[0],\n 'cable_filter_size_y': np.array([5], dtype=np.int64)[0],\n 'centroid_hampel_sig': np.array([3], dtype=np.int64)[0],\n 'centroid_hampel_span': np.array([5], dtype=np.int64)[0],\n 'chunk_overlap': np.array([0], dtype=np.int64)[0],\n 'chunk_size': np.array([100], dtype=np.int64)[0],\n 'compress': False,\n 'compress_chunk_size': np.array([100], dtype=np.int64)[0],\n 'compress_threads': np.array([1], dtype=np.int64)[0],\n 'config_file': 'config.yaml',\n 'crop_size_width': np.array([512], dtype=np.int64)[0],\n 'crop_size_height': np.array([424], dtype=np.int64)[0],\n 'flip_classifier': 'flip_classifier.pkl',\n 'flip_classifier_smoothing': np.array([5], dtype=np.int64)[0],\n 'fps': np.array([30], dtype=np.int64)[0],\n 'frame_dtype': 'uint16',\n 'frame_trim_beginning': np.array([0], dtype=np.int64)[0],\n 'frame_trim_end': np.array([0], 
dtype=np.int64)[0],\n 'max_height': np.array([1000], dtype=np.int64)[0],\n 'min_height': np.array([0], dtype=np.int64)[0],\n 'model_smoothing_clips_x': np.array([5], dtype=np.int64)[0],\n 'model_smoothing_clips_y': np.array([5], dtype=np.int64)[0],\n 'spatial_filter_size': np.array([5], dtype=np.int64)[0],\n 'tail_filter_iters': np.array([5], dtype=np.int64)[0],\n 'tail_filter_shape': 'ellipse',\n 'tail_filter_size_x': np.array([5], dtype=np.int64)[0],\n 'tail_filter_size_y': np.array([5], dtype=np.int64)[0],\n 'temporal_filter_size': np.array([5], dtype=np.int64)[0],\n 'tracking_model_init': 'mean',\n 'tracking_model_ll_clip': np.array([5], dtype=np.int64)[0],\n 'tracking_model_ll_threshold': np.array([5], dtype=np.int64)[0],\n 'tracking_model_mask_threshold': np.array([5], dtype=np.int64)[0],\n 'tracking_model_segment': True,\n 'use_plane_bground': True,\n 'use_tracking_model': True,\n 'write_movie': False,\n}\n\n# Create the NWB file\nnwbfile = NWBFile(\n session_description=\"session_description\",\n identifier=\"identifier\",\n session_start_time=datetime.now(timezone(\"US/Pacific\")),\n)\n\n# Add Imaging Data\nkinect = nwbfile.create_device(name=\"kinect\", manufacturer=\"Microsoft\", description=\"Microsoft Kinect 2\")\nflipped_series = TimeSeries(\n name=\"flipped_series\",\n data=is_flipped,\n unit=\"a.u.\",\n timestamps=timestamps,\n description=\"Boolean array indicating whether the image was flipped left/right\",\n)\nprocessed_depth_video = DepthImageSeries(\n name=\"processed_depth_video\",\n data=processed_depth_video,\n unit=\"millimeters\",\n format=\"raw\",\n timestamps=flipped_series.timestamps,\n description=\"3D array of depth frames (nframes x w x h, in mm)\",\n distant_depth=true_depth,\n device=kinect,\n)\nloglikelihood_video = ImageMaskSeries(\n name=\"loglikelihood_video\",\n data=loglikelihood_video,\n masked_imageseries=processed_depth_video,\n unit=\"a.u.\",\n format=\"raw\",\n timestamps=flipped_series.timestamps,\n 
description=\"Log-likelihood values from the tracking model (nframes x w x h)\",\n device=kinect,\n)\nbackground = GrayscaleImage(\n name=\"background\",\n data=background,\n description=\"Computed background image.\",\n)\nroi = GrayscaleImage(\n name=\"roi\",\n data=roi,\n description=\"Computed region of interest.\",\n)\n\n# Add Position Data\nposition_data = np.vstack(\n (kinematic_vars[\"centroid_x_mm\"], kinematic_vars[\"centroid_y_mm\"], kinematic_vars[\"height_ave_mm\"])\n).T\nposition_series = SpatialSeries(\n name=\"position\",\n description=\"Position (x, y, height) in an open field.\",\n data=position_data,\n timestamps=flipped_series.timestamps,\n reference_frame=\"top left\",\n unit=\"mm\",\n)\nposition = Position(spatial_series=position_series, name=\"position\")\n\n# Add Compass Direction Data\nheading_2d_series = SpatialSeries(\n name=\"heading_2d\",\n description=\"Head orientation.\",\n data=kinematic_vars[\"angle\"],\n timestamps=flipped_series.timestamps,\n reference_frame=\"top left\",\n unit=\"radians\",\n)\nheading_2d = CompassDirection(spatial_series=heading_2d_series, name=\"heading_2d\")\n\n# Add speed/velocity data\nspeed_2d = TimeSeries(\n name=\"speed_2d\",\n description=\"2D speed (mm / frame), note that missing frames are not accounted for\",\n data=kinematic_vars[\"velocity_2d_mm\"],\n timestamps=flipped_series.timestamps,\n unit=\"mm/frame\",\n)\nspeed_3d = TimeSeries(\n name=\"speed_3d\",\n description=\"3D speed (mm / frame), note that missing frames are not accounted for\",\n data=kinematic_vars[\"velocity_3d_mm\"],\n timestamps=flipped_series.timestamps,\n unit=\"mm/frame\",\n)\nangular_velocity_2d = TimeSeries(\n name=\"angular_velocity_2d\",\n description=\"Angular component of velocity (arctan(vel_x, vel_y))\",\n data=kinematic_vars[\"velocity_theta\"],\n timestamps=flipped_series.timestamps,\n unit=\"radians/frame\",\n)\n\n# Add length/width/area data\nlength = TimeSeries(\n name=\"length\",\n description=\"Length of mouse 
(mm)\",\n data=kinematic_vars[\"length_mm\"],\n timestamps=flipped_series.timestamps,\n unit=\"mm\",\n)\nwidth = TimeSeries(\n name=\"width\",\n description=\"Width of mouse (mm)\",\n data=kinematic_vars[\"width_mm\"],\n timestamps=flipped_series.timestamps,\n unit=\"mm\",\n)\nwidth_px_to_mm = kinematic_vars[\"width_mm\"] / kinematic_vars[\"width_px\"]\nlength_px_to_mm = kinematic_vars[\"length_mm\"] / kinematic_vars[\"length_px\"]\narea_px_to_mm2 = width_px_to_mm * length_px_to_mm\narea_mm2 = kinematic_vars[\"area_px\"] * area_px_to_mm2\narea = TimeSeries(\n name=\"area\",\n description=\"Pixel-wise area of mouse (mm^2)\",\n data=area_mm2,\n timestamps=flipped_series.timestamps,\n unit=\"mm^2\",\n)\n\n# Add Parameters\nparameters = MoSeqExtractParameterGroup(name=\"parameters\", **parameters)\n\n# Add MoseqExtractGroup\nmoseq_extract_group = MoSeqExtractGroup(\n name=\"moseq_extract_group\",\n version=version,\n parameters=parameters,\n background=background,\n processed_depth_video=processed_depth_video,\n loglikelihood_video=loglikelihood_video,\n roi=roi,\n flipped_series=flipped_series,\n depth_camera=kinect,\n position=position,\n heading_2d=heading_2d,\n speed_2d=speed_2d,\n speed_3d=speed_3d,\n angular_velocity_2d=angular_velocity_2d,\n length=length,\n width=width,\n area=area,\n)\n# Add data into a behavioral processing module\nbehavior_module = nwbfile.create_processing_module(\n name=\"behavior\",\n description=\"Processed behavioral data from MoSeq\",\n)\nbehavior_module.add(moseq_extract_group)\n```\n\n---\nThis extension was created using [ndx-template](https://github.com/nwb-extensions/ndx-template).\n",
"bugtrack_url": null,
"license": "BSD-3",
"summary": "Extension for MoSeq-extract output",
"version": "0.1.2",
"project_urls": null,
"split_keywords": [
"neurodatawithoutborders",
" nwb",
" nwb-extension",
" ndx-extension"
],
"urls": [
{
"comment_text": "",
"digests": {
"blake2b_256": "0a22dcd03b903566c8da021a5dcc2251577011f4a2aedf890dd90dc66da6565a",
"md5": "a44ddc66c65b342794d4992e69bdb987",
"sha256": "89cef6e36ad367755b2d5a6fd48c2c31d9bb0ada72371f4838c4db9d8e771ba4"
},
"downloads": -1,
"filename": "ndx_depth_moseq-0.1.2-py2.py3-none-any.whl",
"has_sig": false,
"md5_digest": "a44ddc66c65b342794d4992e69bdb987",
"packagetype": "bdist_wheel",
"python_version": "py2.py3",
"requires_python": null,
"size": 8175,
"upload_time": "2024-07-25T01:01:39",
"upload_time_iso_8601": "2024-07-25T01:01:39.907644Z",
"url": "https://files.pythonhosted.org/packages/0a/22/dcd03b903566c8da021a5dcc2251577011f4a2aedf890dd90dc66da6565a/ndx_depth_moseq-0.1.2-py2.py3-none-any.whl",
"yanked": false,
"yanked_reason": null
},
{
"comment_text": "",
"digests": {
"blake2b_256": "92103fdc7ad86a7512e8bc104234407239551b75f3845896f8e700781ef43a62",
"md5": "0e88f4f951460abe55bbc8e3327f7906",
"sha256": "6a849b6c712f944b6ef5dbaa4a46f9b1a771e0fdddc590a5bcd800e4230d305d"
},
"downloads": -1,
"filename": "ndx-depth-moseq-0.1.2.tar.gz",
"has_sig": false,
"md5_digest": "0e88f4f951460abe55bbc8e3327f7906",
"packagetype": "sdist",
"python_version": "source",
"requires_python": null,
"size": 18028,
"upload_time": "2024-07-25T01:01:40",
"upload_time_iso_8601": "2024-07-25T01:01:40.935775Z",
"url": "https://files.pythonhosted.org/packages/92/10/3fdc7ad86a7512e8bc104234407239551b75f3845896f8e700781ef43a62/ndx-depth-moseq-0.1.2.tar.gz",
"yanked": false,
"yanked_reason": null
}
],
"upload_time": "2024-07-25 01:01:40",
"github": false,
"gitlab": false,
"bitbucket": false,
"codeberg": false,
"lcname": "ndx-depth-moseq"
}