Add writer to plugin for exporting a Tracks layer to hdf (#369)
* Update plugin reader to return early for unsupported filetypes

* Add a function btrack.utils.napari_to_tracks

Converts napari layer data into a list of Tracklets

* Add hdf writer function to the plugin

* Add plugin writer to napari config

* Use empty dict for graph and properties if they are not layer metadata

* Determine parent, root, and children from graph if it exists

* Make black happy

* Add the tracks.h5 example dataset

* Set correct parent, root, and generation when converting napari to track

* Add test for btrack.utils.napari_to_tracks

The z-coordinates are not compared because dummy objects in the sample dataset have non-zero z-values even though it is a 2D dataset

* Use the constants.States enum when setting the default label rather than an int literal

Also make constants.States inherit from enum.IntEnum; otherwise NumPy complains that the enum member can't be used as a fill value with np.full_like (see the sketch after the btrack/constants.py diff below)

* Upgrade `cvxopt` (#348)

* Update `pre-commit` and make some other fixes

* Update version pins

* Remove references to `cvxopt`

* Add type

Co-authored-by: Alan R Lowe <[email protected]>

* Use `|=`

* Remove extra dependencies

---------

Co-authored-by: Alan R Lowe <[email protected]>

* Move `fixture`s into `conftest.py` (#331)

* Move `fixture`s into `conftest.py`

* Just import `btrack`

* Move `Container`

* Fix tests

* Rename writer

* Use `qtpy`

* Don't set widget values in the widgets (as done from file)  (#379)

* Remove unnecessary `.setValue` calls

* Check status done by config file

* Don't store refs when converting tracks to napari

* Fix description of return values from napari_to_tracks

---------

Co-authored-by: Patrick Roddy <[email protected]>
Co-authored-by: Alan R Lowe <[email protected]>
3 people authored Aug 8, 2023
1 parent efaad04 commit 2ec7728
Showing 8 changed files with 197 additions and 9 deletions.
3 changes: 2 additions & 1 deletion btrack/constants.py
@@ -69,7 +69,7 @@ class Fates(enum.Enum):


@enum.unique
-class States(enum.Enum):
+class States(enum.IntEnum):
INTERPHASE = 0
PROMETAPHASE = 1
METAPHASE = 2
@@ -96,3 +96,4 @@ class Dimensionality(enum.IntEnum):
TWO: int = 2
THREE: int = 3
FOUR: int = 4
FIVE: int = 5
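
As noted in the commit message, the switch to `enum.IntEnum` is what allows a state to be used as a NumPy fill value. A minimal sketch of the difference, not part of the diff (the stand-in enum and its value are hypothetical):

```python
import enum

import numpy as np


class States(enum.IntEnum):  # hypothetical stand-in for btrack.constants.States
    NULL = 5


track_id = np.zeros(4)

# IntEnum members behave like ints, so np.full_like can cast them to the
# array dtype; a plain enum.Enum member cannot be converted and would fail.
labels = np.full_like(track_id, fill_value=States.NULL)
print(labels)  # -> [5. 5. 5. 5.]
```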
13 changes: 11 additions & 2 deletions btrack/datasets.py
@@ -4,8 +4,8 @@
import pooch
from skimage.io import imread

-from .btypes import PyTrackObject
-from .io import import_CSV
+from .btypes import PyTrackObject, Tracklet
+from .io import HDF5FileHandler, import_CSV

BASE_URL = "https://raw.githubusercontent.com/lowe-lab-ucl/btrack-examples/main/"

@@ -69,3 +69,12 @@ def example_track_objects() -> list[PyTrackObject]:
file_path = example_track_objects_file()
objects = import_CSV(file_path)
return objects


def example_tracks() -> list[Tracklet]:
"""Return the example example localized and classified objected stored in an
HDF5 file as a list of `Tracklet`s."""
file_path = POOCH.fetch("examples/tracks.h5")
with HDF5FileHandler(file_path, "r", obj_type="obj_type_1") as reader:
tracks = reader.tracks
return tracks
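
A quick usage sketch for the new dataset helper (the `Tracklet` attributes printed here are the same ones exercised by the test added later in this diff):

```python
import btrack

# fetches (and caches) examples/tracks.h5 via pooch, then reads it back
tracks = btrack.datasets.example_tracks()

first = tracks[0]
print(len(tracks), first.ID, first.parent, first.root, first.generation)
```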
8 changes: 8 additions & 0 deletions btrack/napari.yaml
@@ -7,6 +7,9 @@ contributions:
- id: btrack.read_btrack
title: Read btrack files
python_name: btrack.napari.reader:get_reader
- id: btrack.write_hdf
title: Export Tracks to HDF
python_name: btrack.napari.writer:export_to_hdf
- id: btrack.track
title: Create Track
python_name: btrack.napari.main:create_btrack_widget
@@ -19,6 +22,11 @@
- '*.hdf5'
accepts_directories: false

writers:
- command: btrack.write_hdf
layer_types: ["tracks"]
filename_extensions: [".h5", ".hdf", ".hdf5"]

widgets:
- command: btrack.track
display_name: Track
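
With the `writers` contribution above registered, napari can route a save of a Tracks layer to `btrack.write_hdf` based on the file extension. A hedged sketch, assuming the plugin is registered under the name `btrack` and using napari's `Layer.save` API:

```python
import napari

from btrack import datasets, utils

viewer = napari.Viewer()

# build a Tracks layer from the example data; tracks_to_napari is shown
# further down in this diff
tracks = datasets.example_tracks()
data, properties, graph = utils.tracks_to_napari(tracks)
layer = viewer.add_tracks(data, properties=properties, graph=graph)

# the .h5 extension matches filename_extensions, so the save is dispatched
# to the new writer
layer.save("exported_tracks.h5", plugin="btrack")
```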
20 changes: 16 additions & 4 deletions btrack/napari/reader.py
@@ -2,11 +2,10 @@
This module is a reader plugin for btrack files for napari.
"""
import os
import pathlib
from collections.abc import Sequence
from typing import Callable, Optional, Union

-from napari_plugin_engine import napari_hook_implementation

from napari.types import LayerDataTuple

from btrack.io import HDF5FileHandler
@@ -17,7 +16,6 @@
ReaderFunction = Callable[[PathOrPaths], list[LayerDataTuple]]


-@napari_hook_implementation
def get_reader(path: PathOrPaths) -> Optional[ReaderFunction]:
"""A basic implementation of the napari_get_reader hook specification.
@@ -32,7 +30,21 @@ def get_reader(path: PathOrPaths) -> Optional[ReaderFunction]:
If the path is a recognized format, return a function that accepts the
same path or list of paths, and returns a list of layer data tuples.
"""
-    return reader_function
+    if isinstance(path, list):
+        # reader plugins may be handed single path, or a list of paths.
+        # if it is a list, it is assumed to be an image stack...
+        # so we are only going to look at the first file.
+        path = path[0]
+
+    # if we know we cannot read the file, we immediately return None.
+    supported_extensions = [
+        ".h5",
+        ".hdf",
+        ".hdf5",
+    ]
+    return (
+        reader_function if pathlib.Path(path).suffix in supported_extensions else None
+    )


def reader_function(path: PathOrPaths) -> list[LayerDataTuple]:
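
A sketch of the new early-return behaviour; the file names are hypothetical and nothing needs to exist on disk, since only the extension is inspected:

```python
from btrack.napari.reader import get_reader

# recognised HDF5 extensions return the reader function...
assert get_reader("cells_tracked.h5") is not None
assert get_reader(["cells_tracked.hdf5"]) is not None  # a list uses its first path

# ...while unsupported file types now return None immediately
assert get_reader("segmentation.tif") is None
```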
30 changes: 30 additions & 0 deletions btrack/napari/writer.py
@@ -0,0 +1,30 @@
"""
This module is a writer plugin to export Tracks layers using BTrack
"""
from typing import Optional

import numpy.typing as npt

from btrack.io import HDF5FileHandler
from btrack.utils import napari_to_tracks


def export_to_hdf(
path: str,
data: npt.ArrayLike,
meta: dict,
) -> Optional[str]:
tracks = napari_to_tracks(
data=data,
properties=meta.get("properties", {}),
graph=meta.get("graph", {}),
)

with HDF5FileHandler(
filename=path,
read_write="w",
obj_type="obj_type_1",
) as writer:
writer.write_tracks(tracks)

return path
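
In normal use napari builds the `(data, meta)` pair itself when the user saves a Tracks layer, but the writer can also be called directly. A minimal sketch with an invented two-point track; passing an empty `meta` exercises the fallback to empty `properties` and `graph` dicts mentioned in the commit message:

```python
import numpy as np

from btrack.napari.writer import export_to_hdf

# a minimal two-point track: columns are ID, t, y, x
data = np.array([[1, 0, 10.0, 10.0], [1, 1, 11.0, 10.0]])

saved = export_to_hdf(
    path="tracks_export.h5",
    data=data,
    meta={},  # properties and graph fall back to empty dicts via meta.get
)
print(saved)  # the path is returned so napari can report a successful save
```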
91 changes: 89 additions & 2 deletions btrack/utils.py
@@ -3,14 +3,19 @@
import dataclasses
import functools
import logging
-from typing import Optional
+from typing import TYPE_CHECKING, Optional

if TYPE_CHECKING:
import numpy.typing as npt

import numpy as np
from skimage.util import map_array

# import core
from . import _version, btypes, constants
from .btypes import Tracklet
from .constants import DEFAULT_EXPORT_PROPERTIES, Dimensionality
from .io import objects_from_dict
from .io._localization import segmentation_to_objects
from .models import HypothesisModel, MotionModel, ObjectModel

@@ -193,7 +198,7 @@ def tracks_to_napari(
raise ValueError("ndim must be 2 or 3 dimensional.")

t_header = ["ID", "t"] + ["z", "y", "x"][-ndim:]
-p_header = ["t", "state", "generation", "root", "parent"]
+p_header = ["t", "state", "generation", "root", "parent", "dummy"]

# ensure lexicographic ordering of tracks
ordered = sorted(tracks, key=lambda t: t.ID)
@@ -221,6 +226,88 @@ def nans_idx(x):
return data, properties, graph


def napari_to_tracks(
data: npt.NDArray,
properties: Optional[dict[str, npt.ArrayLike]],
graph: Optional[dict[int, list[int]]],
) -> list[btypes.Tracklet]:
"""Convert napari Tracks to a list of Tracklets.
Parameters
----------
data : array (N, D+1)
Coordinates for N points in D+1 dimensions. ID,T,(Z),Y,X. The first
axis is the integer ID of the track. D is either 3 or 4 for planar
or volumetric timeseries respectively.
properties : dict {str: array (N,)}
Properties for each point. Each property should be an array of length N,
where N is the number of points.
graph : dict {int: list}
Graph representing associations between tracks. Dictionary defines the
mapping between a track ID and the parents of the track. This can be
one (the track has one parent, and the parent has >=1 child) in the
case of track splitting, or more than one (the track has multiple
parents, but only one child) in the case of track merging.
Returns
-------
tracks : list[btypes.Tracklet]
A list of tracklet objects created from the napari Tracks layer data.
"""

if data.shape[1] == Dimensionality.FIVE:
track_id, t, z, y, x = data.T
elif data.shape[1] == Dimensionality.FOUR:
track_id, t, y, x = data.T
z = np.zeros_like(x)
else:
raise ValueError(
"Data must have either 4 (ID, t, y, x) or 5 (ID, t, z, y, x) columns, "
f"not {data.shape[1]}"
)

# Create all PyTrackObjects
objects_dict = {
"ID": np.arange(track_id.size),
"t": t,
"x": x,
"y": y,
"z": z,
"dummy": properties.get("dummy", np.full_like(track_id, fill_value=False)),
"label": properties.get(
"state", np.full_like(track_id, fill_value=constants.States.NULL)
),
}
track_objects = objects_from_dict(objects_dict)

# Create all Tracklets
tracklets = []
for track in np.unique(track_id).astype(int):
# Create tracklet
track_indices = np.argwhere(track_id == track).ravel()
track_data = [track_objects[i] for i in track_indices]
parent = graph.get(track, [track])[0]
children = [child for (child, parents) in graph.items() if track in parents]
tracklet = Tracklet(
ID=track,
data=track_data,
parent=parent,
children=children,
)

# Determine root tracklet
tracklet.root = parent
tracklet.generation = 0 if tracklet.root == track else 1
while tracklet.root in graph:
tracklet.root = graph[tracklet.root][0]
tracklet.generation += 1

tracklets.append(tracklet)

return tracklets


def update_segmentation(
segmentation: np.ndarray,
tracks: list[btypes.Tracklet],
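
A minimal sketch of how the new `napari_to_tracks` above walks the `graph` to recover `parent`, `root`, and `generation` (the data array and graph here are invented for illustration):

```python
import numpy as np

from btrack.utils import napari_to_tracks

# columns are ID, t, y, x -- track 1 splits into tracks 2 and 3 at t=2
data = np.array(
    [
        [1, 0, 10.0, 10.0],
        [1, 1, 11.0, 10.0],
        [2, 2, 12.0, 9.0],
        [3, 2, 12.0, 11.0],
    ]
)
graph = {2: [1], 3: [1]}

tracks = napari_to_tracks(data, properties={}, graph=graph)
for track in tracks:
    print(track.ID, track.parent, track.root, track.generation)
# expected:
#   1 1 1 0   (no graph entry -> its own parent and root, generation 0)
#   2 1 1 1   (parent and root taken from graph, generation bumped to 1)
#   3 1 1 1
```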
6 changes: 6 additions & 0 deletions tests/conftest.py
@@ -26,6 +26,12 @@ def _write_h5_file(file_path: os.PathLike, test_objects) -> os.PathLike:
return file_path


@pytest.fixture
def sample_tracks():
"""An example tracks dataset"""
return btrack.datasets.example_tracks()


@pytest.fixture
def test_objects():
"""
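
The new fixture can then be requested by name in any test, as the new `test_napari_to_tracks` below does. A trivial hypothetical example:

```python
def test_example_tracks_not_empty(sample_tracks):
    # pytest injects the fixture defined in conftest.py by its argument name
    assert len(sample_tracks) > 0
```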
35 changes: 35 additions & 0 deletions tests/test_utils.py
@@ -250,6 +250,41 @@ def test_tracks_to_napari_ndim_inference(ndim: int):
assert data.shape[-1] == ndim + 2


def test_napari_to_tracks(sample_tracks):
"""Test that a napari Tracks layer can be converted to a list of Tracklets.
First convert tracks to a napari layer, then convert back and compare.
"""

data, properties, graph = utils.tracks_to_napari(sample_tracks)
tracks = utils.napari_to_tracks(data, properties, graph)

properties_to_compare = [
"ID",
"t",
"x",
"y",
# "z", # z-coordinates are different
"parent",
"label",
"state",
"root",
"is_root",
"is_leaf",
"start",
"stop",
"generation",
"dummy",
"properties",
]

sample_tracks_dicts = [
sample.to_dict(properties_to_compare) for sample in sample_tracks
]
tracks_dicts = [track.to_dict(properties_to_compare) for track in tracks]
assert sample_tracks_dicts == tracks_dicts


def test_objects_from_array(test_objects):
"""Test creation of a list of objects from a numpy array."""

