fix bugs and docstring
Karl5766 committed Aug 6, 2024
1 parent aa0c000 commit 1d57582
Showing 3 changed files with 107 additions and 32 deletions.
46 changes: 43 additions & 3 deletions src/cvpl_tools/im/algorithms.py
@@ -1,9 +1,8 @@
"""
This file is for cv algorithms
"""


from typing import Callable
import enum
from typing import Callable, Type
import numpy as np
import skimage
from scipy.ndimage import (
@@ -239,3 +238,44 @@ def round_object_detection_3sizes(seg, size_thres, dist_thres, rst, size_thres2,
# lbl_im = (lbl_im2 > 0) * 1 + (lbl_im > 0) * 2 + small_mask * 3

return lbl_im


# --------------------------------Statistical Analysis-------------------------------------


def Stats_MAE(counted, gt):
    """Compute the mean absolute error between counted and ground truth cell counts

    Args:
        counted (Iterable[float]): The counted cells in each image
        gt (Iterable[float]): The ground truth number of cells in each image

    Returns:
        The mean absolute difference between counted and gt
    """
    return np.abs(np.array(counted, dtype=np.float32) - np.array(gt, dtype=np.float32)).mean().item()


def Stats_ShowScatterPairComparisons(counted: np.ndarray, gt, enumType: Type[enum.Enum]) -> None:
    """Show scatter plots comparing each counting algorithm's results against the ground truth

    Args:
        counted: An NCounter * NImages np.ndarray; the cells counted in each image, by each counter
        gt (Iterable[float]): The ground truth number of cells in each image
        enumType: The enum class for the counting methods; enum values index the rows of counted
    """
import matplotlib.pyplot as plt
counting_method_inverse_dict = {item.value: item.name for item in enumType}
nrow = int((len(counting_method_inverse_dict) - 1) / 6) + 1
    fig, axes = plt.subplots(nrow, 6, figsize=(24, nrow * 4), sharex=True, sharey=True, squeeze=False)  # squeeze=False keeps axes 2d when nrow == 1

gt_arr = np.array(gt, dtype=np.float32)
for i in range(counted.shape[0]):
X, Y = gt_arr, counted[i]

iax, jax = i // 6, i % 6
ax: plt.Axes = axes[iax, jax]
ax.set_box_aspect(1)
ax.set_title(counting_method_inverse_dict[i])
ax.scatter(X, Y)

plt.show()
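
As a rough usage sketch (not part of this commit), the two statistics helpers above might be exercised as follows; the CountingMethod enum and the count values are made-up placeholders, and the import path assumes the functions live in cvpl_tools.im.algorithms as shown in this diff:

import enum
import numpy as np
from cvpl_tools.im.algorithms import Stats_MAE, Stats_ShowScatterPairComparisons

# hypothetical enum of counting methods; enum values index the rows of counted
class CountingMethod(enum.Enum):
    BLOBDOG = 0
    EDGE_PENALIZED = 1

gt = [12., 30., 25.]                                   # ground truth cell counts per image
counted = np.array([[10., 28., 27.],                   # one row of counts per counting method
                    [13., 31., 24.]], dtype=np.float32)

print(Stats_MAE(counted[0], gt))                       # mean absolute error of the first method
Stats_ShowScatterPairComparisons(counted, gt, CountingMethod)  # one scatter subplot per method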

12 changes: 12 additions & 0 deletions src/cvpl_tools/im/dask_algorithms.py
@@ -71,3 +71,15 @@ def map_da_to_rows(im: da.Array | np.ndarray,
block_index,
slices_iter
)


def map_rows_to_rows(rows: Iterator[tuple], block_map: Callable, reduce: bool = False) \
        -> list[tuple] | np.ndarray:
    """Apply block_map to each row's data; the first element of each row tuple should be a np.ndarray"""
    rows = [(block_map(r), block_index, slices) for r, block_index, slices in rows]
    if reduce:
        rows = np.concat([r[0] for r in rows], axis=0)
    return rows


def reduce_numpy_rows(rows: Iterator[tuple]) -> np.ndarray:
    """Concatenate the np.ndarray data of each row into a single array"""
    return np.concat([r[0] for r in rows], axis=0)
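
For reference, a minimal sketch of how the two row helpers above compose; the row tuples are built by hand purely for illustration (in the library they would normally come from map_da_to_rows), and np.concat assumes NumPy >= 2.0:

import numpy as np
from cvpl_tools.im.dask_algorithms import map_rows_to_rows, reduce_numpy_rows

# hand-built rows of the form (data, block_index, slices), one tuple per block
rows = [
    (np.array([[1., 2.], [3., 4.]], dtype=np.float32), (0,), (slice(0, 2),)),
    (np.array([[5., 6.]], dtype=np.float32), (1,), (slice(2, 3),)),
]

doubled = map_rows_to_rows(iter(rows), lambda arr: arr * 2)  # still a list of row tuples
stacked = reduce_numpy_rows(doubled)                         # concatenated into a (3, 2) float32 array
print(stacked.shape)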
81 changes: 52 additions & 29 deletions src/cvpl_tools/im/seg_process.py
@@ -340,7 +340,8 @@ def __init__(self, max_sigma=2, threshold: float = 0.1, reduce=False):
self.threshold = threshold
self.reduce = reduce

def np_features(self, block: np.ndarray, block_idx: tuple | None = None, slices: tuple | None = None):
def np_features(self, block: np.ndarray[np.float32], block_idx: tuple | None = None, slices: tuple | None = None)\
-> np.ndarray[np.float32]:
lc = skimage.feature.blob_dog(np.array(block * 255, dtype=np.uint8),
max_sigma=self.max_sigma,
threshold=self.threshold).astype(np.float32) # N * (ndim + 1) ndarray
@@ -349,17 +350,17 @@ def np_features(self, block: np.ndarray, block_idx: tuple | None = None, slices:
lc[:, :block.ndim] += start_pos[None, :]
return lc

def feature_forward(self, im: np.ndarray | da.Array, reduce: bool) \
-> Iterator[tuple] | np.ndarray:
def feature_forward(self, im: np.ndarray[np.float32] | da.Array, reduce: bool) \
-> Iterator[tuple] | np.ndarray[np.float32]:
return dask_algorithms.map_da_to_rows(im=im,
fn=self.np_features,
return_dim=im.ndim,
return_dtype=np.float32,
return_dask=False,
reduce=reduce)

def forward(self, im: np.ndarray | da.Array) \
-> Iterator[tuple] | np.ndarray:
def forward(self, im: np.ndarray[np.float32] | da.Array) \
-> Iterator[tuple] | np.ndarray[np.float32]:
return select_feature_columns(self.feature_forward(im, reduce=self.reduce), slice(im.ndim), self.reduce)

def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):
@@ -489,6 +490,13 @@ def np_forward(self, bs: np.ndarray[np.uint8]) -> np.ndarray[np.int32]:


class BinaryAndCentroidListToInstance(SegProcess):
"""Defines a SegProcess
This class' instance forward() takes two inputs: The binary mask segmenting objects, and centroids
detected by the blobdog algorithm. Then it splits the pixels in the binary mask into instance
segmentation based on the centroids found by blobdog. The result is a more finely segmented mask
where objects closer together are more likely to be correctly segmented as two
"""
def __init__(self):
super().__init__(DatType.OTHER, DatType.OS)

@@ -588,7 +596,8 @@ def __init__(self, reduce=False):
super().__init__(DatType.OS, DatType.LC)
self.reduce = reduce

def np_features(self, block: np.ndarray, block_idx: tuple | None = None, slices: tuple | None = None):
def np_features(self, block: np.ndarray[np.int32], block_idx: tuple | None = None, slices: tuple | None = None)\
-> np.ndarray[np.float32]:
contours_np3d = algorithms.npindices_from_os(block)
lc = [contour.astype(np.float32).mean(axis=0) for contour in contours_np3d]
lc = np.array(lc, dtype=np.float32)
@@ -597,20 +606,20 @@ def np_features(self, block: np.ndarray, block_idx: tuple | None = None, slices:
lc[:, :block.ndim] += start_pos[None, :]
return lc

def feature_forward(self, im: np.ndarray | da.Array, reduce: bool) \
-> list[np.ndarray] | np.ndarray:
def feature_forward(self, im: np.ndarray[np.int32] | da.Array, reduce: bool) \
-> Iterator[tuple] | np.ndarray[np.float32]:
return dask_algorithms.map_da_to_rows(im=im,
fn=self.np_features,
return_dim=im.ndim,
return_dtype=np.float32,
return_dask=False,
reduce=reduce)

def forward(self, im: np.ndarray | da.Array) \
-> np.ndarray:
def forward(self, im: np.ndarray[np.int32] | da.Array) \
-> np.ndarray[np.float32]:
return self.feature_forward(im, reduce=self.reduce)

def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):
def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.int32] | da.Array):
features = self.feature_forward(im, reduce=True)
lc_interpretable_napari('os_to_lc_centroids', features, viewer, im.ndim, [])

@@ -620,16 +629,30 @@ def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):

class CountLCEdgePenalized(SegProcess):
"""From a list of cell centroid locations, calculate a cell count estimate
Each centroid is simply treated as 1 cell when they are sufficiently far from the edge, but as they
get closer to the edge the divisor becomes >1. and their estimate decreases towards 0, since cells
near the edge may be double-counted (or triple or more counted if at a corner etc.)
You need to provide an image_shape parameter due to the fact that lc does not contain
information about input image shape
Each centroid is simply treated as 1 cell when they are sufficiently far from the edge,
but as they get closer to the edge the divisor becomes >1. and their estimate decreases
towards 0, since cells near the edge may be double-counted (or triple or more counted
if at a corner etc.)
"""

def __init__(self,
im_shape: np.ndarray | tuple[int],
border_params: tuple[float, float, float] = (3., -.5, 2.),
reduce: bool = False):
"""Initialize a CountLCEdgePenalized object
Args:
im_shape: Shape of the blocks where rows in list of centroids locate in
border_params: Specify how the cells on the border gets discounted. Formula is:
intercept, dist_coeff, div_max = self.border_params
mults = 1 / np.clip(intercept - border_dists * dist_coeff, 1., div_max)
cc_list = np.prod(mults, axis=1)
reduce: If True, reduce the results into a Numpy 2d array calling forward()
"""
super().__init__(DatType.LC, DatType.CC)
self.im_shape = np.array(im_shape, dtype=np.float32)
self.border_params = border_params
@@ -661,27 +684,27 @@ def cc_list(self, lc: np.ndarray[np.float32]) -> np.ndarray[np.float32]:
return cc_list

def np_features(self, lc: np.ndarray[np.float32]) -> np.ndarray[np.float32]:
"""Calculate cell counts, then concat centroid locations to the left of cell counts"""
cc_list = self.cc_list(lc)
features = np.concat((lc, cc_list[:, None]), axis=1)
return features

def feature_forward(self, im: np.ndarray | da.Array, reduce: bool) \
-> list[np.ndarray] | np.ndarray:
return dask_algorithms.map_da_to_rows(im=im,
fn=self.np_features,
return_dim=im.ndim + 1,
return_dtype=np.float32,
return_dask=False,
reduce=reduce)
def feature_forward(self, lc: Iterator[tuple] | np.ndarray[np.float32], reduce: bool) \
-> Iterator[tuple] | np.ndarray[np.float32]:
if isinstance(lc, np.ndarray):
return self.np_features(lc)
else:
            return dask_algorithms.map_rows_to_rows(lc, self.np_features, reduce)

def forward(self, im: np.ndarray | da.Array) \
-> np.ndarray:
features = select_feature_columns(self.feature_forward(im, self.reduce), [-1], self.reduce)
def forward(self, lc: Iterator[tuple] | np.ndarray[np.float32]) \
-> Iterator[tuple] | np.ndarray[np.float32]:
features = select_feature_columns(self.feature_forward(lc, self.reduce), [-1], self.reduce)
return sum_feature_columns(features, self.reduce)

def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):
features = self.feature_forward(im, reduce=True)
lc_interpretable_napari('edge_penalized_centroids', features, viewer, im.ndim, ['ncells'])
def interpretable_napari(self, viewer: napari.Viewer, lc: Iterator[tuple] | np.ndarray[np.float32]):
features = self.feature_forward(lc, reduce=True)
lc_interpretable_napari('edge_penalized_centroids', features, viewer,
len(self.im_shape), ['ncells'])


# --------------------------Convert Ordinal Mask to Cell Count Estimate------------------------
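As a rough end-to-end sketch of the interface change above, where CountLCEdgePenalized.forward now consumes a list of centroids rather than an image: the BlobDog class name is assumed from the blob_dog-based process whose __init__ and np_features appear earlier in this diff, the image and parameters are made up, and the exact shape of the returned count depends on sum_feature_columns, which is not shown here:

import numpy as np
from cvpl_tools.im.seg_process import BlobDog, CountLCEdgePenalized  # BlobDog name is an assumption

rng = np.random.default_rng(0)
im = rng.random((64, 64), dtype=np.float32)        # hypothetical image scaled to [0, 1]

blobdog = BlobDog(max_sigma=2, threshold=0.1, reduce=True)
lc = blobdog.forward(im)                           # N * ndim array of centroid locations

counter = CountLCEdgePenalized(im_shape=im.shape, reduce=True)
cell_count = counter.forward(lc)                   # edge-penalized cell count estimate
print(cell_count)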
