
Commit

Fix bugs
Karl5766 committed Aug 7, 2024
1 parent a0631be commit ce1ebb1
Showing 4 changed files with 54 additions and 37 deletions.
src/cvpl_tools/fs.py (6 changes: 3 additions & 3 deletions)
@@ -188,7 +188,7 @@ def load_im_as_np(im_format, path, true_im_ndim: int, stack_axis=None,
     # stack_axis = true_im_ndim - 1  # default stack on the last axis
     if concat_instead:  # concat on existing axis
         assert 0 <= stack_axis < true_im_ndim - 1, f'Attempt to concat on axis {stack_axis} with true dim={true_im_ndim}'
-        im = np.concatenateenate(im_arr, axis=stack_axis)
+        im = np.concatenate(im_arr, axis=stack_axis)
     else:  # stack on new axis
         assert 0 <= stack_axis < true_im_ndim, f'Attempt to stack on axis {stack_axis} with true dim={true_im_ndim}'
         im = np.stack(im_arr, axis=stack_axis)
@@ -255,7 +255,7 @@ class ImReadSetting:
     """
     im_format (int) - specifies the data storage layout e.g. ImFileType.FORMAT_UINT8
     stack_axis (int) - if not None, combine several files of different zstacks into the same imid; specifies the axis to stack on
-    concat_instead (bool) - if True, the stacking is done by np.concatenateenate(), which preserves im dimensions by joining over an existing axis
+    concat_instead (bool) - if True, the stacking is done by np.concatenate(), which preserves im dimensions by joining over an existing axis
     true_im_ndim (int) - # of dimensions of a single image, including the channel axis (for a 2d BGR image this would be 3)
     allow_pickle (bool) - set allow_pickle for np.load and np.save
     imid_keep_dirpath (bool) - keep the directory path when extracting imid from paths; defaults to False
@@ -273,7 +273,7 @@ class ImWriteSetting:
     """
     im_format (int) - specifies the data storage layout e.g. ImFileType.FORMAT_UINT8
     stack_axis (int) - if not None, combine several files of different zstacks into the same imid; specifies the axis to stack on
-    concat_instead (bool) - if True, the stacking is done by np.concatenateenate(), which preserves im dimensions by joining over an existing axis
+    concat_instead (bool) - if True, the stacking is done by np.concatenate(), which preserves im dimensions by joining over an existing axis
     allow_pickle (bool) - set allow_pickle for np.load and np.save
     ftype (int) - file type to write e.g. ImFileType.FTYPE_NPZ
     """
src/cvpl_tools/im/algorithms.py (12 changes: 9 additions & 3 deletions)
@@ -138,8 +138,9 @@ def npindices_from_os(lbl_im: np.ndarray[np.int32]) -> list[np.ndarray[np.int64]]:
             continue

         i = len(result) + 1
-        mask_np3d = lbl_im[slices] == i
-        result.append(np.argwhere(mask_np3d))
+        mask_np3d = np.argwhere(lbl_im[slices] == i)
+        mask_np3d += np.array(tuple(s.start for s in slices), dtype=np.int64)
+        result.append(mask_np3d)
     return result
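The bug fixed here: np.argwhere over a cropped slice returns coordinates relative to that crop, so each slice's start offset must be added back to obtain coordinates in the full image. A self-contained illustration (made-up label image):

    import numpy as np

    lbl_im = np.zeros((6, 6), dtype=np.int32)
    lbl_im[3:5, 2:4] = 1
    slices = (slice(3, 5), slice(2, 4))       # bounding box of object 1

    local = np.argwhere(lbl_im[slices] == 1)  # [[0 0] [0 1] [1 0] [1 1]], crop-relative
    offset = np.array(tuple(s.start for s in slices), dtype=np.int64)
    local + offset                            # [[3 2] [3 3] [4 2] [4 3]], image-relative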


@@ -245,7 +246,12 @@ def round_object_detection_3sizes(seg, size_thres, dist_thres, rst, size_thres2,
     # lbl_im = (lbl_im2 > 0) * 1 + (lbl_im > 0) * 2 + small_mask * 3

     if remap_indices:
-        _, lbl_im = np.unique(lbl_im, return_inverse=True)
+        if np.__version__ < '2.0.0':  # before 2.0.0, numpy returns a 1d inverse regardless of input shape
+            im_shape = lbl_im.shape
+            _, lbl_im = np.unique(lbl_im, return_inverse=True)
+            lbl_im = lbl_im.reshape(im_shape)
+        else:
+            _, lbl_im = np.unique(lbl_im, return_inverse=True)

     return lbl_im
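Context for the version check: per the comment in the new branch, np.unique(..., return_inverse=True) returned a flat inverse array before NumPy 2.0.0, while newer versions return it with the input's shape. A sketch of the difference (made-up labels):

    import numpy as np

    lbl = np.array([[0, 7], [7, 9]])
    _, inv = np.unique(lbl, return_inverse=True)
    # NumPy <  2.0.0: inv.shape == (4,)   -> needs inv.reshape(lbl.shape)
    # NumPy >= 2.0.0: inv.shape == (2, 2) -> already matches the input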

src/cvpl_tools/im/dask_algorithms.py (9 changes: 4 additions & 5 deletions)
@@ -37,18 +37,17 @@ def map_da_to_rows(im: da.Array | np.ndarray,
     # reference: https://github.com/dask/dask/issues/7589 and
     # https://github.com/dask/dask-image/blob/adcb217de766dd6fef99895ed1a33bf78a97d14b/dask_image/ndmeasure/__init__.py#L299

-    if isinstance(im, np.ndarray):
-        # special case where this just reduces to a single function call
-        return fn(im, (0, 0, 0), tuple(slice(0, s) for s in im.shape))
-
     # note the block da.Array passed as first argument to delayed_fn() will become np.ndarray in the call to fn()
     # this is due to how dask.delayed() handles input arguments
     # delayed_fn = dask.delayed(fn)
     @dask.delayed
     def delayed_fn(block, block_idx, slices):
         result = fn(block, block_idx, slices)
         return result

+    if isinstance(im, np.ndarray):
+        # special case: wrap in a single-chunk dask array so this reduces to a single function call
+        im = da.from_array(im, chunks=im.shape)
+
     slices_list = dask.array.core.slices_from_chunks(im.chunks)
     block_iter = zip(
         map(functools.partial(operator.getitem, im), slices_list),  # dask block
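The reworked special case wraps the ndarray in a single-chunk dask array instead of returning fn's result directly, so callers always receive the same (block, block_idx, slices) row format whether they pass a dask or a numpy array. A sketch of the single-chunk wrapping (made-up array):

    import dask.array as da
    import numpy as np

    im = np.ones((64, 64), dtype=np.float32)
    dim = da.from_array(im, chunks=im.shape)  # one chunk spanning the whole array
    dim.numblocks                             # (1, 1): the generic block loop runs exactly once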
src/cvpl_tools/im/seg_process.py (64 changes: 38 additions & 26 deletions)
@@ -136,21 +136,19 @@ def lc_interpretable_napari(layer_name: str,
                             text=text_parameters)


-def select_feature_columns(features,
-                           cols: slice | Sequence[int] | int,
-                           reduced: bool) -> Iterator[tuple] | np.ndarray:
+def select_feature_columns(features: Iterator[tuple] | np.ndarray,
+                           cols: slice | Sequence[int] | int) -> Iterator[tuple] | np.ndarray:
     """Performs column selection on results returned by map_da_to_rows()"""
-    if reduced:
+    if isinstance(features, np.ndarray):
         return features[:, cols]
     else:
         features_iter = ((block[:, cols], block_idx, slices) for (block, block_idx, slices) in features)
         return features_iter


-def sum_feature_columns(features,
-                        reduced: bool) -> Iterator[tuple] | np.ndarray:
+def sum_feature_columns(features) -> Iterator[tuple] | np.ndarray:
     """Performs summation over results returned by map_da_to_rows()"""
-    if reduced:
+    if isinstance(features, np.ndarray):
         return features.sum(keepdims=True)
     else:
         return ((block.sum(keepdims=True), block_idx, slices)
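With the reduced flag gone, the type of features itself selects the branch: a bare ndarray is the reduced result, anything else is treated as an iterator of (block, block_idx, slices) tuples. A quick illustration using the function above (made-up rows):

    import numpy as np

    rows = np.array([[1., 2., 3.], [4., 5., 6.]])
    select_feature_columns(rows, [-1])    # ndarray in -> array([[3.], [6.]])

    blocks = iter([(rows, (0,), (slice(0, 2),))])
    select_feature_columns(blocks, [-1])  # iterator in -> iterator of (block[:, [-1]], block_idx, slices)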
@@ -351,7 +349,7 @@ def np_features(self, block: np.ndarray[np.float32], block_idx: tuple | None = N
         lc[:, :block.ndim] += start_pos[None, :]
         return lc

-    def feature_forward(self, im: np.ndarray[np.float32] | da.Array, reduce: bool) \
+    def feature_forward(self, im: np.ndarray[np.float32] | da.Array, reduce: bool = False) \
             -> Iterator[tuple] | np.ndarray[np.float32]:
         return dask_algorithms.map_da_to_rows(im=im,
                                               fn=self.np_features,
@@ -362,7 +360,7 @@ def feature_forward(self, im: np.ndarray[np.float32] | da.Array, reduce: bool) \

     def forward(self, im: np.ndarray[np.float32] | da.Array) \
             -> Iterator[tuple] | np.ndarray[np.float32]:
-        return select_feature_columns(self.feature_forward(im, reduce=self.reduce), slice(im.ndim), self.reduce)
+        return select_feature_columns(self.feature_forward(im, reduce=self.reduce), slice(im.ndim))

     def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):
         blobdog = self.feature_forward(im, reduce=True)
@@ -436,7 +434,7 @@ def map_fn(b, bidx, slices):
     def forward(self, im: np.ndarray | da.Array) \
             -> Iterator[tuple] | np.ndarray:
         features = self.feature_forward(im, reduce=self.reduce)
-        return select_feature_columns(features, [-1], self.reduce)
+        return select_feature_columns(features, [-1])

     def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32] | da.Array):
         # see what has been completely masked off
@@ -699,7 +697,7 @@ def cc_list(self, lc: np.ndarray[np.float32]) -> np.ndarray[np.float32]:
         border_dists = np.abs((lc + midpoint) % self.im_shape - (midpoint - .5))

         intercept, dist_coeff, div_max = self.border_params
-        mults = 1 / np.clip(intercept - border_dists * dist_coeff, 1., div_max)
+        mults = 1 / np.clip(intercept + border_dists * dist_coeff, 1., div_max)
         cc_list = np.prod(mults, axis=1)
         return cc_list
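With the sign fixed, a negative dist_coeff makes the divisor shrink as the centroid moves away from the border, so only near-border cells are down-weighted; with the old minus sign the divisor grew with distance from the border and the penalty never faded out. A worked example with illustrative border_params (assumed values, not taken from the repository):

    import numpy as np

    intercept, dist_coeff, div_max = 3., -.5, 2.  # illustrative values only
    border_dists = np.array([[0.], [2.], [4.]])   # centroid distance to the nearest border, one axis

    1 / np.clip(intercept + border_dists * dist_coeff, 1., div_max)
    # dist 0 -> 1/2 = 0.5   on the border: counted as half a cell
    # dist 2 -> 1/2 = 0.5   still within the penalized band
    # dist 4 -> 1/1 = 1.0   far enough from the border: no penalty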

@@ -714,12 +712,12 @@ def feature_forward(self, lc: Iterator[tuple] | np.ndarray[np.float32], reduce:
         if isinstance(lc, np.ndarray):
             return self.np_features(lc)
         else:
-            return dask_algorithms.map_rows_to_rows(lc, self.np_features, self.reduce)
+            return dask_algorithms.map_rows_to_rows(lc, self.np_features, reduce)

     def forward(self, lc: Iterator[tuple] | np.ndarray[np.float32]) \
             -> Iterator[tuple] | np.ndarray[np.float32]:
-        features = select_feature_columns(self.feature_forward(lc, self.reduce), [-1], self.reduce)
-        return sum_feature_columns(features, self.reduce)
+        features = select_feature_columns(self.feature_forward(lc, self.reduce), [-1])
+        return sum_feature_columns(features)

     def interpretable_napari(self, viewer: napari.Viewer, lc: Iterator[tuple] | np.ndarray[np.float32]):
         features = self.feature_forward(lc, reduce=True)
@@ -730,7 +728,7 @@ def interpretable_napari(self, viewer: napari.Viewer, lc: Iterator[tuple] | np.n
     # --------------------------Convert Ordinal Mask to Cell Count Estimate------------------------


-class CountOSNContour(SegProcess):
+class CountOSBySize(SegProcess):
     """Counting ordinal segmentation contours

     Several features:
@@ -739,7 +737,8 @@ class CountOSNContour(SegProcess):
     2. Above the size threshold, the contour is seen as a cluster of cells and an estimate of cell count is given
        based on the volume of the contour
     3. For cells at boundary locations, their estimated ncell is penalized according to the distance
-       between the cell centroid and the boundary of the image
+       between the cell centroid and the boundary of the image; if no voxel of the cell touches the
+       edge, this penalty does not apply
     4. A min_size threshold, below (<=) which the contour is simply discarded because it's likely just
        an artifact
     """
@@ -762,28 +761,41 @@ def cc_list(self, os: np.ndarray[np.int32]) -> np.ndarray[np.float32]:
         ncells = {}
         dc = []
         dc_idx_to_centroid_idx = {}
+        idx_max = np.array(tuple(d - 1 for d in os.shape), dtype=np.int64)
         for i in range(len(contours_np3d)):
             contour = contours_np3d[i]
             nvoxel = contour.shape[0]
             if nvoxel <= self.min_size:
                 ncells[i] = 0.
-            elif nvoxel <= self.size_threshold:
-                dc_idx_to_centroid_idx[len(dc)] = i
-                dc.append(contour.astype(np.float32).mean(axis=0))
             else:
-                ncells[i] = nvoxel * self.volume_weight
+                ncells[i] = 0.
+                dc_idx_to_centroid_idx[len(dc)] = i
+
+                # if no voxel touches the boundary, we do not want to apply the edge penalty
+                on_edge = (contour == 0).astype(np.uint8) + (contour == idx_max[None, :]).astype(np.uint8)
+                on_edge = on_edge.sum().item() > 0
+                if on_edge:
+                    dc.append(contour.astype(np.float32).mean(axis=0))
+                else:
+                    ncells[i] = 1
+
+                if nvoxel > self.size_threshold:
+                    ncells[i] += (nvoxel - self.size_threshold) * self.volume_weight
         ps = CountLCEdgePenalized(os.shape, self.border_params)
         dc_centroids = np.array(dc, dtype=np.float32)
         dc_ncells = ps.cc_list(dc_centroids)
         for dc_idx in dc_idx_to_centroid_idx:
             i = dc_idx_to_centroid_idx[dc_idx]
-            ncells[i] = dc_ncells[dc_idx]
+            ncells[i] += dc_ncells[dc_idx]
         ncells = np.array([ncells[i] for i in range(len(ncells))], dtype=np.float32)
         return ncells
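The new edge test marks a contour as boundary-touching when any of its voxel indices is 0 or the maximum index along any axis. Written with a boolean any() rather than the commit's uint8 sum, the equivalent test reads (made-up coordinates):

    import numpy as np

    shape = (5, 6, 7)
    idx_max = np.array(tuple(d - 1 for d in shape), dtype=np.int64)

    contour = np.array([[0, 3, 2], [1, 3, 2]])  # voxel coordinates of one object
    bool(((contour == 0) | (contour == idx_max[None, :])).any())
    # True: the first voxel sits at index 0 along the first axis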

-    def np_features(self, os: np.ndarray[np.int32]) -> np.ndarray[np.float32]:
-        ps = DirectOSToLC()
-        lc = ps.forward(os)
+    def np_features(self,
+                    os: np.ndarray[np.int32],
+                    block_idx: tuple | None = None,
+                    slices: tuple | None = None) -> np.ndarray[np.float32]:
+        ps = DirectOSToLC(reduce=True)
+        lc = ps.np_features(os, block_idx, slices)
         cc_list = self.cc_list(os)
         features = np.concatenate((lc, cc_list[:, None]), axis=1)
         return features
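The widened signature matches the row-function contract of map_da_to_rows in dask_algorithms.py, which calls fn(block, block_idx, slices) for every block; the two extra arguments default to None so the method still works when invoked directly on a whole array. Schematically, every row function now shares this shape (hypothetical stub, not repository code):

    import numpy as np

    def fn(block: np.ndarray, block_idx: tuple | None = None,
           slices: tuple | None = None) -> np.ndarray:
        ...  # returns a 2d array of feature rows for this block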
@@ -799,8 +811,8 @@ def feature_forward(self, im: np.ndarray | da.Array, reduce: bool) \

     def forward(self, im: np.ndarray | da.Array) \
             -> np.ndarray:
-        features = select_feature_columns(self.feature_forward(im, self.reduce), [-1], self.reduce)
-        return sum_feature_columns(features, self.reduce)
+        features = select_feature_columns(self.feature_forward(im, self.reduce), [-1])
+        return sum_feature_columns(features)

     def interpretable_napari(self, viewer: napari.Viewer, im: np.ndarray[np.float32]):
         features = self.feature_forward(im, reduce=True)
