Commit 92a68341 authored by Philipp Schubert's avatar Philipp Schubert

improvements and fixes

* reduce memory requirements and improve performance in the kimimaro skeletonization step
parent c18fa280
Pipeline #80336 failed in 62 minutes and 8 seconds
......@@ -35,7 +35,7 @@ For SyConn v1, please refer to the old [documentation](https://structuralneurobi
The Team
--------
The Synaptic connectivity inference toolkit is currently developed at the Max-Planck-Institute of Neurobiology in Martinsried by
Philipp Schubert, Maria Kawula, Carl Constantin v. Wedemeyer, Atul Mohite, Gaurav Kumar and Joergen Kornfeld.
Philipp Schubert, Jonathan Klimesch, Alexandra Rother and Joergen Kornfeld.
Acknowledgements
......@@ -49,8 +49,8 @@ used for our project can be found
[here](https://github.com/deepmind/dm_control/blob/30069ac11b60ee71acbd9159547d0bc334d63281/dm_control/_render/pyopengl/egl_ext.py).
Thanks to Julia Kuhl (see http://somedonkey.com/ for more beautiful
work) for designing and creating the logo and to Rangoli Saxena, Mariana
Shumliakivska, Josef Mark, Maria Kawula, Atul Mohite, Alexandra Rother
and Martin Drawitsch and for code contributions.
Shumliakivska, Josef Mark, Maria Kawula, Atul Mohite, Carl Constantin v. Wedemeyer,
Gaurav Kumar and Martin Drawitsch for code contributions.
Publications
......
......@@ -12,10 +12,10 @@ syconn.exec.exec\_init module
:undoc-members:
:show-inheritance:
syconn.exec.exec\_multiview module
syconn.exec.exec\_inference module
----------------------------------------------
.. automodule:: syconn.exec.exec_multiview
.. automodule:: syconn.exec.exec_inference
:members:
:undoc-members:
:show-inheritance:
......
......@@ -44,11 +44,11 @@ if __name__ == '__main__':
('glia', {'prior_glia_removal': prior_glia_removal}),
('use_point_models', False),
('pyopengl_platform', 'egl'), # 'osmesa' or 'egl'
('batch_proc_system', None), # None, 'SLURM' or 'QSUB'
('ncores_per_node', 20),
('mem_per_node', 249500),
('ngpus_per_node', 1),
('nnodes_total', 1),
('batch_proc_system', 'SLURM'), # None, 'SLURM' or 'QSUB'
('ncores_per_node', 32),
('mem_per_node', 208000),
('ngpus_per_node', 2),
('nnodes_total', 2),
('skeleton', {'use_kimimaro': False}),
('log_level', log_level),
# these will be created during synapse type prediction (
......@@ -203,7 +203,7 @@ if __name__ == '__main__':
log.info('Step 4/9 - Creating SuperSegmentationDataset')
ftimer.start('SSD generation')
exec_init.run_create_neuron_ssd(kimimaro=global_params.config.use_kimimaro)
exec_init.run_create_neuron_ssd()
ftimer.stop()
if not (global_params.config.use_onthefly_views or global_params.config.use_point_models):
......
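
The example config above moves from a single 20-core workstation-style node to two 32-core SLURM nodes. A quick sketch of what the new per-node values imply per core and cluster-wide; deriving the total from the per-node settings is an assumption about how SyConn aggregates them.

# Per-core memory and core totals implied by the two configurations above;
# computing ncore_total from the per-node settings is an assumption.
old = dict(ncores_per_node=20, mem_per_node=249500, nnodes_total=1)
new = dict(ncores_per_node=32, mem_per_node=208000, nnodes_total=2)

for name, cfg in (('old', old), ('new', new)):
    mem_per_core = cfg['mem_per_node'] / cfg['ncores_per_node']   # 12475 MB vs 6500 MB
    ncore_total = cfg['ncores_per_node'] * cfg['nnodes_total']    # 20 vs 64
    print(name, mem_per_core, ncore_total)
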
......@@ -30,7 +30,7 @@ if __name__ == '__main__':
('batch_proc_system', 'SLURM'),
('ncores_per_node', 32),
('ngpus_per_node', 2),
('nnodes_total', 5),
('nnodes_total', 2),
('mem_per_node', 208990),
('use_point_models', False),
('skeleton', {'use_kimimaro': True}),
......@@ -79,7 +79,7 @@ if __name__ == '__main__':
# --------------------------------------------------------------------------
# Setup working directory and logging
shape_j0251 = np.array([27119, 27350, 15494])
cube_size = np.array([2048, 2048, 1024])
cube_size = np.array([2048, 2048, 1024]) * 2
cube_offset = (shape_j0251 - cube_size) // 2
cube_of_interest_bb = (cube_offset, cube_offset + cube_size)
# cube_of_interest_bb = None # process the entire cube!
......
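
Doubling `cube_size` centres a larger test volume in the j0251 dataset. A worked example of the bounding-box arithmetic, using only the values shown in this script:

import numpy as np

shape_j0251 = np.array([27119, 27350, 15494])
cube_size = np.array([2048, 2048, 1024]) * 2          # -> [4096, 4096, 2048]
cube_offset = (shape_j0251 - cube_size) // 2          # -> [11511, 11627, 6723]
cube_of_interest_bb = (cube_offset, cube_offset + cube_size)
# -> ([11511, 11627, 6723], [15607, 15723, 8771])
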
File mode changed from 100644 to 100755
File mode changed from 100644 to 100755
......@@ -26,9 +26,11 @@ sso_ids = args[0]
ssd = SuperSegmentationDataset()
for sso in ssd.get_super_segmentation_object(sso_ids):
assert sso.load_skeleton(), f"Skeleton of SSO {sso.id} does not exist."
# if 'spinehead_vol' in sso.skeleton:
# continue
extract_spinehead_volume_mesh(sso)
# TODO: remove try-except
try:
extract_spinehead_volume_mesh(sso)
except Exception:
raise
sso.save_skeleton()
with open(path_out_file, "wb") as f:
......
import sys
import pickle as pkl
import numpy as np
from syconn.handler.basics import load_pkl2obj
from syconn.proc.skeleton import kimimaro_mergeskels, kimimaro_skels_tokzip
from syconn import global_params
......@@ -20,6 +21,7 @@ working_dir = global_params.config.working_dir
scaling = global_params.config["scaling"]
path2results_dc, ssv_ids, zipname = args
results_dc = load_pkl2obj(path2results_dc)
for ssv_id in ssv_ids:
ssv_id = int(ssv_id)
combined_skel, degree_dict, neighbour_dict = kimimaro_mergeskels(results_dc[ssv_id], ssv_id)
......@@ -28,13 +30,17 @@ for ssv_id in ssv_ids:
sso.skeleton["neighbours"] = neighbour_dict
if combined_skel.vertices.size > 0:
sso.skeleton["nodes"] = combined_skel.vertices / scaling # to fit voxel coordinates
# get radius in pseudo-voxel units
sso.skeleton["diameters"] = (combined_skel.radii / scaling[0]) * 2 # divide by x scale
# kimimaro_skels_tokzip(combined_skel, ssv_id, zipname)
sso.skeleton["edges"] = combined_skel.edges
sso.skeleton["degree"] = degree_dict
else:
sso.skeleton["nodes"] = combined_skel.vertices
sso.skeleton["diameters"] = combined_skel.radii
sso.skeleton["edges"] = combined_skel.edges
sso.skeleton["degree"] = degree_dict
sso.skeleton["nodes"] = np.array([sso.rep_coord], dtype=np.float32)
sso.skeleton["diameters"] = np.zeros((1, ), dtype=np.float32)
sso.skeleton["edges"] = np.array([[0, 0], ], dtype=np.float32)
sso.skeleton["degree"] = {0: 0}
sso.save_skeleton()
with open(path_out_file, "wb") as f:
......
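
The merged kimimaro skeleton comes back in physical units (nm), and the block above converts it to the voxel frame expected by the SSV skeleton. A minimal sketch of that conversion with illustrative numbers; the scaling vector here is an example value, the real one comes from the config.

import numpy as np

scaling = np.array([10., 10., 25.])            # nm per voxel in x, y, z (example value)
vertices_nm = np.array([[1000., 2000., 5000.]])
radii_nm = np.array([50.])

nodes = vertices_nm / scaling                  # -> [[100., 200., 200.]]
diameters = (radii_nm / scaling[0]) * 2        # radius in x-voxel units, times two -> [10.]
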
......@@ -27,17 +27,15 @@ ssv_ids = args[0]
version = args[1]
version_dict = args[2]
working_dir = args[3]
if len(args) == 5:
cube_of_interest_bb = args[4]
else:
cube_of_interest_bb = None
ssd = SuperSegmentationDataset(working_dir=working_dir, version=version,
version_dict=version_dict)
for ssv in ssd.get_super_segmentation_object(ssv_ids):
ssv.load_skeleton()
ssv.skeleton["myelin"] = map_myelin2coords(ssv.skeleton["nodes"], mag=4,
cube_of_interest_bb=cube_of_interest_bb)
try:
ssv.skeleton["myelin"] = map_myelin2coords(ssv.skeleton["nodes"], mag=4)
except Exception:
raise
majorityvote_skeleton_property(
ssv, prop_key='myelin', max_dist=global_params.config['compartments']['dist_axoness_averaging'])
ssv.save_skeleton()
......
......@@ -6,11 +6,7 @@
# Authors: Philipp Schubert, Jörgen Kornfeld
import sys
try:
import cPickle as pkl
except ImportError:
import pickle as pkl
import pickle as pkl
from syconn.proc import sd_proc
path_storage_file = sys.argv[1]
......
......@@ -40,7 +40,7 @@ def run_morphology_embedding(max_n_jobs: Optional[int] = None):
:func:`~syconn.exec.skeleton.run_skeleton_generation`.
"""
if max_n_jobs is None:
max_n_jobs = global_params.config.ngpu_total * 2
max_n_jobs = global_params.config.ngpu_total * 4
log = initialize_logging('morphology_embedding', global_params.config.working_dir
+ '/logs/', overwrite=False)
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
......@@ -76,7 +76,7 @@ def run_celltype_prediction(max_n_jobs_gpu: Optional[int] = None):
Requires :func:`~syconn.exec.exec_init.run_create_neuron_ssd` and :func:`~run_neuron_rendering`.
"""
if max_n_jobs_gpu is None:
max_n_jobs_gpu = global_params.config.ngpu_total * 3 if qu.batchjob_enabled() else 1
max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
log = initialize_logging('celltype_prediction', global_params.config.working_dir + '/logs/',
overwrite=False)
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
......@@ -115,7 +115,7 @@ def run_semsegaxoness_prediction(max_n_jobs_gpu: Optional[int] = None):
"""
if max_n_jobs_gpu is None:
max_n_jobs_gpu = global_params.config.ngpu_total * 10 if qu.batchjob_enabled() else 1
max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
if qu.batchjob_enabled():
n_cores = global_params.config['ncores_per_node'] // global_params.config['ngpus_per_node']
else:
......@@ -150,7 +150,7 @@ def run_semsegspiness_prediction(max_n_jobs_gpu: Optional[int] = None):
max_n_jobs_gpu: Number of parallel GPU jobs. Used for the inference.
"""
if max_n_jobs_gpu is None:
max_n_jobs_gpu = global_params.config.ngpu_total * 10 if qu.batchjob_enabled() else 1
max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
log = initialize_logging('compartment_prediction', global_params.config.working_dir
+ '/logs/', overwrite=False)
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
......@@ -180,7 +180,7 @@ def run_glia_prediction_pts(max_n_jobs_gpu: Optional[int] = None):
Requires :func:`~syconn.exec_init.init_cell_subcell_sds`.
"""
if max_n_jobs_gpu is None:
max_n_jobs_gpu = global_params.config.ngpu_total * 10
max_n_jobs_gpu = global_params.config.ngpu_total * 4
log = initialize_logging('glia_separation', global_params.config.working_dir + '/logs/', overwrite=False)
pred_key = "glia_probas"
......
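
All four GPU-bound inference steps now size their job lists uniformly as `ngpu_total * 4`, and the axoness step reserves `ncores_per_node // ngpus_per_node` cores per GPU worker. A quick numeric check using the node layout from the SLURM example config above; taking `ngpu_total` as GPUs per node times nodes is an assumption.

# numeric check with the example cluster layout (32 cores and 2 GPUs per node, 2 nodes)
ncores_per_node, ngpus_per_node, nnodes_total = 32, 2, 2
ngpu_total = ngpus_per_node * nnodes_total        # assumed aggregation -> 4

max_n_jobs_gpu = ngpu_total * 4                   # -> 16 GPU batch jobs
n_cores = ncores_per_node // ngpus_per_node       # -> 16 cores reserved per GPU job
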
# -*- coding: utf-8 -*-
# SyConn - Synaptic connectivity inference toolkit
#
# Copyright (c) 2016 - now
# Max-Planck-Institute of Neurobiology, Munich, Germany
# Authors: Philipp Schubert, Joergen Kornfeld
import numpy as np
import shutil
import os
import glob
from typing import Optional, Union
import numpy as np
from knossos_utils.chunky import ChunkDataset
from knossos_utils import knossosdataset
......@@ -19,6 +20,7 @@ from syconn.handler.config import initialize_logging
from syconn.mp import batchjob_utils as qu
from syconn.proc.skel_based_classifier import SkelClassifier
from syconn import global_params
from syconn.mp.mp_utils import start_multiprocess_imap
from syconn.handler.basics import load_pkl2obj, write_obj2pkl
......@@ -65,17 +67,13 @@ def run_skeleton_generation(max_n_jobs: Optional[int] = None,
# remove_jobfolder=True)
def map_myelin_global(max_n_jobs: Optional[int] = None,
cube_of_interest_bb: Union[Optional[tuple], np.ndarray] = None):
def map_myelin_global(max_n_jobs: Optional[int] = None):
"""
Stand-alone myelin mapping to cell reconstruction skeletons. See kwarg ``map_myelin``
in :func:`run_skeleton_generation` for a mapping right after skeleton generation.
Args:
max_n_jobs: Number of parallel jobs.
cube_of_interest_bb: Optional bounding box (in mag 1 voxel coordinates). If given,
translates the skeleton nodes coordinates by the offset ``cube_of_interest_bb[0]`` to
match the coordinate frame of the complete data set volume.
"""
if max_n_jobs is None:
......@@ -91,7 +89,7 @@ def map_myelin_global(max_n_jobs: Optional[int] = None,
multi_params = chunkify(multi_params, max_n_jobs)
# add ssd parameters
multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir, cube_of_interest_bb)
multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir)
for ssv_ids in multi_params]
# create SSV skeletons, requires SV skeletons!
......@@ -141,6 +139,7 @@ def run_kimimaro_skelgen(max_n_jobs: Optional[int] = None, map_myelin: bool = Tr
kd = knossosdataset.KnossosDataset()
kd.initialize_from_knossos_path(global_params.config['paths']['kd_seg'])
cd = ChunkDataset()
# TODO: cube_size and overlap should be voxel size dependent
if cube_size is None:
cube_size = np.array([1024, 1024, 512])
overlap = np.array([100, 100, 50])
......@@ -151,7 +150,7 @@ def run_kimimaro_skelgen(max_n_jobs: Optional[int] = None, map_myelin: bool = Tr
# TODO: factor 1/2 must be adapted if anisotropic downsampling is used in KD!
dataset_size = (cube_of_interest_bb[1] - cube_of_interest_bb[0]) // 2
# if later working on mag=2
if np.all(cube_size > dataset_size):
cube_size = dataset_size
......@@ -159,25 +158,22 @@ def run_kimimaro_skelgen(max_n_jobs: Optional[int] = None, map_myelin: bool = Tr
cd.initialize(kd, dataset_size, cube_size, f'{tmp_dir}/cd_tmp_skel/',
box_coords=cube_of_interest_bb[0] // 2, fit_box_size=True)
multi_params = [(cube_size, off, overlap, cube_of_interest_bb) for off in cd.coord_dict]
out_dir = qu.batchjob_script(multi_params, "kimimaroskelgen", log=log, remove_jobfolder=False, n_cores=4)
out_dir = qu.batchjob_script(multi_params, "kimimaroskelgen", log=log, remove_jobfolder=False, n_cores=6)
ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
# list of SSV IDs and SSD parameters need to be given to each batch job
path_dic = {ssv_id: [] for ssv_id in ssd.ssv_ids}
# TODO: Each job needs to output a dictionary with IDs. Then create a global dict from these instead of loading
# the entire skeleton output files.
path_dc = {ssv_id: [] for ssv_id in ssd.ssv_ids}
log.info('Cube-wise skeleton generation finished. Generating cells-to-cubes dict.')
for fname in glob.glob(out_dir + '/*_ids.pkl'):
partial_skels = load_pkl2obj(fname)
for cell_id in partial_skels:
path_dic[cell_id].append(fname[:-4] + '.pkl')
res = start_multiprocess_imap(_collect_paths, glob.glob(out_dir + '*_ids.pkl'), nb_cpus=None)
for dc in res:
for k, v in dc.items():
path_dc[k].append(v[:-8] + '.pkl')
pathdict_filepath = f"{tmp_dir}/excube1_path_dict.pkl"
write_obj2pkl(pathdict_filepath, path_dic)
del path_dic
multi_params = ssd.ssv_ids
ssv_sizes = np.array([ssv.size for ssv in ssd.ssvs])
multi_params = chunkify_weighted(multi_params, max_n_jobs, ssv_sizes)
write_obj2pkl(pathdict_filepath, path_dc)
del path_dc
multi_params = chunkify_weighted(ssd.ssv_ids, max_n_jobs, ssd.load_cached_data('size'))
# add ssd parameters needed for merging of skeleton, ssv_ids, path to folder for kzip files
zipname = ("%s/excube1_kimimaro_skels_binaryfillingc100dps4/" % tmp_dir)
......@@ -190,9 +186,15 @@ def run_kimimaro_skelgen(max_n_jobs: Optional[int] = None, map_myelin: bool = Tr
qu.batchjob_script(multi_params, "kimimaromerge", log=log, remove_jobfolder=True, n_cores=2)
if map_myelin:
map_myelin_global(cube_of_interest_bb=cube_of_interest_bb)
map_myelin_global()
shutil.rmtree(tmp_dir)
shutil.rmtree(out_dir + '/../')
log.info('Finished skeleton generation.')
def _collect_paths(p: str) -> dict:
partial_res = load_pkl2obj(p)
res = {cellid: p for cellid in partial_res}
return res
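
`chunkify_weighted(ssd.ssv_ids, max_n_jobs, ssd.load_cached_data('size'))` balances the per-job workload by cell size instead of splitting the ID list evenly. A minimal sketch of one way such weighted chunking can be done (greedy assignment to the currently lightest chunk); SyConn's `chunkify_weighted` may use a different strategy.

import numpy as np

def chunkify_weighted_sketch(items, n_chunks, weights):
    # Greedy load balancing: put the next-heaviest item into the currently
    # lightest chunk. Illustration only, not necessarily syconn's scheme.
    order = np.argsort(weights)[::-1]
    chunks = [[] for _ in range(n_chunks)]
    loads = np.zeros(n_chunks)
    for i in order:
        j = int(np.argmin(loads))
        chunks[j].append(items[i])
        loads[j] += weights[i]
    return chunks

# e.g. five cells with very different sizes split across two merge jobs
chunks = chunkify_weighted_sketch(['a', 'b', 'c', 'd', 'e'], 2, np.array([100, 10, 10, 10, 90]))
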
......@@ -1089,16 +1089,9 @@ def _extract_synapse_type_thread(args):
if trafo_dict is not None:
vxl -= trafo_dict[so_id]
vxl = vxl[:, [1, 0, 2]]
# TODO: remove try-except
if global_params.config.syntype_available:
try:
asym_prop = np.mean(kd_asym.from_raw_cubes_to_list(vxl) == asym_label)
sym_prop = np.mean(kd_sym.from_raw_cubes_to_list(vxl) == sym_label)
except:
log_extraction.error("Failed to read raw cubes during synapse type "
"extraction.")
sym_prop = 0
asym_prop = 0
asym_prop = np.mean(kd_asym.from_raw_cubes_to_list(vxl) == asym_label)
sym_prop = np.mean(kd_sym.from_raw_cubes_to_list(vxl) == sym_label)
else:
sym_prop = 0
asym_prop = 0
......
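
With the try-except removed, the synapse type proportions are computed directly from the voxel labels read from the asymmetric/symmetric KnossosDatasets. A tiny sketch of the proportion computation on synthetic labels; the label values here are placeholders, not SyConn's actual encoding.

import numpy as np

asym_label, sym_label = 1, 2                       # placeholder label values
voxel_labels = np.array([1, 1, 2, 1, 0, 2])        # labels read at the synapse voxels
asym_prop = np.mean(voxel_labels == asym_label)    # 3/6 = 0.5
sym_prop = np.mean(voxel_labels == sym_label)      # 2/6 ~ 0.33
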
......@@ -75,7 +75,7 @@ def batchjob_script(params: list, name: str,
remove_jobfolder: bool = False,
log: Logger = None, sleep_time: int = 20,
show_progress=True,
overwrite=False):
overwrite=False, max_njobs_parallel: Optional[int] = None):
"""
Submits batch jobs to process a list of parameters `params` with a python
script on the specified environment (either None, SLURM or QSUB; run
......@@ -113,6 +113,7 @@ def batchjob_script(params: list, name: str,
sleep_time: Sleep duration before checking batch job states again.
show_progress: Only used if ``disabled_batchjob=True``.
overwrite:
max_njobs_parallel: Maximum number of jobs running at the same time.
"""
starttime = datetime.datetime.today().strftime("%m.%d")
# Parameter handling
......@@ -347,10 +348,10 @@ def batchjob_script(params: list, name: str,
p = subprocess.Popen(['df', '-T', batchjob_folder], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
stdin=subprocess.PIPE)
output += p.stdout.read()
print(output)
batchjob_folder_old = f"{os.path.dirname(batchjob_folder)}/DEL/{os.path.basename(batchjob_folder)}_DEL"
log_batchjob.warning(f'Deletion of job folder "{batchjob_folder}" was not complete. Moving to '
f'{batchjob_folder_old}. Error: "{str(e)}".\n "lsof {batchjob_folder}": {output}')
f'{batchjob_folder_old}. Error: "{str(e)}".\n "lsof {batchjob_folder}": '
f'{str(output)}')
if os.path.exists(os.path.dirname(batchjob_folder_old)):
shutil.rmtree(os.path.dirname(batchjob_folder_old), ignore_errors=True)
os.makedirs(os.path.dirname(batchjob_folder_old), exist_ok=True)
......
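
The new `max_njobs_parallel` kwarg caps how many jobs run concurrently. On SLURM, array jobs support such a cap via the `%` limiter; a hedged sketch of building the submit argument that way — how `batchjob_script` actually enforces the limit is not shown in this diff.

from typing import Optional

def slurm_array_spec(n_jobs: int, max_njobs_parallel: Optional[int] = None) -> str:
    # SLURM array syntax "0-99%16" runs at most 16 array tasks at the same time
    spec = f'0-{n_jobs - 1}'
    if max_njobs_parallel is not None:
        spec += f'%{max_njobs_parallel}'
    return spec

# 100 jobs, at most 16 running at once: sbatch --array=0-99%16 job.sh
print(slurm_array_spec(100, 16))   # -> '0-99%16'
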
......@@ -414,7 +414,7 @@ def map_subcell_extract_props(kd_seg_path: str, kd_organelle_paths: dict,
if qu.batchjob_enabled():
path_to_out = qu.batchjob_script(
multi_params, "map_subcell_extract_props", n_cores=n_cores)
out_files = glob.glob(path_to_out + "/*")
out_files = glob.glob(path_to_out + "/*.pkl")
for out_file in tqdm.tqdm(out_files, leave=False):
with open(out_file, 'rb') as f:
......@@ -547,7 +547,7 @@ def map_subcell_extract_props(kd_seg_path: str, kd_organelle_paths: dict,
start = time.time()
# create "dummy" IDs which represent each a unique storage path
storage_location_ids = rep_helper.get_unique_subfold_ixs(n_folders_fs_sc)
n_jobs = int(max(2 * global_params.config.ncore_total, len(storage_location_ids) / 10))
n_jobs = int(min(2 * global_params.config.ncore_total, len(storage_location_ids)))
multi_params = [(sv_id_block, n_folders_fs_sc, kd_organelle_paths)
for sv_id_block in basics.chunkify(storage_location_ids, n_jobs)]
if not qu.batchjob_enabled():
......
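
Replacing `max` with `min` caps the job count at the number of storage locations, so no job receives an empty chunk. A short numeric illustration with assumed values:

# numeric illustration of the max -> min change (both counts are assumed values)
ncore_total = 64
n_storage_locations = 100
n_jobs_old = int(max(2 * ncore_total, n_storage_locations / 10))   # -> 128 jobs for 100 chunks
n_jobs_new = int(min(2 * ncore_total, n_storage_locations))        # -> at most one job per location
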
......@@ -51,23 +51,23 @@ def kimimaro_skelgen(cube_size, cube_offset, overlap, cube_of_interest_bb) -> di
# converting mag 2 units to mag 1 (required by load_seg)
seg = kd.load_seg(size=cube_size*2, offset=np.array(cube_offset)*2, mag=2).swapaxes(0, 2)
seg_cell = np.zeros_like(seg)
# transform IDs to agglomerated SVs
for x in range(seg.shape[0]):
for y in range(seg.shape[1]):
for z in range(seg.shape[2]):
try:
seg_cell[x, y, z] = ssd.mapping_dict_reversed[seg[x, y, z]]
seg[x, y, z] = ssd.mapping_dict_reversed[seg[x, y, z]]
except KeyError:
seg_cell[x, y, z] = 0
seg[x, y, z] = 0
seg_cell = multi_mop_backgroundonly(ndimage.binary_fill_holes, seg_cell, iterations=None)
seg = multi_mop_backgroundonly(ndimage.binary_fill_holes, seg, iterations=None)
if np.all(cube_size < dataset_size) is True:
seg_cell = seg_cell[overlap[0]:-overlap[0], overlap[1]:-overlap[1], overlap[2]:-overlap[2]]
if np.all(cube_size < dataset_size):
seg = seg[overlap[0]:-overlap[0], overlap[1]:-overlap[1], overlap[2]:-overlap[2]]
# kimimaro code
skels = kimimaro.skeletonize(
seg_cell,
seg,
teasar_params={
'scale': 4,
'const': 100, # physical units
......@@ -87,20 +87,14 @@ def kimimaro_skelgen(cube_size, cube_offset, overlap, cube_of_interest_bb) -> di
fix_branching=True, # default True
fix_borders=True, # default True
progress=False, # show progress bar
parallel=1, # <= 0 all cpu, 1 single process, 2+ multiprocess
parallel_chunk_size=100, # how many skeletons to process before updating progress bar
parallel=2, # <= 0 all cpu, 1 single process, 2+ multiprocess
)
for ii in skels:
cell = skels[ii]
for i, v in enumerate(cell.vertices):
c = cell.vertices[i] # already in physical coordinates (nm)
# now add the offset in physical coordinates, both are originally in mag 2
# TODO: the factor 1/2 must be adapted when using anisotropic downsampling of the
# KnossosDataset
c = np.array(c + (cube_offset - cube_of_interest_bb[0] // 2) * kd.scales[1],
dtype=np.int)
cell.vertices[i] = c
# cell.vertices already in physical coordinates (nm)
# now add the offset in physical coordinates, both are originally in mag 2
# TODO: the factor 1/2 must be adapted when using anisotropic downsampling of the
# KnossosDataset
skels[ii].vertices += (cube_offset * kd.scales[1]).astype(np.int)
# cloud_volume docu: " reduce size of skeleton by factor of 2, preserves branch and end
# points" link:https://github.com/seung-lab/cloud-volume/wiki/Advanced-Topic:-Skeleton
# cell = cell.downsample(2)
......
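
The per-voxel Python loop that maps supervoxel IDs to agglomerated cell IDs dominates the runtime of this step; given the commit's goal of reducing memory use and runtime, a vectorized remapping is a natural sketch. This is only an illustrative alternative, not the code used in the commit.

import numpy as np

def remap_to_cells(seg: np.ndarray, mapping_dict_reversed: dict) -> np.ndarray:
    # Sketch of a vectorized SV -> cell ID remapping; unknown IDs become 0.
    unique_svs = np.unique(seg)                       # sorted unique supervoxel IDs
    mapped = np.array([mapping_dict_reversed.get(int(sv), 0) for sv in unique_svs],
                      dtype=np.uint64)
    # every value of seg is contained in unique_svs, so searchsorted yields exact indices
    idx = np.searchsorted(unique_svs, seg)
    return mapped[idx]
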
......@@ -551,7 +551,7 @@ def create_sso_skeletons_wrapper(ssvs: List['super_segmentation.SuperSegmentatio
def map_myelin2coords(coords: np.ndarray,
cube_edge_avg: np.ndarray = np.array([21, 21, 11]),
thresh_proba: float = 255 // 2, thresh_majority: float = 0.1,
mag: int = 1, cube_of_interest_bb: Optional[tuple] = None) -> np.ndarray:
mag: int = 1) -> np.ndarray:
"""
Retrieves a myelin prediction at every location in `coords`. The classification
is the majority label within a cube of size `cube_edge_avg` around the
......@@ -595,9 +595,6 @@ def map_myelin2coords(coords: np.ndarray,
``thresh_majority=0.1`` means that 10% myelin voxels within ``cube_edge_avg``
will flag the corresponding locations as myelinated.
mag: Data mag. level used to retrieve the prediction results.
cube_of_interest_bb: Optional bounding box (in mag 1 voxel coordinates). If given,
translates the skeleton nodes coordinates by the offset ``cube_of_interest_bb[0]`` to
match the coordinate frame of the complete data set volume.
Returns:
Myelin prediction (0: no myelin, 1: myelinated neuron) at every coordinate.
......@@ -605,16 +602,12 @@ def map_myelin2coords(coords: np.ndarray,
myelin_kd_p = global_params.config.working_dir + "/knossosdatasets/myelin/"
if not os.path.isdir(myelin_kd_p):
raise ValueError(f'Could not find myelin KnossosDataset at {myelin_kd_p}.')
if cube_of_interest_bb is not None:
cube_of_interest_bb = np.array(cube_of_interest_bb, dtype=np.int)
kd = kd_factory(myelin_kd_p)
myelin_preds = np.zeros((len(coords)), dtype=np.uint8)
n_cube_vx = np.prod(cube_edge_avg)
# convert to mag 1, TODO: requires adaption if anisotropic downsampling was used in KD!
cube_edge_avg = cube_edge_avg * mag
for ix, c in enumerate(coords):
if cube_of_interest_bb is not None:
c += cube_of_interest_bb[0]
offset = c - cube_edge_avg // 2
myelin_proba = kd.load_raw(size=cube_edge_avg, offset=offset, mag=mag).swapaxes(0, 2)
myelin_ratio = np.sum(myelin_proba > thresh_proba) / n_cube_vx
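
The loop is cut off after the ratio computation; based on the docstring's description of `thresh_majority`, the remaining step is presumably a single threshold comparison (an assumption, since the line itself is not shown in this diff).

# assumed final step of the loop (not shown above): a node counts as myelinated
# when the myelin-voxel fraction in its averaging cube exceeds thresh_majority
myelin_preds[ix] = myelin_ratio > thresh_majority
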
......@@ -1843,8 +1836,7 @@ def view_embedding_of_sso_nocache(sso: 'SuperSegmentationObject', model: 'torch.
and according to given view properties without storing them on the file system. Views will
be predicted with the given `model`. See `predict_views_embedding` in `super_segmentation_object`
for an alternative which uses file-system cached views.
By default, resulting predictions and probabilities are stored as `latent_morph`
and `latent_morph`.
By default, resulting predictions are stored as `latent_morph`.
Args:
sso:
......