Commit 58973e94 authored by pschubert's avatar pschubert
Browse files

Added `version_sd` and `version_dict_sd` kwargs to `combine_voxels`; minor refinements.

parent 624b519e
......@@ -918,7 +918,7 @@ def _extract_voxels_thread(args):
return map_dict
def combine_voxels(workfolder, hdf5names,
def combine_voxels(workfolder, hdf5names, version_sd=0, version_dict_sd=None,
n_folders_fs=10000, stride=10, nb_cpus=1,
qsub_pe=None, qsub_queue=None, n_max_co_processes=None):
"""
......@@ -954,12 +954,13 @@ def combine_voxels(workfolder, hdf5names,
segdataset = segmentation.SegmentationDataset(obj_type=hdf5_name,
working_dir=workfolder,
version="new",
create=True,
version=version_sd,
create=True, version_dict=version_dict_sd,
n_folders_fs=n_folders_fs)
for p in voxel_rel_paths_2stage:
os.makedirs(segdataset.so_storage_path + p)
if not os.path.isdir(segdataset.so_storage_path + p):
os.makedirs(segdataset.so_storage_path + p)
multi_params = []
path_blocks = np.array_split(np.array(voxel_rel_paths),
......@@ -992,7 +993,7 @@ def combine_voxels(workfolder, hdf5names,
multi_params.append([workfolder, hdf5_name, path_block,
path_block_dicts, segdataset.version,
n_folders_fs])
n_folders_fs, version_dict_sd])
if qsub_pe is None and qsub_queue is None:
results = sm.start_multiprocess_imap(_combine_voxels_thread,
......@@ -1017,12 +1018,13 @@ def _combine_voxels_thread(args):
path_block_dicts = args[3]
dataset_version = args[4]
n_folders_fs = args[5]
version_dict_sd = args[6]
dataset_temp_path = workfolder + "/%s_temp/" % hdf5_name
segdataset = segmentation.SegmentationDataset(
obj_type=hdf5_name, working_dir=workfolder, version=dataset_version,
n_folders_fs=n_folders_fs)
n_folders_fs=n_folders_fs, version_dict=version_dict_sd)
for i_voxel_rel_path, voxel_rel_path in enumerate(voxel_rel_paths):
......
......@@ -432,7 +432,7 @@ def from_probabilities_to_objects_parameter_sweeping(cset,
def from_ids_to_objects(cset, filename, hdf5names=None, n_folders_fs=10000,
overlaydataset_path=None, chunk_list=None, offset=None,
size=None, suffix="", qsub_pe=None, qsub_queue=None, qsub_slots=None,
n_max_co_processes=None, n_chunk_jobs=5000):
n_max_co_processes=None, n_chunk_jobs=5000, version_dict_sd=None):
"""
Main function for the object extraction step; combines all needed steps
Parameters
......@@ -494,13 +494,13 @@ def from_ids_to_objects(cset, filename, hdf5names=None, n_folders_fs=10000,
all_times.append(time.time() - time_start)
step_names.append("voxel extraction")
print("\nTime needed for extracting voxels: %.3fs" % all_times[-1])
#
# # --------------------------------------------------------------------------
#
time_start = time.time()
oes.combine_voxels(os.path.dirname(cset.path_head_folder.rstrip("/")),
hdf5names, qsub_pe=qsub_pe, qsub_queue=qsub_queue,
n_folders_fs=n_folders_fs,
n_folders_fs=n_folders_fs, version_dict_sd=version_dict_sd,
n_max_co_processes=n_max_co_processes)
all_times.append(time.time() - time_start)
step_names.append("combine voxels")
......
......@@ -380,8 +380,8 @@ def data2kzip(kzip_path, fpaths, fnames_in_zip=None, force_overwrite=True,
verbose : bool
force_overwrite : bool
"""
if not force_overwrite:
raise NotImplementedError('Currently modification of data in existing kzip is not implemented.')
# if not force_overwrite:
# raise NotImplementedError('Currently modification of data in existing kzip is not implemented.')
nb_files = len(fpaths)
if verbose:
log_handler.info('Writing {} files to .zip.'.format(nb_files))
......
......@@ -327,7 +327,7 @@ def create_h5_gt_file(fname, raw, label, foreground_ids=None, debug=False):
raw = xyz2zxy(raw)
print("Raw:", raw.shape, raw.dtype, raw.min(), raw.max())
print("Label:", label.shape, label.dtype, label.min(), label.max())
print("-----------------\nGT Summary:\n%s\n" %str(Counter(label.flatten()).items()))
print("-----------------\nGT Summary:\n%s\n" % str(Counter(label.flatten()).items()))
if not fname[-2:] == "h5":
fname = fname + ".h5"
if debug:
......@@ -433,11 +433,11 @@ def predict_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size=None,
"""
if isinstance(gpu_ids, int) or len(gpu_ids) == 1:
_pred_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size,
mfp_active, gpu_ids, overwrite)
mfp_active, gpu_ids, overwrite)
else:
print("Starting multi-gpu prediction with GPUs:", gpu_ids)
_multi_gpu_ds_pred(kd_p, kd_pred_p, cd_p, model_p,imposed_patch_size, gpu_ids)
_multi_gpu_ds_pred(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size, gpu_ids)
def _pred_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size=None,
......@@ -464,7 +464,7 @@ def _pred_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size=None,
the GPU used
overwrite : bool
True: fresh predictions ; False: earlier prediction continues
Returns
-------
......@@ -515,7 +515,7 @@ def _pred_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size=None,
if n is None:
kd_pred = KnossosDataset()
kd_pred.initialize_without_conf(kd_pred_p, kd.boundary, kd.scale,
kd.experiment_name, mags=[1,2,4,8])
kd.experiment_name, mags=[1, 2, 4, 8])
cd.export_cset_to_kd(kd_pred, "pred", ["pred"], [4, 4], as_raw=True,
stride=[256, 256, 256])
......@@ -537,7 +537,7 @@ def to_knossos_dataset(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size,mfp_ac
cd.initialize(kd, kd.boundary, [512, 512, 256], cd_p, overlap=offset,
box_coords=np.zeros(3), fit_box_size=True)
kd_pred.initialize_without_conf(kd_pred_p, kd.boundary, kd.scale,
kd.experiment_name, mags=[1,2,4,8])
kd.experiment_name, mags=[1, 2, 4, 8])
cd.export_cset_to_kd(kd_pred, "pred", ["pred"], [4, 4], as_raw=True,
stride=[256, 256, 256])
......@@ -607,6 +607,7 @@ class NeuralNetworkInterface(object):
Inference class for elektronn2 models, support will end at some point.
Switching to 'InferenceModel' in elektronn3.model.base in the long run.
"""
def __init__(self, model_path, arch='marvin', imposed_batch_size=1,
channels_to_load=(0, 1, 2, 3), normal=False, nb_labels=2,
normalize_data=False, normalize_func=None, init_gpu=None):
......@@ -666,7 +667,7 @@ class NeuralNetworkInterface(object):
cnt += 1
pbar.update()
x_b = x[b]
proba[b] = self.model.predict(x_b)[None, ]
proba[b] = self.model.predict(x_b)[None,]
overhead = len(x) % bs
# TODO: add proper axis handling, maybe introduce axistags
if overhead != 0:
......@@ -680,8 +681,8 @@ class NeuralNetworkInterface(object):
end = time.time()
sys.stdout.write("\r%0.2f\n" % 1.0)
sys.stdout.flush()
print("Prediction of %d samples took %0.2fs; %0.4fs/sample." %\
(len(x), end-start, (end-start)/len(x)))
print("Prediction of %d samples took %0.2fs; %0.4fs/sample." % \
(len(x), end - start, (end - start) / len(x)))
pbar.close()
if self.arch == "triplet":
return proba[..., 0]
......@@ -865,7 +866,6 @@ def _multi_gpu_ds_pred(kd_p, kd_pred_p, cd_p, model_p,
def start_partial_pred(kd_p, kd_pred_p, cd_p, model_p, imposed_patch_size,
gpuid, i, n):
fpath = os.path.dirname(os.path.abspath(__file__))
path, file = os.path.split(os.path.dirname(fpath))
cmd = "python {0}/syconn/handler/partial_ds_pred.py {1} {2} {3} {4}" \
......
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment