Philipp Schubert / SyConn / Commits

Commit f72fd8d0, authored Aug 12, 2020 by Philipp Schubert

    improvements and fixes

parent ebbaac10
Changes: 25 files
README.md

@@ -35,7 +35,7 @@ For SyConn v1, please refer to the old [documentation](https://structuralneurobi
 The Team
 --------
 The Synaptic connectivity inference toolkit is currently developed at the Max-Planck-Institute of Neurobiology in Martinsried by
-Philipp Schubert, Maria Kawula, Carl Constantin v. Wedemeyer, Atul Mohite, Gaurav Kumar and Joergen Kornfeld.
+Philipp Schubert, Jonathan Klimesch, Alexandra Rother and Joergen Kornfeld.

 Acknowledgements
@@ -49,8 +49,8 @@ used for our project can be found
 [here](https://github.com/deepmind/dm_control/blob/30069ac11b60ee71acbd9159547d0bc334d63281/dm_control/_render/pyopengl/egl_ext.py).
 Thanks to Julia Kuhl (see http://somedonkey.com/ for more beautiful
 work) for designing and creating the logo and to Rangoli Saxena, Mariana
-Shumliakivska, Josef Mark, Maria Kawula, Atul Mohite, Alexandra Rother and Martin Drawitsch and for code contributions.
+Shumliakivska, Josef Mark, Maria Kawula, Atul Mohite, Carl Constantin v. Wedemeyer, Gaurav Kumar and Martin Drawitsch for code contributions.

 Publications
...
...
docs/syconn.exec.rst

@@ -12,10 +12,10 @@ syconn.exec.exec\_init module
    :undoc-members:
    :show-inheritance:

-syconn.exec.exec\_multiview module
+syconn.exec.exec\_inference module
 ----------------------------------------------

-.. automodule:: syconn.exec.exec_multiview
+.. automodule:: syconn.exec.exec_inference
    :members:
    :undoc-members:
    :show-inheritance:
...
...
examples/start.py

@@ -8,7 +8,6 @@ import os
 import glob
 import shutil
 import sys
 import time
 import argparse
 import numpy as np
...
@@ -45,11 +44,11 @@ if __name__ == '__main__':
         ('glia', {'prior_glia_removal': prior_glia_removal}),
         ('use_point_models', False),
         ('pyopengl_platform', 'egl'),  # 'osmesa' or 'egl'
-        ('batch_proc_system', None),  # None, 'SLURM' or 'QSUB'
-        ('ncores_per_node', 20),
-        ('mem_per_node', 249500),
-        ('ngpus_per_node', 1),
-        ('nnodes_total', 1),
+        ('batch_proc_system', 'SLURM'),  # None, 'SLURM' or 'QSUB'
+        ('ncores_per_node', 32),
+        ('mem_per_node', 208000),
+        ('ngpus_per_node', 2),
+        ('nnodes_total', 2),
         ('skeleton', {'use_kimimaro': False}),
         ('log_level', log_level),
         # these will be created during synapse type prediction (
...
@@ -204,7 +203,7 @@ if __name__ == '__main__':
     log.info('Step 4/9 - Creating SuperSegmentationDataset')
     ftimer.start('SSD generation')
-    exec_init.run_create_neuron_ssd(kimimaro=global_params.config.use_kimimaro)
+    exec_init.run_create_neuron_ssd()
     ftimer.stop()

     if not (global_params.config.use_onthefly_views or global_params.config.use_point_models):
...
...
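The key/value pairs in the hunk above are the run's configuration overrides. As a rough illustration of how such a list of (key, value) pairs can be folded into a nested configuration dictionary (a minimal sketch with hypothetical names, not SyConn's generate_default_conf implementation):

    def apply_overrides(base_conf, overrides):
        # scalar entries overwrite, dict entries update a nested section
        for key, value in overrides:
            if isinstance(value, dict):
                base_conf.setdefault(key, {}).update(value)   # e.g. ('skeleton', {'use_kimimaro': False})
            else:
                base_conf[key] = value                        # e.g. ('ncores_per_node', 32)
        return base_conf

    conf = apply_overrides({}, [
        ('batch_proc_system', 'SLURM'),   # None, 'SLURM' or 'QSUB'
        ('ncores_per_node', 32),
        ('skeleton', {'use_kimimaro': False}),
    ])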
scripts/dataset_specific/start_j0251_gce.py

@@ -9,12 +9,16 @@ import os
 import time
 import numpy as np
 import networkx as nx
 import shutil

 from syconn.handler.basics import FileTimer
 from syconn.handler.config import generate_default_conf, initialize_logging
 from syconn import global_params
 from syconn.exec import exec_init, exec_syns, exec_render, exec_dense_prediction, exec_inference

 # TODO: set myelin knossosdataset - currently mapping of myelin to skeletons does not allow partial cubes
 if __name__ == '__main__':
     # ----------------- DEFAULT WORKING DIRECTORY ---------------------
     experiment_name = 'j0251'
...
...
@@ -26,9 +30,10 @@ if __name__ == '__main__':
         ('batch_proc_system', 'SLURM'),
         ('ncores_per_node', 32),
         ('ngpus_per_node', 2),
-        ('nnodes_total', 5),
+        ('nnodes_total', 2),
+        ('mem_per_node', 208990),
-        ('use_point_models', True),
+        ('use_point_models', False),
         ('skeleton', {'use_kimimaro': True}),
         ('meshes', {'use_new_meshing': True}),
         ('views', {'use_onthefly_views': True, 'use_new_renderings_locs': True,
...
...
@@ -45,6 +50,7 @@ if __name__ == '__main__':
          }
          )
     ]
     chunk_size = (512, 512, 256)
     n_folders_fs = 10000
     n_folders_fs_sc = 10000
...
...
@@ -69,24 +75,32 @@ if __name__ == '__main__':
                                  vc=lambda x: (x == 3).astype(np.uint8),
                                  sj=lambda x: (x == 2).astype(np.uint8))

-    # Preparing data
+    # Prepare data
     # --------------------------------------------------------------------------
     # Setup working directory and logging
+    shape_j0251 = np.array([27119, 27350, 15494])
-    cube_size = np.array([2048, 2048, 1024])
+    cube_size = np.array([2048, 2048, 1024]) * 2
+    cube_offset = (shape_j0251 - cube_size) // 2
+    cube_of_interest_bb = (cube_offset, cube_offset + cube_size)
+    # cube_of_interest_bb = None  # process the entire cube!
     working_dir = f"/mnt/example_runs/j0251_off{'_'.join(map(str, cube_offset))}_size{'_'.join(map(str, cube_size))}"
     log = initialize_logging(experiment_name, log_dir=working_dir + '/logs/')
-    time_stamps = [time.time()]
-    step_idents = ['t-0']
-    log.info('Step 0/9 - Preparation')
+    ftimer = FileTimer(working_dir + '/.timing.pkl')
     # Preparing config
     # currently this is were SyConn looks for the neuron rag
+    log.info('Step 0/9 - Preparation')
+    ftimer.start('Preparation')
     # figure out SyConn data path and copy models to working directory
     for curr_dir in [os.path.dirname(os.path.realpath(__file__)) + '/',
                      os.path.abspath(os.path.curdir) + '/',
                      os.path.expanduser('~/SyConn/')]:
         if os.path.isdir(curr_dir + '/models'):
             break
     if os.path.isdir(curr_dir + '/models/') and not os.path.isdir(working_dir + '/models/'):
         shutil.copytree(curr_dir + '/models', working_dir + '/models/')

     # Prepare config
     if global_params.wd is not None:
-        log.critical('Example run started. Original working directory defined'
-                     ' in `global_params.py` '
+        log.critical('Example run started. Original working directory defined in `global_params.py` '
                      'is overwritten and set to "{}".'.format(working_dir))

     generate_default_conf(working_dir, scale, syntype_avail=syntype_avail, kd_seg=seg_kd_path, kd_mi=mi_kd_path,
...
...
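For reference, the bounding-box arithmetic in the hunk above centres a processing cube inside the full j0251 volume. A quick worked example with the values from this script (the print statement is only for illustration):

    import numpy as np

    shape_j0251 = np.array([27119, 27350, 15494])
    cube_size = np.array([2048, 2048, 1024]) * 2          # -> [4096 4096 2048]
    cube_offset = (shape_j0251 - cube_size) // 2          # -> [11511 11627  6723]
    cube_of_interest_bb = (cube_offset, cube_offset + cube_size)
    print(cube_of_interest_bb[1])                         # -> [15607 15723  8771]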
@@ -95,33 +109,35 @@ if __name__ == '__main__':
     global_params.wd = working_dir
     os.makedirs(global_params.config.temp_path, exist_ok=True)
-    start = time.time()
+
+    # create symlink to myelin predictions
+    if not os.path.exists(f'/mnt/j0251_data/myelin{working_dir}/knossosdatasets/'):
+        assert os.path.exists('/mnt/j0251_data/myelin')
+        os.system(f'ln -s /mnt/j0251_data/myelin {working_dir}/knossosdatasets/')

     # check model existence
     for mpath_key in ['mpath_spiness', 'mpath_syn_rfc', 'mpath_celltype_e3', 'mpath_axonsem',
                       'mpath_glia_e3', 'mpath_myelin', 'mpath_tnet']:
         mpath = getattr(global_params.config, mpath_key)
         if not (os.path.isfile(mpath) or os.path.isdir(mpath)):
-            raise ValueError('Could not find model "{}". Make sure to copy the'
-                             ' "models" folder into the current working '
-                             'directory "{}".'.format(mpath, working_dir))
+            raise ValueError('Could not find model "{}". Make sure to copy the "models" folder into the current '
+                             'working directory "{}".'.format(mpath, working_dir))
+    ftimer.stop()

     # Start SyConn
     # --------------------------------------------------------------------------
     log.info('Finished example cube initialization (shape: {}). Starting SyConn pipeline.'.format(cube_size))
     log.info('Example data will be processed in "{}".'.format(working_dir))
-    time_stamps.append(time.time())
-    step_idents.append('Preparation')

     log.info('Step 1/9 - Predicting sub-cellular structures')
-    # myelin is not needed before `run_create_neuron_ssd`
+    # ftimer.start('Myelin prediction')
+    # # myelin is not needed before `run_create_neuron_ssd`
     # exec_dense_prediction.predict_myelin(raw_kd_path, cube_of_interest=cube_of_interest_bb)
-    time_stamps.append(time.time())
-    step_idents.append('Dense predictions')
+    # ftimer.stop()

     log.info('Step 2/9 - Creating SegmentationDatasets (incl. SV meshes)')
+    ftimer.start('SD generation')
     exec_init.init_cell_subcell_sds(chunk_size=chunk_size, n_folders_fs_sc=n_folders_fs_sc, n_folders_fs=n_folders_fs,
                                     cube_of_interest_bb=cube_of_interest_bb,
                                     load_cellorganelles_from_kd_overlaycubes=True,
...
...
@@ -142,73 +158,63 @@ if __name__ == '__main__':
     nx.write_edgelist(rag_sub_g, global_params.config.init_rag_path)

     exec_init.run_create_rag()
-    time_stamps.append(time.time())
-    step_idents.append('SD generation')
+    ftimer.stop()

     if global_params.config.prior_glia_removal:
         log.info('Step 2.5/9 - Glia separation')
+        ftimer.start('Glia separation')
         if not global_params.config.use_point_models:
             exec_render.run_glia_rendering()
             exec_inference.run_glia_prediction()
         else:
             exec_inference.run_glia_prediction_pts()
         exec_inference.run_glia_splitting()
-        time_stamps.append(time.time())
-        step_idents.append('Glia separation')
+        ftimer.stop()

     log.info('Step 3/9 - Creating SuperSegmentationDataset')
-    exec_init.run_create_neuron_ssd()
-    time_stamps.append(time.time())
-    step_idents.append('SSD generation')
+    ftimer.start('SSD generation')
+    exec_init.run_create_neuron_ssd(cube_of_interest_bb=cube_of_interest_bb)
+    ftimer.stop()

     if not (global_params.config.use_onthefly_views or global_params.config.use_point_models):
         log.info('Step 3.5/9 - Neuron rendering')
+        ftimer.start('Neuron rendering')
         exec_render.run_neuron_rendering()
-        time_stamps.append(time.time())
-        step_idents.append('Neuron rendering')
+        ftimer.stop()

     log.info('Step 4/9 - Synapse detection')
-    exec_syns.run_syn_generation(chunk_size=chunk_size, n_folders_fs=n_folders_fs_sc)
-    time_stamps.append(time.time())
-    step_idents.append('Synapse detection')
+    ftimer.start('Synapse detection')
+    exec_syns.run_syn_generation(chunk_size=chunk_size, n_folders_fs=n_folders_fs_sc,
+                                 cube_of_interest_bb=cube_of_interest_bb)
+    ftimer.stop()

     log.info('Step 5/9 - Axon prediction')
+    ftimer.start('Axon prediction')
     exec_inference.run_semsegaxoness_prediction()
-    time_stamps.append(time.time())
-    step_idents.append('Axon prediction')
+    ftimer.stop()

     log.info('Step 6/9 - Spine prediction')
+    ftimer.start('Spine prediction')
     exec_inference.run_semsegspiness_prediction()
     exec_syns.run_spinehead_volume_calc()
-    time_stamps.append(time.time())
-    step_idents.append('Spine prediction')
+    ftimer.stop()

     log.info('Step 7/9 - Morphology extraction')
+    ftimer.start('Morphology extraction')
     exec_inference.run_morphology_embedding()
-    time_stamps.append(time.time())
-    step_idents.append('Morphology extraction')
+    ftimer.stop()

     log.info('Step 8/9 - Celltype analysis')
-    # exec_inference.run_celltype_prediction()
-    time_stamps.append(time.time())
-    step_idents.append('Celltype analysis')
+    ftimer.start('Celltype analysis')
+    exec_inference.run_celltype_prediction()
+    ftimer.stop()

     log.info('Step 9/9 - Matrix export')
+    ftimer.start('Matrix export')
     exec_syns.run_matrix_export()
-    time_stamps.append(time.time())
-    step_idents.append('Matrix export')
-
-    time_stamps = np.array(time_stamps)
-    dts = time_stamps[1:] - time_stamps[:-1]
-    dt_tot = time_stamps[-1] - time_stamps[0]
-    dt_tot_str = time.strftime("%Hh:%Mmin:%Ss", time.gmtime(dt_tot))
-    time_summary_str = f"\nEM data analysis of experiment '{experiment_name}' finished after {dt_tot_str}.\n"
-    n_steps = len(step_idents[1:]) - 1
-    for i in range(len(step_idents[1:])):
-        step_dt = time.strftime("%Hh:%Mmin:%Ss", time.gmtime(dts[i]))
-        step_dt_per = int(dts[i] / dt_tot * 100)
-        step_str = '{:<10}{:<25}{:<20}{:<4s}\n'.format(f'[{i}/{n_steps}]', step_idents[i + 1], step_dt, f'{step_dt_per}%')
-        time_summary_str += step_str
+    ftimer.stop()
+
+    time_summary_str = ftimer.prepare_report(experiment_name)
     log.info(time_summary_str)
     log.info('Setting up flask server for inspection. Annotated cell reconstructions and wiring can be analyzed via '
              'the KNOSSOS-SyConn plugin at `SyConn/scripts/kplugin/syconn_knossos_viewer.py`.')
...
...
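The recurring pattern in the hunks above is that the manual time_stamps/step_idents bookkeeping is replaced by a FileTimer that wraps each pipeline step in start/stop calls and produces a summary at the end. A minimal stand-in with the same surface, to illustrate the pattern only (this is a sketch, not syconn.handler.basics.FileTimer):

    import time

    class MiniFileTimer:
        def __init__(self, path):
            self.path = path          # a real implementation would persist timings here
            self.timings = {}         # step name -> elapsed seconds
            self._running = None

        def start(self, name):
            self._running = (name, time.time())

        def stop(self):
            if self._running is not None:
                name, t0 = self._running
                self.timings[name] = self.timings.get(name, 0.0) + time.time() - t0
                self._running = None

        def prepare_report(self, experiment_name):
            total = sum(self.timings.values()) or 1.0
            lines = [f"Timing report for '{experiment_name}':"]
            for name, dt in self.timings.items():
                lines.append(f'{name:<25}{dt:>10.1f}s{dt / total * 100:>7.1f}%')
            return '\n'.join(lines)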
scripts/point_party/eval_cmpt.py (new file, mode 100755)

import os
from tqdm import tqdm
import numpy as np
from syconn.reps.super_segmentation import SuperSegmentationObject, SuperSegmentationDataset
from morphx.classes.pointcloud import PointCloud


if __name__ == '__main__':
    wd = os.path.expanduser('~/SyConn/example_cube3/')
    save_path = os.path.expanduser('~/thesis/current_work/paper/pipeline_tests/example_cube3/')
    ssd = SuperSegmentationDataset(wd)
    total_verts = None
    total_labels = None
    for ix in tqdm(range(len(ssd.ssv_ids))):
        id = ssd.ssv_ids[ix]
        sso = SuperSegmentationObject(id)
        verts = sso.mesh[1].reshape((-1, 3))
        ads = sso.label_dict()['ads']
        abt = sso.label_dict()['abt']
        dnh = sso.label_dict()['dnh']
        a_mask = (ads == 1).reshape(-1)
        d_mask = (ads == 0).reshape(-1)
        abt[abt == 0] = 3
        abt[abt == 2] = 4
        dnh[dnh == 1] = 5
        dnh[dnh == 2] = 6
        ads[a_mask] = abt[a_mask]
        ads[d_mask] = dnh[d_mask]
        if ix % 10 == 0:
            pc = PointCloud(vertices=verts, labels=ads)
            pc.save2pkl(save_path + f"{id}.pkl")
        if total_verts is None:
            total_verts = verts
            total_labels = ads
        else:
            total_verts = np.concatenate((total_verts, verts))
            total_labels = np.concatenate((total_labels, ads))
    total_pc = PointCloud(vertices=total_verts, labels=total_labels)
    total_pc.save2pkl(save_path + "total.pkl")
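The core of eval_cmpt.py is the merge of three per-vertex label sets (called ads, abt and dnh above) into one array with disjoint class indices. A toy run of the same index arithmetic (the class semantics in the comments are assumptions, only the remapping mirrors the script):

    import numpy as np

    ads = np.array([0, 1, 2, 1, 0])   # coarse labels, e.g. 0 dendrite, 1 axon, 2 soma
    abt = np.array([0, 2, 1, 0, 1])   # axon sub-labels
    dnh = np.array([1, 0, 2, 2, 0])   # dendrite sub-labels

    a_mask = (ads == 1)
    d_mask = (ads == 0)
    abt[abt == 0] = 3                 # shift axon sub-classes out of the coarse range
    abt[abt == 2] = 4
    dnh[dnh == 1] = 5                 # shift dendrite sub-classes out of the coarse range
    dnh[dnh == 2] = 6
    ads[a_mask] = abt[a_mask]
    ads[d_mask] = dnh[d_mask]
    print(ads)                        # [5 4 2 3 0]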
scripts/point_party/eval_cmpt_j0126.py (new file, mode 100755)

import time
from syconn import global_params
from syconn.exec import exec_inference


if __name__ == '__main__':
    global_params.wd = "/wholebrain/songbird/j0126/areaxfs_v6/"
    global_params.config['use_point_models'] = True
    global_params.config['ncores_per_node'] = 10
    global_params.config['ngpus_per_node'] = 1
    global_params.config['nnodes_total'] = 1
    global_params.config['log_level'] = 'DEBUG'
    global_params.config['batch_proc_system'] = None
    start = time.time()
    exec_inference.run_semsegaxoness_prediction()
    print(time.time() - start)
syconn/batchjob_scripts/batchjob_calculate_spinehead_volume.py

@@ -26,9 +26,11 @@ sso_ids = args[0]
 ssd = SuperSegmentationDataset()
 for sso in ssd.get_super_segmentation_object(sso_ids):
     assert sso.load_skeleton(), f"Skeleton of SSO {sso.id} does not exist."
     # if 'spinehead_vol' in sso.skeleton:
     #     continue
-    extract_spinehead_volume_mesh(sso)
+    # TODO: remove try-except
+    try:
+        extract_spinehead_volume_mesh(sso)
+    except Exception:
+        raise()
     sso.save_skeleton()
 with open(path_out_file, "wb") as f:
...
...
syconn/batchjob_scripts/QSUB_kimimaromerge.py → syconn/batchjob_scripts/batchjob_kimimaromerge.py

 import sys
+import pickle as pkl
+import numpy as np
+from syconn.handler.basics import load_pkl2obj
-try:
-    import cPickle as pkl
-except ImportError:
-    import pickle as pkl
 from syconn.proc.skeleton import kimimaro_mergeskels, kimimaro_skels_tokzip
 from syconn import global_params
 from syconn.reps.super_segmentation_object import SuperSegmentationObject
-import numpy as np

 path_storage_file = sys.argv[1]
 path_out_file = sys.argv[2]
...
@@ -24,6 +21,7 @@ working_dir = global_params.config.working_dir
 scaling = global_params.config["scaling"]
 path2results_dc, ssv_ids, zipname = args
+results_dc = load_pkl2obj(path2results_dc)

 for ssv_id in ssv_ids:
     ssv_id = int(ssv_id)
     combined_skel, degree_dict, neighbour_dict = kimimaro_mergeskels(results_dc[ssv_id], ssv_id)
...
@@ -32,13 +30,17 @@ for ssv_id in ssv_ids:
     sso.skeleton["neighbours"] = neighbour_dict
     if combined_skel.vertices.size > 0:
         sso.skeleton["nodes"] = combined_skel.vertices / scaling  # to fit voxel coordinates
         # get radius in pseudo-voxel units
         sso.skeleton["diameters"] = (combined_skel.radii / scaling[0]) * 2  # divide by x scale
         # kimimaro_skels_tokzip(combined_skel, ssv_id, zipname)
         sso.skeleton["edges"] = combined_skel.edges
         sso.skeleton["degree"] = degree_dict
     else:
-        sso.skeleton["nodes"] = combined_skel.vertices
-        sso.skeleton["diameters"] = combined_skel.radii
-        sso.skeleton["edges"] = combined_skel.edges
-        sso.skeleton["degree"] = degree_dict
+        sso.skeleton["nodes"] = np.array([sso.rep_coord], dtype=np.float32)
+        sso.skeleton["diameters"] = np.zeros((1, ), dtype=np.float32)
+        sso.skeleton["edges"] = np.array([[0, 0], ], dtype=np.float32)
+        sso.skeleton["degree"] = {0: 0}
     sso.save_skeleton()
 with open(path_out_file, "wb") as f:
...
...
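The skeleton hunk above stores diameters in voxel units of the x axis, derived from the merged skeleton's radii (the in-code comments "get radius in pseudo-voxel units" and "divide by x scale" describe this). A quick check of that conversion with a hypothetical voxel size of (10, 10, 25) nm (the actual `scaling` comes from the working directory's config):

    import numpy as np

    scaling = np.array([10, 10, 25])      # hypothetical nm per voxel in x, y, z
    radii = np.array([50.0, 120.0])       # example radii in physical units (nm)
    diameters = (radii / scaling[0]) * 2  # divide by x scale, then radius -> diameter
    print(diameters)                      # [10. 24.]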
syconn/batchjob_scripts/QSUB_kimimaroskelgen.py → syconn/batchjob_scripts/batchjob_kimimaroskelgen.py

 import sys
-try:
-    import cPickle as pkl
-except ImportError:
-    import pickle as pkl
+import pickle as pkl
 from syconn.proc.skeleton import kimimaro_skelgen

 path_storage_file = sys.argv[1]
...
@@ -16,8 +12,11 @@ with open(path_storage_file, 'rb') as f:
         args.append(pkl.load(f))
     except EOFError:
         break

-cube_size, cube_offset, overlap, boundary = args
-skels = kimimaro_skelgen(cube_size, cube_offset, overlap, boundary)
+cube_size, cube_offset, overlap, cube_of_interest_bb = args
+skels = kimimaro_skelgen(cube_size, cube_offset, overlap, cube_of_interest_bb)
+
+with open(path_out_file[:-4] + '_ids.pkl', "wb") as f:
+    pkl.dump(list(skels.keys()), f)

 with open(path_out_file, "wb") as f:
     pkl.dump(skels, f)
...
...
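These batchjob scripts share one I/O convention, visible in the hunk above: the submitter serialises the job arguments as consecutive pickle records in a single storage file (passed as sys.argv[1]), the worker reads records until EOFError and writes its result to the output path (sys.argv[2]). A self-contained sketch of both sides with placeholder file names:

    import pickle as pkl

    # submitter side: append several pickled objects to one storage file
    with open('storage.pkl', 'wb') as f:
        for arg in [(128, 128, 64), (0, 0, 0), 10, None]:
            pkl.dump(arg, f)

    # worker side: read until the stream is exhausted, then process and store the result
    args = []
    with open('storage.pkl', 'rb') as f:
        while True:
            try:
                args.append(pkl.load(f))
            except EOFError:
                break
    cube_size, cube_offset, overlap, cube_of_interest_bb = args
    with open('out.pkl', 'wb') as f:
        pkl.dump({'received': len(args)}, f)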
syconn/batchjob_scripts/QSUB_map_myelin2skel.py → syconn/batchjob_scripts/batchjob_map_myelin2skel.py

@@ -6,11 +6,8 @@
 # Authors: Sven Dorkenwald, Philipp Schubert, Jörgen Kornfeld
 import sys
+import pickle as pkl
-try:
-    import cPickle as pkl
-except ImportError:
-    import pickle as pkl
 from syconn.reps.super_segmentation_helper import map_myelin2coords, majorityvote_skeleton_property
 from syconn.reps.super_segmentation import SuperSegmentationDataset
 from syconn import global_params
...
@@ -35,10 +32,12 @@ ssd = SuperSegmentationDataset(working_dir=working_dir, version=version,
                                version_dict=version_dict)
 for ssv in ssd.get_super_segmentation_object(ssv_ids):
     ssv.load_skeleton()
-    ssv.skeleton["myelin"] = map_myelin2coords(ssv.skeleton["nodes"], mag=4)
+    try:
+        ssv.skeleton["myelin"] = map_myelin2coords(ssv.skeleton["nodes"], mag=4)
+    except Exception:
+        raise()
     majorityvote_skeleton_property(
         ssv, prop_key='myelin', max_dist=global_params.config['compartments']['dist_axoness_averaging'])
     ssv.save_skeleton()
 with open(path_out_file, "wb") as f:
...
...
syconn/batchjob_scripts/QSUB_map_subcell_extract_props.py → syconn/batchjob_scripts/batchjob_map_subcell_extract_props.py

@@ -6,11 +6,7 @@
 # Authors: Philipp Schubert, Jörgen Kornfeld
 import sys
-try:
-    import cPickle as pkl
-except ImportError:
-    import pickle as pkl
+import pickle as pkl
 from syconn.proc import sd_proc

 path_storage_file = sys.argv[1]
...
...
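The same simplification recurs in several of the renamed batchjob scripts: the Python 2 era cPickle fallback is dropped. On Python 3 this is a pure cleanup, because the pickle module already loads its C accelerator automatically, so the plain import is equivalent in practice:

    # Python 3: pickle uses the C implementation (_pickle) under the hood when available,
    # so the old "try: import cPickle as pkl / except ImportError: import pickle as pkl"
    # fallback is unnecessary.
    import pickle as pkl

    payload = {'ssv_ids': [1, 2, 3]}
    assert pkl.loads(pkl.dumps(payload)) == payload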
syconn/batchjob_scripts/batchjob_predict_axoness_semseg.py

@@ -38,7 +38,7 @@ view_props = global_params.config['compartments']['view_properties_semsegax']
 if global_params.config.use_point_models:
     # TODO: move mpath to global params
     ssd_kwargs = dict(working_dir=global_params.config.working_dir)
-    predict_cmpt_ssd(ssd_kwargs=ssd_kwargs, ssv_ids=ch, mpath='~/thesis/current_work/paper/test_models/')
+    predict_cmpt_ssd(ssd_kwargs=ssd_kwargs, ssv_ids=ch, mpath='~/thesis/current_work/paper/test_models/', bs=2)
 else:
     n_worker = 2
     params = [(ch_sub, view_props, ncpus, map_properties,
...
...
syconn/cnn/TrainData.py

@@ -61,7 +61,7 @@ if elektronn3_avail:
     """
     def __init__(self, ssd_kwargs=None, npoints=20000, transform: Callable = Identity(), train=True,
                  cv_val=0, cellshape_only=False, ctx_size=20000, use_syntype=True,
-                 onehot=True, batch_size=1):
+                 onehot=True, batch_size=1, map_myelin: bool = False):
         """
         Args:
...
@@ -93,6 +93,7 @@ if elektronn3_avail:
         self.ssd = ssd
         self.sso_ids = None
         gt_dir = ssd.path
+        self.map_myelin = map_myelin
         log_cnn.info(f'Set {ssd} as GT source.')
         split_dc_path = f'{gt_dir}/ctgt_v4_splitting_cv0_10fold.pkl'
...
@@ -188,7 +189,7 @@ if elektronn3_avail:
             sso_id, (sample_feats, sample_pts) = [*pts_loader_scalar(
                 self.ssd_kwargs, [self.sso_ids[item], ] * 2, self._batch_size * 2, self.num_pts,
                 transform=self.transform, ctx_size=self.ctx_size,
-                train=True, draw_local=True, cache=False)][0]
+                train=True, draw_local=True, cache=False, map_myelin=self.map_myelin)][0]
         else:
             sso_id, (sample_feats, sample_pts) = [*pts_loader_scalar(
                 self.ssd_kwargs, [self.sso_ids[item], ], self._batch_size,
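The TrainData change threads one new constructor flag (map_myelin) through to the point-cloud loader call. The pattern in isolation, as a generic sketch rather than the actual elektronn3/SyConn classes:

    class PointDataset:
        def __init__(self, loader_fn, map_myelin: bool = False):
            self.loader_fn = loader_fn
            self.map_myelin = map_myelin      # stored once at construction ...

        def __getitem__(self, item):
            # ... and forwarded on every sample request
            return self.loader_fn(item, map_myelin=self.map_myelin)

    ds = PointDataset(lambda item, map_myelin: (item, map_myelin), map_myelin=True)
    print(ds[0])  # (0, True)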
syconn/exec/exec_inference.py

@@ -15,7 +15,8 @@ import numpy as np
 from syconn import global_params
 from syconn.handler.basics import chunkify
 from syconn.handler.config import initialize_logging
-from syconn.handler.prediction_pts import predict_glia_ssv, predict_celltype_ssd, infere_cell_morphology_ssd
+from syconn.handler.prediction_pts import predict_glia_ssv, predict_celltype_ssd, infere_cell_morphology_ssd, \
+    predict_cmpt_ssd
 from syconn.mp import batchjob_utils as qu
 from syconn.proc.glia_splitting import qsub_glia_splitting, collect_glia_sv, write_glia_rag, transform_rag_edgelist2pkl
 from syconn.proc.graphs import create_ccsize_dict
...
...
@@ -39,7 +40,7 @@ def run_morphology_embedding(max_n_jobs: Optional[int] = None):
         :func:`~syconn.exec.skeleton.run_skeleton_generation`.
     """
     if max_n_jobs is None:
-        max_n_jobs = global_params.config.ngpu_total * 2
+        max_n_jobs = global_params.config.ngpu_total * 4
     log = initialize_logging('morphology_embedding', global_params.config.working_dir + '/logs/', overwrite=False)
     ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
...
...
@@ -75,7 +76,7 @@ def run_celltype_prediction(max_n_jobs_gpu: Optional[int] = None):
         Requires :func:`~syconn.exec.exec_init.run_create_neuron_ssd` and :func:`~run_neuron_rendering`.
     """
     if max_n_jobs_gpu is None:
-        max_n_jobs_gpu = global_params.config.ngpu_total * 3 if qu.batchjob_enabled() else 1
+        max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
     log = initialize_logging('celltype_prediction', global_params.config.working_dir + '/logs/', overwrite=False)
     ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
...
...
@@ -114,7 +115,7 @@ def run_semsegaxoness_prediction(max_n_jobs_gpu: Optional[int] = None):
     """
     if max_n_jobs_gpu is None:
-        max_n_jobs_gpu = global_params.config.ngpu_total * 10 if qu.batchjob_enabled() else 1
+        max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
     if qu.batchjob_enabled():
         n_cores = global_params.config['ncores_per_node'] // global_params.config['ngpus_per_node']
     else:
...
...
@@ -125,16 +126,20 @@ def run_semsegaxoness_prediction(max_n_jobs_gpu: Optional[int] = None):
     np.random.seed(0)
     multi_params = ssd.ssv_ids
     np.random.shuffle(multi_params)
-    multi_params = chunkify(multi_params, max_n_jobs_gpu)
-    # job parameter will be read sequentially, i.e. in order to provide only
-    # one list as parameter one needs an additonal axis
-    multi_params = [(ixs,) for ixs in multi_params]
-    path_to_out = qu.batchjob_script(multi_params, 'predict_axoness_semseg', log=log, suffix="",
-                                     additional_flags="--gres=gpu:1", n_cores=n_cores, remove_jobfolder=False)
-    log.info(f'Finished prediction of {len(ssd.ssv_ids)} SSVs.')
-    shutil.rmtree(os.path.abspath(path_to_out + "/../"), ignore_errors=True)
+    if not qu.batchjob_enabled():
+        ssd_kwargs = dict(working_dir=global_params.config.working_dir)
+        predict_cmpt_ssd(ssd_kwargs=ssd_kwargs, ssv_ids=multi_params, bs=2)
+    else:
+        multi_params = chunkify(multi_params, max_n_jobs_gpu)
+        # job parameter will be read sequentially, i.e. in order to provide only
+        # one list as parameter one needs an additonal axis
+        multi_params = [(ixs,) for ixs in multi_params]
+        path_to_out = qu.batchjob_script(multi_params, 'predict_axoness_semseg', log=log, suffix="",
+                                         additional_flags="--gres=gpu:1", n_cores=n_cores, remove_jobfolder=False)
+        log.info(f'Finished prediction of {len(ssd.ssv_ids)} SSVs.')
+        shutil.rmtree(os.path.abspath(path_to_out + "/../"), ignore_errors=True)


 def run_semsegspiness_prediction(max_n_jobs_gpu: Optional[int] = None):
...
...
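In the batch-job branch above the shuffled SSV ids are split into one chunk per GPU job and each chunk is wrapped in a 1-tuple, so every worker receives a single positional argument. A small sketch of that parameter packing (chunkify here is a stand-in for syconn.handler.basics.chunkify):

    import numpy as np

    def chunkify(seq, n_chunks):
        # stand-in: split seq into n_chunks roughly equal parts
        return [list(c) for c in np.array_split(np.asarray(seq), n_chunks)]

    ssv_ids = list(range(10))
    multi_params = chunkify(ssv_ids, 3)
    # add the extra axis: each job gets exactly one argument, its list of ids
    multi_params = [(ixs,) for ixs in multi_params]
    print(multi_params)  # [([0, 1, 2, 3],), ([4, 5, 6],), ([7, 8, 9],)]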
@@ -145,7 +150,7 @@ def run_semsegspiness_prediction(max_n_jobs_gpu: Optional[int] = None):
         max_n_jobs_gpu: Number of parallel GPU jobs. Used for the inference.
     """
     if max_n_jobs_gpu is None:
-        max_n_jobs_gpu = global_params.config.ngpu_total * 10 if qu.batchjob_enabled() else 1
+        max_n_jobs_gpu = global_params.config.ngpu_total * 4 if qu.batchjob_enabled() else 1
     log = initialize_logging('compartment_prediction', global_params.config.working_dir + '/logs/', overwrite=False)
     ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
...
...
@@ -174,7 +179,7 @@ def run_glia_prediction_pts(max_n_jobs_gpu: Optional[int] = None):
         Requires :func:`~syconn.exec_init.init_cell_subcell_sds`.
     """
     if max_n_jobs_gpu is None: