diff --git a/src/soap/SOAPRC.in b/src/soap/SOAPRC.in
index 8352beb59f9332ae5a09cd3b380db902748a46c3..6aa354f740d6b25bc952c43204b491dbdee11255 100644
--- a/src/soap/SOAPRC.in
+++ b/src/soap/SOAPRC.in
@@ -6,6 +6,7 @@ test "$is_csh" = 101 && goto CSH
 export PYTHONPATH="${PYTHONPATH}:@CMAKE_INSTALL_PREFIX@"
 export SOAP_ROOT="@CMAKE_INSTALL_PREFIX@"
 export LD_LIBRARY_PATH="{LD_LIBRARY_PATH}:@CMAKE_INSTALL_PREFIX@/soap"
+return
 
 # CSH/TCSH
 CSH:
diff --git a/src/soap/__init__.py b/src/soap/__init__.py
index b34ff962953e77482c65b5a0905cd45636feb879..bae475953d7be9cca073e43056017965fcb7d3e5 100644
--- a/src/soap/__init__.py
+++ b/src/soap/__init__.py
@@ -1,4 +1,5 @@
 from _soapxx import *
 from linalg import *
 from . import soapy
+from . import tools
 
diff --git a/src/soap/soapy/CMakeLists.txt b/src/soap/soapy/CMakeLists.txt
index 03e2dea3441ebdbe3eb49916ac11597659404306..382427cdc8d344f7ac8369b4ec6c51686db86715 100644
--- a/src/soap/soapy/CMakeLists.txt
+++ b/src/soap/soapy/CMakeLists.txt
@@ -1,2 +1,2 @@
-install(FILES __init__.py elements.py util.py npthreads.py kernel.py simspace.py pca.py dimred.py lagraph.py lamatch.py math.py momo.py DESTINATION ${LOCAL_INSTALL_DIR}/soapy)
+install(FILES __init__.py learn.py elements.py util.py npthreads.py kernel.py simspace.py pca.py dimred.py lagraph.py lamatch.py math.py momo.py DESTINATION ${LOCAL_INSTALL_DIR}/soapy)
 
diff --git a/src/soap/soapy/__init__.py b/src/soap/soapy/__init__.py
index 01c9db61f22e82077147abc607efdc12f79f863e..c4612746260622731767e2b0f7ef9834157d0a2c 100644
--- a/src/soap/soapy/__init__.py
+++ b/src/soap/soapy/__init__.py
@@ -5,4 +5,5 @@ from pca import *
 from util import *
 from math import *
 from elements import *
+import learn
 
diff --git a/src/soap/soapy/kernel.py b/src/soap/soapy/kernel.py
index ed85654d7f9c6d27bf2a1c8e762931a96eabaaef..c13bfdbca2c8c69167ca9934e86942cbdf99d4f3 100644
--- a/src/soap/soapy/kernel.py
+++ b/src/soap/soapy/kernel.py
@@ -162,24 +162,37 @@ class KernelAdaptorSpecificUnique(object):
         self.types = types_global
         self.S = len(types_global)
         return
-    def adapt(self, spectrum):
-        IX = np.zeros((0.,0.), dtype='float64')
+    def adapt(self, spectrum, return_pos_matrix=False):
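+        """
+        Assembles the feature matrix IX with one row per expansion centre.
+        If return_pos_matrix is True, also returns the matrix of centre
+        positions IR and the list of centre types.
+        """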
+        IX = np.zeros((0,0), dtype='float64') # feature matrix
         dimX = -1
+        IR = np.zeros((0,0), dtype='float64') # position matrix
+        types = []
         for atomic_i in spectrum:
             Xi_unnorm, Xi_norm = self.adaptScalar(atomic_i)
+            Ri = atomic_i.getCenter().pos
+            types.append(atomic_i.getCenter().type)
             dimX = Xi_norm.shape[0]
             if not IX.any():
                 IX = np.copy(Xi_norm) # TODO Is this necessary?
                 IX.resize((1, dimX))
+                IR = np.copy(Ri)
+                IR.resize((1, 3))
             else:
                 i = IX.shape[0]
                 IX.resize((i+1, dimX))
                 IX[-1,:] = Xi_norm
-            #print IX
-        return IX
-    def adaptScalar(self, atomic):
+                IR.resize((i+1, 3))
+                IR[-1,:] = Ri
+        if return_pos_matrix:
+            return IX, IR, types
+        else:
+            return IX
+    def adaptScalar(self, atomic, epsilon=1e-20):
         X = reduce_xnklab_atomic(atomic, self.types)
-        X_norm = X/np.dot(X,X)**0.5
+        X_mag = np.dot(X,X)**0.5
+        if X_mag < epsilon: 
+            X_mag = 1.
+        X_norm = X/X_mag
         return X, X_norm
 
 class KernelAdaptorSpecific(object):
@@ -203,7 +216,6 @@ class KernelAdaptorSpecific(object):
                 i = IX.shape[0]
                 IX.resize((i+1, dimX))
                 IX[-1,:] = Xi_norm
-            #print IX
         return IX
     def adaptScalar(self, atomic):
         xnklab_atomic = Xnklab(atomic, self.types)
diff --git a/src/soap/soapy/lagraph.py b/src/soap/soapy/lagraph.py
index a229eb3d94b4d7071a7344c8778edd3837a687b1..b0f7b01a4b61dcd92b1396b47ddfa886f1842790 100644
--- a/src/soap/soapy/lagraph.py
+++ b/src/soap/soapy/lagraph.py
@@ -680,10 +680,7 @@ class ParticleGraph(object):
                 #print z, z_idx
             P = np.array(ix)
             dim = P.shape[1]
-            if options_soap.get('kernel.adaptor') in [ 'global-generic', 'global-specific' ]:
-                pass
-            else:
-                assert P.shape[0] == n_atoms 
+            assert P.shape[0] == n_atoms 
             #print P.dot(P.T)
         elif descriptor_type == 'npy_load':
             folder = options_descriptor["folder"]
diff --git a/src/soap/soapy/learn.py b/src/soap/soapy/learn.py
new file mode 100644
index 0000000000000000000000000000000000000000..623228860e3ebb87414b7ff437c9d32cea515b84
--- /dev/null
+++ b/src/soap/soapy/learn.py
@@ -0,0 +1,66 @@
+import numpy as np
+
+def subsample_array(array, n_select, method='stride', stride_shift=0):
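+    """
+    Selects n_select items from array and returns (array_select, array_discard).
+
+    method='stride' picks items at approximately regular index intervals,
+    optionally rotated by stride_shift; method='random' shuffles the indices
+    first. Selection is currently only implemented for list-type input.
+    """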
+    # Number of data items; selected, discarded items
+    if type(array) == list:
+        n_data = len(array)
+    else:
+        n_data = array.shape[0]
+    n_discard = n_data - n_select
+    assert n_discard >= 0 # Trying to select more data than available?
+    # Subsample
+    if method == 'random':
+        # Random access indices
+        idcs = np.arange(0, n_data)
+        np.random.shuffle(idcs)
+        # Create selections
+        if type(array) == list:
+            array_select = []
+            array_discard = []
+            for i in range(n_data):
+                if i < n_select:
+                    array_select.append(array[idcs[i]])
+                else:
+                    array_discard.append(array[idcs[i]])
+        else:
+            raise NotImplementedError("<subsample_data_array> Array type other than list")
+    elif method == 'stride':
+        # Construct index sets
+        idcs = np.arange(0, n_data)
+        idcs_sel = [ int(float(idcs.shape[0])/n_select*i) for i in range(n_select) ]
+        idcs_sel = np.array(idcs_sel)
+        # Take into account periodic shift
+        if stride_shift != 0:
+            idcs_sel = np.sort((idcs_sel+stride_shift) % idcs.shape[0])
+        # Index complement
+        mask = np.zeros(idcs.shape[0], dtype=bool)
+        mask[idcs_sel] = True
+        idcs_disc = idcs[~mask]
+        # Create selections
+        if type(array) == list:
+            array_select = []
+            array_discard = []
+            for i in idcs_sel:
+                array_select.append(array[i])
+            for i in idcs_disc:
+                array_discard.append(array[i])
+        else:
+            raise NotImplementedError("<subsample_data_array> Array type other than list")
+    return array_select, array_discard
+
+if __name__ == "__main__":
+    print "Data array"
+    a = range(20)
+    print a
+    print "Select with stride, shift=0"
+    a1, a2 = subsample_array(a, 5, 'stride', 0)
+    print a1, a2
+    print "Select with stride, shift=6"
+    a1, a2 = subsample_array(a, 5, 'stride', 6)
+    print a1, a2
+    print "Select random subsample"
+    a1, a2 = subsample_array(a, 5, 'random')
+    print a1, a2
+
+
+
diff --git a/src/soap/soapy/util.py b/src/soap/soapy/util.py
index a4ed33699f0bc9d4f570100f5cad2c2176b9c03b..96666d15c022ddba580e8bd20dbe9d3887692371 100644
--- a/src/soap/soapy/util.py
+++ b/src/soap/soapy/util.py
@@ -11,6 +11,30 @@ HARTREE_TO_KCALMOL = 627.509469
 
 MP_LOCK = mp.Lock()
 
+def mp_pool_compute_upper_triangle(
+        kfct,
+        g_list,
+        n_procs,
+        dtype='float64',
+        mplog=None,
+        **kwargs):
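+    """
+    Computes the upper triangle of the kernel matrix row by row, mapping the
+    kernel evaluations of each row onto a multiprocessing pool with n_procs
+    workers. kwargs are bound to kfct via fct.partial.
+    """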
+    kfct_primed = fct.partial(kfct, **kwargs)
+    n_rows = len(g_list)
+    kmat = np.zeros((n_rows, n_rows), dtype=dtype)
+    for i in range(n_rows):
+        if mplog: mplog << mplog.back << "Computing row %d" % i << mplog.endl
+        g_pair_list = []
+        gi = g_list[i]
+        for j in range(i, n_rows):
+            gj = g_list[j]
+            g_pair_list.append([gi,gj])
+        pool = mp.Pool(processes=n_procs)
+        krow = pool.map(kfct_primed, g_pair_list)
+        pool.close()
+        pool.join()
+        kmat[i,i:] = krow
+    return kmat
+
 def mp_compute_vector(
         kfct,
         g_list,
@@ -42,12 +66,30 @@ def mp_compute_column_block(gi, gj_list, kfct):
         krow.append(k)
     return krow
 
+def compute_upper_triangle(
+        kfct,
+        g_list,
+        **kwargs):
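+    """
+    Serial counterpart of mp_compute_upper_triangle: evaluates kfct for all
+    pairs (i,j) with j >= i and returns the symmetrized kernel matrix;
+    kwargs are bound to kfct. Illustrative call (my_kernel being any pair
+    kernel function): kmat = compute_upper_triangle(my_kernel, g_list).
+    """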
+    dim = len(g_list)
+    kmat = np.zeros((dim,dim), dtype='float64')
+    kfct_primed = fct.partial(
+        kfct,
+        **kwargs)
+    for i in range(dim):
+        gi = g_list[i]
+        for j in range(i, dim):
+            gj = g_list[j]
+            kij = kfct_primed(gi, gj)
+            kmat[i,j] = kij
+            kmat[j,i] = kij
+    return kmat
+
 def mp_compute_upper_triangle(
         kfct, 
         g_list, 
         n_procs, 
         n_blocks, 
-        log=None, 
+        mplog=None, 
         tstart_twall=(None,None), 
         backup=True,
         verbose=True,
@@ -63,7 +105,7 @@ def mp_compute_upper_triangle(
     n_blocks: number of column blocks onto which computation is split
     kwargs: keyword arguments supplied to kfct
     """
-    if not verbose: log=None
+    if not verbose: mplog=None
     t_start = tstart_twall[0]
     t_wall = tstart_twall[1]
     dim = len(g_list)
@@ -78,7 +120,7 @@ def mp_compute_upper_triangle(
         # Column start, column end
         c0 = col_div[0]
         c1 = col_div[-1]+1
-        if log: log << "Column block i[%d:%d] j[%d:%d]" % (0, c1, c0, c1) << log.endl
+        if mplog: mplog << "Column block i[%d:%d] j[%d:%d]" % (0, c1, c0, c1) << mplog.endl
         gj_list = g_list[c0:c1]
         gi_list = g_list[0:c1]
         # Prime kernel function
@@ -95,7 +137,7 @@ def mp_compute_upper_triangle(
         npyfile = 'out.block_i_%d_%d_j_%d_%d.npy' % (0, c1, c0, c1)
         # ... but first check for previous calculations of same slice
         if backup and npyfile in os.listdir('./'):
-            if log: log << "Load block from '%s'" % npyfile << log.endl
+            if mplog: mplog << "Load block from '%s'" % npyfile << mplog.endl
             kmat_column_block = np.load(npyfile)
         else:
             kmat_column_block = pool.map(mp_compute_column_block_primed, gi_list)
@@ -110,9 +152,9 @@ def mp_compute_upper_triangle(
         dt_block = t_out-t_in
         if t_start and t_wall:
             t_elapsed = t_out-t_start
-            if log: log << "Time elapsed =" << t_elapsed << " (wall time = %s) (maxmem = %d)" % (t_wall, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) << log.endl
+            if mplog: mplog << "Time elapsed =" << t_elapsed << " (wall time = %s) (maxmem = %d)" % (t_wall, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) << mplog.endl
             if col_div_idx+1 != len(col_div_list) and t_elapsed+dt_block > t_wall-dt_block:
-                log << "Wall time hit expected for next iteration, break ..." << log.endl
+                mplog << "Wall time hit expected for next iteration, break ..." << mplog.endl
                 break
             else: pass
     return kmat
diff --git a/src/soap/spectrum.cpp b/src/soap/spectrum.cpp
index 6cb4a9666c40f5b949e3126074b8886208f092bd..21fcf1d93342e91ca80a6370d6397c586d3d7096 100644
--- a/src/soap/spectrum.cpp
+++ b/src/soap/spectrum.cpp
@@ -47,6 +47,10 @@ void Spectrum::compute() {
     this->compute(_structure->particles(), _structure->particles());
 }
 
+void Spectrum::compute(Segment *center) {
+    this->compute(center->particles(), _structure->particles());
+}
+
 void Spectrum::compute(Segment *center, Segment *target) {
     this->compute(center->particles(), target->particles());
 }
@@ -139,7 +143,7 @@ AtomicSpectrum *Spectrum::computeAtomic(Particle *center, Structure::particle_ar
             weight0 *= _basis->getCutoff()->getCenterWeight();
         }
 
-        GLOG() << (*pit)->getType() << " " << dr.getX() << " " << dr.getY() << " " << dr.getZ() << std::endl;
+        GLOG() << (*pit)->getType() << " " << dr.getX() << " " << dr.getY() << " " << dr.getZ() << " " << (*pit)->getWeight() << std::endl;
 
         // COMPUTE EXPANSION & ADD TO SPECTRUM
         bool gradients = (is_image) ? false : _options->get<bool>("spectrum.gradients");
@@ -275,6 +279,7 @@ void Spectrum::load(std::string archfile) {
 void Spectrum::registerPython() {
     using namespace boost::python;
     void (Spectrum::*computeAll)() = &Spectrum::compute;
+    void (Spectrum::*computeSeg)(Segment*) = &Spectrum::compute;
     void (Spectrum::*computeSegPair)(Segment*, Segment*) = &Spectrum::compute;
     void (Spectrum::*computeCentersTargets)(Structure::particle_array_t&, Structure::particle_array_t&) = &Spectrum::compute;
 
@@ -283,6 +288,7 @@ void Spectrum::registerPython() {
     	.def(init<std::string>())
     	.def("__iter__", range<return_value_policy<reference_existing_object> >(&Spectrum::beginAtomic, &Spectrum::endAtomic))
 	    .def("compute", computeAll)
+        .def("compute", computeSeg)
 	    .def("compute", computeSegPair)
 	    .def("compute", computeCentersTargets)
 		.def("computePower", &Spectrum::computePower)
diff --git a/src/soap/spectrum.hpp b/src/soap/spectrum.hpp
index c964550ab6759e26f1ba4a22bbac277306160e94..c2f0d66bafee172ceb26f22b664b1f5a66a24d5c 100644
--- a/src/soap/spectrum.hpp
+++ b/src/soap/spectrum.hpp
@@ -46,6 +46,7 @@ public:
 	void clean();
 
 	void compute();
+    void compute(Segment *centers);
 	void compute(Segment *centers, Segment *targets);
 	void compute(Structure::particle_array_t &sources, Structure::particle_array_t &targets);
 	AtomicSpectrum *computeAtomic(Particle *center);
diff --git a/src/soap/tools/CMakeLists.txt b/src/soap/tools/CMakeLists.txt
index 9025bbbfcbe1de4bf52443c6942ad32925e6da11..c56b47351a5479fed45c59d107bec22849cbecc3 100644
--- a/src/soap/tools/CMakeLists.txt
+++ b/src/soap/tools/CMakeLists.txt
@@ -1,2 +1,2 @@
-install(FILES __init__.py loadwrite.py inverse.py extract.py DESTINATION ${LOCAL_INSTALL_DIR}/tools)
+install(FILES __init__.py loadwrite.py inverse.py extract.py partition.py DESTINATION ${LOCAL_INSTALL_DIR}/tools)
 
diff --git a/src/soap/tools/__init__.py b/src/soap/tools/__init__.py
index 6570f3d30c78d5ec1245d5528232d617f5ab73e3..6985d9b6e8a806513a89fce3828653f33c7cf29d 100644
--- a/src/soap/tools/__init__.py
+++ b/src/soap/tools/__init__.py
@@ -1,4 +1,5 @@
 from loadwrite import *
 from inverse import *
 from extract import *
+import partition
 
diff --git a/src/soap/tools/loadwrite.py b/src/soap/tools/loadwrite.py
index 807b5c806de0f3bd3ebee3d6313ca22e9bcede78..391d1d6cd965b89838aaa4b85b7f12825dd91405 100644
--- a/src/soap/tools/loadwrite.py
+++ b/src/soap/tools/loadwrite.py
@@ -1,8 +1,10 @@
 import os
 import numpy as np
+import itertools
+import partition
 from .. import _soapxx as soap
 from ..soapy import momo
-
+from ..soapy import elements
 
 try:
     import ase
@@ -10,6 +12,169 @@ try:
 except ImportError:
     print("Note: ase.io import failed. Install PYTHON-ASE to harvest full reader functionality.")
 
+def structures_from_xyz(xyz_file, **kwargs):
+    # Read xyz via ASE
+    ase_configs = ase.io.read(xyz_file, index=':')
+    return structures_from_ase(
+        configs=ase_configs,
+        **kwargs)
+
+def structures_from_ase(
+        configs, 
+        return_all=False, 
+        **kwargs):
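+    """
+    Applies structure_from_ase to each config in configs. Returns the lists
+    of (possibly reordered) configs and soap.Structure objects, or the full
+    per-config return tuples if return_all is True.
+    """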
+    configs_mod = []
+    structs = []
+    return_tuples = []
+    for config in configs:
+        return_tuple = \
+            structure_from_ase(
+                config,
+                **kwargs)
+        return_tuples.append(return_tuple)
+        configs_mod.append(return_tuple[0])
+        structs.append(return_tuple[1])
+    if return_all:
+        return return_tuples
+    else:
+        return configs_mod, structs
+
+def structure_from_ase(
+        config, 
+        do_partition=False, 
+        add_fragment_com=False, 
+        use_center_of_geom=False, 
+        log=None):
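+    """
+    Converts an ASE config into a soap.Structure. Optionally partitions the
+    atoms into fragments (do_partition) and appends a zero-weight
+    centre-of-mass particle per fragment (add_fragment_com). Returns
+    (config, structure, top, frag_bond_matrix, atom_bond_matrix,
+    frag_labels, atom_labels).
+    """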
+    # NOTE Center of mass is computed without considering PBC => Requires unwrapped coordinates
+    structure = None
+    frag_bond_matrix = None
+    atom_bond_matrix = None
+    frag_labels = []
+    atom_labels = []
+    # System properties
+    label = config.info['label']
+    positions = config.get_positions()
+    types = config.get_chemical_symbols()
+    if log: log << log.back << "Reading '%s'" % label << log.flush
+    # Simulation cell
+    if config.pbc.all(): 
+        box = np.array([config.cell[0], config.cell[1], config.cell[2]])
+    elif not config.pbc.any(): 
+        box = np.zeros((3,3))
+    else: 
+        raise NotImplementedError("<structures_from_xyz> Partial periodicity not implemented.")
+    # Partition 
+    if do_partition:
+        # Partition => Frags, top, reordered positions
+        frags, top = partition.PartitionStructure(types, positions, outfile_gro='%s.gro' % label)
+        atms = [ atm for atm in itertools.chain(*frags) ]
+        positions = [ atm.xyz for atm in itertools.chain(*frags) ]
+        types = [ atm.e for atm in itertools.chain(*frags) ]
+        # Generate fragment labels
+        for frag in frags:
+            frag_label = '-'.join(atm.e for atm in frag)
+            frag_labels.append(frag_label)
+        # Reorder map
+        id_reorder_map = {}
+        for atm in atms:
+            id_reorder_map[atm.id_initial] = atm.id
+        # Connectivity matrix: fragments
+        frag_bond_matrix = np.zeros((len(frags),len(frags)), dtype=bool)
+        for i in range(len(frags)):
+            frag_bond_matrix[i,i] = True
+            for j in range(i+1, len(frags)):
+                frags_are_bonded = False
+                for ai in frags[i]:
+                    for aj in frags[j]:
+                        if aj in ai.bonded:
+                            frags_are_bonded = True
+                            break
+                    if frags_are_bonded: break
+                frag_bond_matrix[i,j] = frags_are_bonded
+                frag_bond_matrix[j,i] = frags_are_bonded
+        # Connectivity matrix: atoms
+        atom_bond_matrix = np.zeros((len(positions),len(positions)), dtype=bool)
+        for i in range(len(atms)):
+            atom_bond_matrix[i,i] = True
+            ai = atms[i]
+            for j in range(i+1, len(atms)):
+                atoms_are_bonded = False
+                aj = atms[j]
+                if aj in ai.bonded:
+                    atoms_are_bonded = True
+                atom_bond_matrix[i,j] = atoms_are_bonded
+                atom_bond_matrix[j,i] = atoms_are_bonded
+        # Reorder ASE atoms
+        config = ase.Atoms(
+            sorted(config, key = lambda atm: id_reorder_map[atm.index+1]), 
+            info=config.info, 
+            cell=config.cell, 
+            pbc=config.pbc)
+    else:
+        top = [('SEG', 1, len(positions))]
+    # Check particle count consistent with top
+    atom_count = 0
+    for section in top:
+        atom_count += section[1]*section[2]
+    assert atom_count == len(positions) # Does topology match structure?
+    # Create segments, particles
+    structure = soap.Structure(label)
+    structure.box = box
+    atom_idx = 0
+    for section in top:
+        seg_type = section[0]
+        n_segs = section[1]
+        n_atoms = section[2]
+        for i in range(n_segs):
+            segment = structure.addSegment()
+            segment.name = seg_type
+            # Add particles, compute CoM
+            com = np.array([0.,0.,0.])
+            com_weight_total = 0
+            for j in range(n_atoms):
+                particle = structure.addParticle(segment)
+                particle.pos = positions[atom_idx]
+                particle.weight = 1.
+                particle.sigma = 0.5
+                particle.type = types[atom_idx]
+                atom_labels.append(particle.type)
+                # Compute CoMass/CoGeom
+                if use_center_of_geom:
+                    com_weight = 1.
+                else:
+                    com_weight = elements.periodic_table[particle.type].mass
+                com_weight_total += com_weight
+                com = com + com_weight*positions[atom_idx]
+                atom_idx += 1
+            com = com/com_weight_total
+            # Add CoM particle if requested
+            if add_fragment_com:
+                segment = structure.addSegment()
+                segment.name = "%s.COM" % seg_type
+                particle = structure.addParticle(segment)
+                particle.pos = com
+                particle.weight = 0.
+                particle.sigma = 0.5
+                particle.type = "COM"
+    if log: log << log.endl
+    return config, structure, top, frag_bond_matrix, atom_bond_matrix, frag_labels, atom_labels
+
+def join_structures_as_segments(structs):
+    # NOTE Segments of the individual structures will be lost
+    # NOTE Box will be adopted from first structure in list
+    label = ':'.join([s.label for s in structs])
+    struct = soap.Structure(label)
+    struct.box = structs[0].box
+    for struct_i in structs:
+        seg = struct.addSegment()
+        seg.name = struct_i.label
+        for p_i in struct_i.particles:
+            p = struct.addParticle(seg)
+            p.pos = p_i.pos
+            p.weight = p_i.weight
+            p.sigma = p_i.sigma
+            p.type = p_i.type
+    return struct
+
 def write_xyz(xyz_file, structure):
     ofs = open(xyz_file, 'w')
     ofs.write('%d\n%s\n' % (len(list(structure.particles)), structure.label))
diff --git a/src/soap/tools/partition.py b/src/soap/tools/partition.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a851bb9587c8918721e51e6bf4c9813a30b0573
--- /dev/null
+++ b/src/soap/tools/partition.py
@@ -0,0 +1,924 @@
+#! /usr/bin/env python
+from momo import sys, np, osio, endl, flush
+try:
+    from __qmshell__ import e_xyz_from_xyz
+    from __molecules__ import Atom, Molecule
+except ImportError:
+    def e_xyz_from_xyz(xyzfile):
+        raise ImportError("__qmshell__::e_xyz_from_xyz not available")
+    class Atom(object):
+        def __init__(self, *args, **kwargs):
+            raise ImportError("__molecules__::Atom not available")
+    class Molecule(object):
+        def __init__(self, *args, **kwargs):
+            raise ImportError("__molecules__::Molecule not available")
+
+# TODO Atoms docked to a core atom are all moved to the same ligand group 
+#      This is not appropriate for branched side chains
+
+#osio.Connect()
+#osio.AddArg('file',         typ=str,             default=None,             help='Input xyz-file')
+#osio.AddArg('molname',         typ=str,             default='UNSCRAMBLED',     help='Molecule name')
+#osio.AddArg('gro',             typ=str,             default='',             help='Output gro-file')
+#osio.AddArg('xyz',             typ=str,             default='',             help='Output xyz-file')
+#osio.AddArg('ring_exclude', typ=(list, str),     default=['Al','Zn'],     help='Exclude these atoms from ring structure')
+#opts = osio.Parse()
+
+xyzfile             = "in.xyz" #opts.file
+molname             = "heypretty" #opts.molname
+outfile_xyz         = '' #opts.xyz
+outfile_gro         = '' #opts.gro
+exclude_bonds_to    = [] #opts.ring_exclude
+wordy               = False
+leaky               = False
+path_cutoff_length  = 7
+ligand_size_lower   = 4
+
+def covalent_cutoff(rad1, rad2):
+    return 1.15*(rad1+rad2)
+
+def calculate_connectivity_mat(distance_mat, type_vec, cutoff=None):
+    dim = distance_mat.shape[0]
+    connectivity_mat = np.zeros((dim,dim), dtype=bool)
+    if cutoff != None:
+        for i in range(dim):
+            for j in range(dim):
+                if distance_mat[i,j] < cutoff:
+                    connectivity_mat[i,j] = True
+                else:
+                    connectivity_mat[i,j] = False
+    else:
+        for i in range(dim):
+            for j in range(dim):
+                t1 = type_vec[i]
+                t2 = type_vec[j]
+                r1 = COVRAD_TABLE[t1]
+                r2 = COVRAD_TABLE[t2]
+                r12 = distance_mat[i,j]
+                if r12 <= covalent_cutoff(r1, r2):
+                    connectivity_mat[i,j] = True
+                else:
+                    connectivity_mat[i,j] = False
+    return connectivity_mat
+
+# COVALENCE RADII (from Cambridge Structural Database, table see http://en.wikipedia.org/wiki/Covalent_radius)
+COVRAD_TABLE = {}
+COVRAD_TABLE['H'] = 0.31
+COVRAD_TABLE['C'] = 0.76
+COVRAD_TABLE['N'] = 0.71
+COVRAD_TABLE['F'] = 0.57
+COVRAD_TABLE['O'] = 0.66
+#COVRAD_TABLE['Se'] = 1.20
+COVRAD_TABLE['S'] = 1.05
+#COVRAD_TABLE['Zn'] = 1.22
+COVRAD_TABLE['Br'] = 1.20
+COVRAD_TABLE['Cl'] = 1.02
+COVRAD_TABLE['I'] = 1.39
+COVRAD_TABLE['P'] = 1.07
+
+# FORCEFIELD TYPE TABLE
+TYPE_TABLE = {\
+'C:CCH'     : 'CA',        # Aromatic
+'C:CCN'     : 'CA',        # Aromatic + Nitrogen (TODO)
+'C:CHS'     : 'CA',        #
+'C:CCSe'    : 'CB',        # Aromatic + Selenium (TODO)
+'C:CCS'     : 'CB',        # Aromatic + Sulphur
+'C:CCO'     : 'CO',        # Aromatic + Carboxylic
+'C:CNSe'    : 'CS',        # Aromatic + Selenium + Nitrogen (TODO)
+'C:CCHH'    : 'CR',        # Aliphatic
+'C:CHHN'    : 'CR',        # Aliphatic
+'C:CHHH'    : 'CR',        # Methyl
+'C:CCC'     : 'CC',        # 
+'C:CN'      : 'CN',        # Cyano-group (TODO)
+'H:C'       : 'HC',        # 
+'N:CCC'     : 'NA',        # Aromatic
+'N:C'       : 'NC',        # 
+'O:C'       : 'OC',        # Carboxylic group
+'S:CC'      : 'S',         # Thiophene sulphur
+'Se:CC'     : 'Se'}        #
+
+ALPHABET = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+NUMBERS = '1234567890ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz'
+
+class AtomXyz(object):
+    def __init__(self, e, xyz, id):
+        # PROPERTIES
+        self.e = e
+        self.id_initial = id
+        self.id = id
+        self.xyz = xyz
+        self.x = xyz[0]
+        self.y = xyz[1]
+        self.z = xyz[2]
+        self.covrad = COVRAD_TABLE[self.e]
+        if self.e in exclude_bonds_to: self.covrad = 0.0
+        # FRAGMENT INFO
+        self.name = ''
+        self.type = ''
+        self.fragname = '___'
+        self.fragid = 0
+        # BONDING LEVEL (-1 = CORE)
+        self.level = -1
+        # ALL BONDS
+        self.bonded = []
+        self.bonded_short = []
+        # CORE BONDS
+        self.bonded_core = []
+        self.bonds_core = []
+        # NON-RING CORE BONDS
+        self.bonded_non_ring = []
+        self.bonds_non_ring = []
+        # DOCKED        
+        self.docked_to = []
+        self.dock_for = []
+        # PATHS OF SOME LENGTH
+        self.path_length = -1
+        self.paths = []
+        # NON-RING PATHS OF SOME LENGTH
+        self.path_length_non_ring = -1
+        self.paths_non_ring = []
+    def info_str(self):
+        return "%d:%s:%s" % (self.id, self.name, self.fragname)
+    def generate_type(self):
+        type_key = ''
+        bonded_elems = []
+        for b in self.bonded:
+            bonded_elems.append(b.e)
+        bonded_elems.sort()    
+        for e in bonded_elems:
+            type_key += e
+        type_key = self.e + ':' + type_key
+        try:
+            self.type = TYPE_TABLE[type_key]
+        except KeyError:
+            default_type = self.e+'X'
+            #print "Type definition missing for '%s', defaulting to '%s'" % (type_key, default_type)
+            self.type = default_type
+        return
+    def get_all_docked_atoms(self):
+        docked_atoms = []
+        docked_atoms = docked_atoms + self.dock_for
+        for d in self.dock_for:
+            docked_atoms = docked_atoms + d.get_all_docked_atoms()
+        return docked_atoms
+    def add_bond_core(self, bonded_atom, bond):
+        self.bonded_core.append(bonded_atom)
+        self.bonds_core.append(bond)
+        return
+    def add_bond_non_ring(self, bonded_atom, bond):
+        self.bonded_non_ring.append(bonded_atom)
+        self.bonds_non_ring.append(bond)
+        return
+    def find_paths(self, length, exclusion_list=[], start_here=True):
+        if length == 0: return []
+        paths = []
+        #exclusion_list.append(self)
+        for bond in self.bonds_core:
+            new_path = Path(start_with_atom=self if start_here else None)
+            if not bond.b in exclusion_list:
+                new_path.extend(bond)
+                paths.append(new_path)
+                other = bond.b
+                #other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False)
+                other_paths = other.find_paths(length=length-1, exclusion_list=[self], start_here=False)
+                for o in other_paths:
+                    joined = JoinPaths(new_path, o)
+                    paths.append(joined)
+        if start_here:
+            self.path_length = length
+            self.paths = paths
+        return paths
+    def find_paths_non_ring(self, length=4, exclusion_list=[], start_here=True):
+        if length == 0: return []
+        paths = []
+        #exclusion_list.append(self)
+        for bond in self.bonds_non_ring:
+            new_path = Path(start_with_atom=self if start_here else None)
+            if not bond.b in exclusion_list:
+                new_path.extend(bond)
+                paths.append(new_path)
+                other = bond.b
+                #other_paths = other.find_paths(length=length-1, exclusion_list=exclusion_list, start_here=False)
+                other_paths = other.find_paths_non_ring(length=length-1, exclusion_list=[self], start_here=False)
+                for o in other_paths:
+                    joined = JoinPaths(new_path, o)
+                    paths.append(joined)
+        if start_here:
+            self.path_length_non_ring = length
+            self.paths_non_ring = paths
+        return paths            
+    
+class Path(object):
+    def __init__(self, start_with_atom=None):
+        self.visited = []
+        if start_with_atom != None: self.visited.append(start_with_atom)
+        self.bonds = []
+    def info_str(self):
+        info_str = ''
+        for v in self.visited:
+            info_str += v.info_str()+' '
+        return info_str
+    def add_visited(self, v):
+        self.visited.append(v)
+    def add_bond(self, b):
+        self.bonds.append(b)
+    def extend(self, bond):
+        self.add_visited(bond.b)
+        self.add_bond(bond)
+    def print_info(self):
+        for v in self.visited:
+            print "[%d:%s]" % (v.id,v.e),
+        print ""
+    def get_first(self):
+        return self.visited[0]
+    def get_last(self):
+        return self.visited[-1]        
+        
+class BondXyz(object):
+    def __init__(self, atom1, atom2):
+        self.a = atom1
+        self.b = atom2        
+        
+def JoinPaths(path1, path2):
+    joined_path = Path()
+    joined_path.visited = path1.visited + path2.visited
+    joined_path.bonds = path1.bonds + path2.bonds
+    return joined_path    
+
+class Ring(object):
+    def __init__(self, first_pair=None):
+        self.atoms = []
+        self.bonded_structures = []
+        if first_pair != None:
+            self.atoms.append(first_pair[0])
+            self.atoms.append(first_pair[1])
+    def type(self):
+        return "ring"
+    def has_atom(self, atom):
+        return self.check_atom(atom)
+    def check_atom(self, atom):
+        if atom in self.atoms: return True
+        else: return False
+    def check_add_pair(self, pair):
+        has_a = self.check_atom(pair[0])
+        has_b = self.check_atom(pair[1])
+        if has_a and not has_b:
+            self.atoms.append(pair[1])
+            return True
+        elif has_b and not has_a:
+            self.atoms.append(pair[0])
+            return True
+        elif has_b and has_a:
+            return True
+        else:
+            return False
+    def print_info(self):
+        for a in self.atoms:
+            print "%2d" % a.id,
+        print ""
+    def intersects(self, other):
+        intersects = False
+        for a in self.atoms:
+            if a in other.atoms:
+                intersects = True
+                break
+        return intersects
+    def add(self, other):
+        for atom in other.atoms:
+            if not atom in self.atoms:
+                self.atoms.append(atom)
+        return
+    def find_round_trip_path(self, visited=[], start_atm=None):
+        started_here = False
+        if start_atm == None:            
+            self.atoms = sorted(self.atoms, key=lambda atm: len(atm.bonded))
+            start_atm = self.atoms[0]
+            visited.append(start_atm)
+            started_here = True
+        for bond in start_atm.bonds_core:
+            # Backup visited list to be able to revert to this point
+            visited_0 = []
+            for v in visited: visited_0.append(v)
+            if bond.b in visited or not bond.b in self.atoms: continue
+            # Partner atom not yet visited, proceed            
+            visited.append(bond.b)
+            visited = self.find_round_trip_path(visited, bond.b)
+            # All atoms visited = round path?
+            if len(visited) == len(self.atoms):
+                break
+            # A dead end. Revert & try next bond
+            else:
+                visited = []
+                for v in visited_0: visited.append(v)
+        #assert len(visited) <= len(self.atoms)
+        if started_here:
+            if len(visited) != len(self.atoms):
+                if wordy: osio << osio.mr << "WARNING: Ring - Failed to generate round-trip path (atom order may be compromised)" << endl
+                visited = self.atoms
+            else:
+                if wordy:
+                    osio << "Found round-trip path" << endl
+        return visited
+    def order_atoms(self):
+        self.atoms = self.find_round_trip_path(visited=[], start_atm=None)
+        return
+    def get_all_bonded_structures(self, exclude_list=[]):
+        bonded = []
+        exclude_list.append(self)
+        for b in self.bonded_structures:
+            if b in exclude_list:
+                continue
+            bonded.append(b)
+            bonded = bonded + b.get_all_bonded_structures(exclude_list)
+        return bonded
+
+def JoinRings(ring1, ring2):
+    joined_ring = Ring()
+    for a in ring1.atoms:
+        joined_ring.atoms.append(a)
+    for b in ring2.atoms:
+        if not joined_ring.has_atom(b):
+            joined_ring.atoms.append(b)
+    return joined_ring
+
+class Chain(object):
+    def __init__(self, first_pair=None):
+        self.atoms = []
+        self.bonded_structures = []
+        if first_pair != None:
+            self.atoms.append(first_pair[0])
+            self.atoms.append(first_pair[1])
+    def type(self):
+        return "chain"
+    def has_atom(self, atom):
+        return self.check_atom(atom)
+    def intersects(self, other):
+        intersects = False
+        for a in self.atoms:
+            if a in other.atoms:
+                intersects = True
+                break
+        return intersects
+    def add(self, other):
+        for atom in other.atoms:
+            if not atom in self.atoms:
+                self.atoms.append(atom)
+        return
+    def print_info(self):
+        for a in self.atoms:
+            print "%2d" % a.id,
+        print ""
+    def find_round_trip_path(self, visited=[], start_atm=None):
+        started_here = False
+        # Figure out end atoms
+        ranks = []
+        for atm in self.atoms:
+            n_bonds_in_core_chain = 0
+            for bond in atm.bonds_core:
+                if bond.b in self.atoms:
+                    n_bonds_in_core_chain += 1
+            ranks.append(n_bonds_in_core_chain)
+        if start_atm == None:            
+            self.atoms = sorted(self.atoms, key=lambda atm: ranks[self.atoms.index(atm)])
+            start_atm = self.atoms[0]
+            visited.append(start_atm)
+            started_here = True
+        for bond in start_atm.bonds_core:
+            # Backup visited list to be able to revert to this point
+            visited_0 = []
+            for v in visited: visited_0.append(v)
+            if bond.b in visited or not bond.b in self.atoms: continue
+            # Partner atom not yet visited, proceed            
+            visited.append(bond.b)
+            visited = self.find_round_trip_path(visited, bond.b)
+            # All atoms visited = round path?
+            if len(visited) == len(self.atoms):
+                break
+            # A dead end. Revert & try next bond
+            else:
+                visited = []
+                for v in visited_0: visited.append(v)
+        #assert len(visited) <= len(self.atoms)
+        if started_here:
+            if len(visited) != len(self.atoms):
+                if wordy: osio << osio.mr << "WARNING: Chain - Failed to generate round-trip path (expected for branched core units, atom order may be compromised)" << endl
+                #visited = self.atoms
+                for atm in self.atoms:
+                    if not atm in visited:
+                        visited.append(atm)
+            else:
+                if wordy:
+                    osio << "Found round-trip path" << endl
+        return visited
+    def order_atoms(self):
+        self.atoms = self.find_round_trip_path(visited=[], start_atm=None)
+        return
+    def get_all_bonded_structures(self, exclude_list=[]):
+        bonded = []
+        exclude_list.append(self)
+        for b in self.bonded_structures:
+            if b in exclude_list:
+                continue
+            bonded.append(b)
+            bonded = bonded + b.get_all_bonded_structures(exclude_list)
+        return bonded
+
+def CreateMolecule(name, atoms, xyz_conv_fact=0.1):
+    molecule = Molecule(0, name)
+    for atom in atoms:
+        gro_atom = Atom(ln='')
+        gro_atom.fragId        = atom.fragid
+        gro_atom.fragName    = atom.fragname
+        gro_atom.name        = atom.name
+        gro_atom.Id            = atom.id
+        gro_atom.pos        = np.array(atom.xyz)*xyz_conv_fact
+        gro_atom.vel        = np.array([0.,0.,0.])
+        molecule.atoms.append(gro_atom)
+    return molecule
+
+def PartitionStructure(e=[], xyz=[], xyzfile=None, generate_bonded=False, verbose=False, outfile_xyz='', outfile_gro=''):
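+    """
+    Partitions a structure, given as element list e plus positions xyz (or
+    read from xyzfile), into core and ligand fragments based on covalent
+    connectivity. Returns (atoms_top, top): atoms_top holds the reordered
+    AtomXyz objects per fragment, top lists entries of the form
+    (fragment name, repeats, atoms per fragment).
+    """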
+    if xyzfile:
+        e,xyz = e_xyz_from_xyz(xyzfile)
+    # LOAD ATOMS
+    atoms = []
+    count = 0
+    for e_i, r_i in zip(e, xyz):
+        count += 1
+        atoms.append(AtomXyz(e_i, r_i, count))
+
+    # ESTABLISH BONDING VIA COVALENCE CRITERION
+    if verbose: osio << osio.mg << "Find bonds using covalence criterion" << endl
+    bond_count = 0
+    for i in range(len(atoms)):
+        for j in range(i+1, len(atoms)):
+            a = atoms[i]
+            b = atoms[j]
+            dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
+            dv = covalent_cutoff(a.covrad,b.covrad)        
+            if dr < dv:
+                bond_count += 1
+                a.bonded.append(b)
+                b.bonded.append(a)
+    if verbose: osio << "%d bonds in molecule" % bond_count << endl
+    if leaky:
+        for a in atoms:
+            print "%2s bonded to %d" % (a.e, len(a.bonded))
+
+    for a in atoms:
+        if len(a.bonded) == 0:
+            osio << osio.my << "NOTE: Unbonded atom" << a.e << a.id << endl
+
+    # SEQUENTIALLY SPLIT OFF LIGAND UNITS
+    if verbose: osio << osio.mg << "Find core using sequential reduction" << endl
+    short_list = []
+    for a in atoms:
+        short_list.append(a)
+
+    selection_levels = []
+    this_level = 0
+
+    while True:
+        if leaky:
+            print "Level", this_level
+            print "Short-listed", len(short_list)
+        for a in short_list:
+            a.bonded_short = []
+        for i in range(len(short_list)):
+            for j in range(i+1, len(short_list)):
+                a = short_list[i]
+                b = short_list[j]
+                dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
+                dv = covalent_cutoff(a.covrad,b.covrad)        
+                if dr < dv:
+                    a.bonded_short.append(b)
+                    b.bonded_short.append(a)    
+        rm_atoms = []
+        for s in short_list:
+            if len(s.bonded_short) == 1:
+                rm_atoms.append(s)    
+        if len(rm_atoms) == 0:
+            break
+        if leaky:
+            print "Removing", len(rm_atoms)
+        for r in rm_atoms:
+            r.level = this_level
+            for b in r.bonded_short:
+                b.dock_for.append(r)
+                r.docked_to.append(b)
+            short_list.remove(r)
+        if leaky:
+            ofs = open('level_%d.xyz' % this_level, 'w')
+            ofs.write('%d\n\n' % len(short_list))
+            for s in short_list:
+                ofs.write('%s %+1.7f %+1.7f %+1.7f\n' % (s.e, s.x, s.y, s.z))
+            ofs.close()    
+        this_level += 1
+
+    # READ OFF CORE ATOMS
+    core = []
+    for a in atoms:
+        assert len(a.docked_to) <= 1    
+        docked_atoms = a.get_all_docked_atoms()
+        if wordy:
+            osio << "%-2s bonded to %d, docked to %d, dock for %d/%-2d at level %+d" \
+                % (a.e, len(a.bonded), len(a.docked_to), len(a.dock_for), len(docked_atoms), a.level) << endl
+        if len(a.docked_to) < 1:
+            core.append(a)
+    if verbose: osio << "%d atoms in core" % len(core) << endl
+
+    # ESTABLISH BONDING AMONG CORE ATOMS
+    if verbose: osio << osio.mg << "Find core-atom bonds using covalence criterion" << endl
+    bonds = []
+    for i in range(len(core)):
+        for j in range(i+1, len(core)):
+            a = core[i]
+            b = core[j]
+            dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
+            dv = covalent_cutoff(a.covrad,b.covrad)
+            if dr < dv:
+                a.bonded_short.append(b)
+                b.bonded_short.append(a)
+                bond_ab = BondXyz(a,b)
+                bond_ba = BondXyz(b,a)
+                a.add_bond_core(b, bond_ab)
+                b.add_bond_core(a, bond_ba)
+                bonds.append(bond_ab)
+    if verbose: osio << "%d bonds in core" %  len(bonds) << endl
+
+    # GENERATE PATHS ALONG CORE BONDS
+    if verbose: osio << osio.mg << "Find connecting paths (max. length %d)" % path_cutoff_length << endl
+    path_count = 0
+    for c in core:
+        paths = c.find_paths(length=path_cutoff_length, exclusion_list=[], start_here=True)
+        if wordy:
+            osio << "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id) << endl
+        if leaky:
+            for p in paths:
+                p.print_info()
+        path_count += len(paths)
+    if verbose: osio << "Generated a total of %d bond paths" % path_count << endl
+
+    # FROM PATHS FIND RING-CONNECTED ATOMS
+    ring_pairs = []
+    for i in range(len(core)):
+        for j in range(i+1, len(core)):
+            a = core[i]
+            b = core[j]
+            
+            paths_ab = []
+            for p in a.paths:
+                if p.get_last() == b:
+                    paths_ab.append(p)
+            
+            paths_ba = []
+            for p in b.paths:
+                if p.get_last() == a:
+                    paths_ba.append(p)
+            
+            if leaky:
+                print "ID1 %d ID2 %d" % (a.id, b.id)
+                print "a => b: %d" % len(paths_ab)
+                #for p in paths_ab:
+                #    p.print_info()
+                print "b => a: %d" % len(paths_ba)
+                #for p in paths_ba:
+                #    p.print_info()
+            
+            assert len(paths_ab) == len(paths_ba)        
+            if len(paths_ab) == 1: continue
+            
+            has_disjoint_paths = False
+            for k in range(len(paths_ab)):
+                for l in range(k+1, len(paths_ab)):
+                    intersects = False
+                    p1 = paths_ab[k]
+                    p2 = paths_ab[l]
+                    b1 = p1.bonds
+                    b2 = p2.bonds
+                    
+                    for bond in b1:
+                        if bond in b2:
+                            intersects = True
+                    if not intersects:
+                        has_disjoint_paths = True
+                    
+            if has_disjoint_paths:
+                pair = [a,b]
+                ring_pairs.append(pair)
+                if leaky:
+                    osio << osio.mg << "Ring pair:" << a.id-1 << b.id-1 << endl
+
+    # FROM RING PAIRS, FIND RINGS VIA SUCCESSIVE ADDITION
+    if verbose: osio << osio.mg << "Find rings using set of ring pairs" << endl
+    rings = []
+
+    for pair in ring_pairs:
+        new_ring = Ring(first_pair=pair)
+        rings.append(new_ring)
+
+    i = 0
+    while i <= len(rings)-1:
+        ring = rings[i]
+        rm_rings = []
+        for j in range(i+1, len(rings)):
+            other = rings[j]
+            if ring.intersects(other):
+                rm_rings.append(other)
+                ring.add(other)
+        for r in rm_rings:
+            rings.remove(r)
+        i += 1
+        
+    if verbose: osio << "Core rings (# = %d)" % len(rings) << endl
+    if wordy:
+        for r in rings:
+            r.print_info()
+
+    # READ OFF NON-RING ATOMS
+    non_ring_core_atoms = []
+    for c in core:
+        in_ring = False
+        for r in rings:
+            if r.has_atom(c):
+                in_ring = True
+        if not in_ring:
+            non_ring_core_atoms.append(c)
+
+    if verbose: osio << "Non-ring core atoms: %d" % len(non_ring_core_atoms) << endl
+
+    # ESTABLISH BONDING AMONG NON-RING CORE ATOMS
+    if verbose: osio << osio.mg << "Find non-ring core-atom bonds using covalence criterion" << endl
+    bonds = []
+    for i in range(len(non_ring_core_atoms)):
+        for j in range(i+1, len(non_ring_core_atoms)):
+            a = non_ring_core_atoms[i]
+            b = non_ring_core_atoms[j]
+            dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
+            dv = covalent_cutoff(a.covrad,b.covrad)
+            if dr < dv:
+                a.bonded_short.append(b)
+                b.bonded_short.append(a)
+                bond_ab = BondXyz(a,b)
+                bond_ba = BondXyz(b,a)
+                a.add_bond_non_ring(b, bond_ab)
+                b.add_bond_non_ring(a, bond_ba)
+                bonds.append(bond_ab)
+    if verbose: osio << "%d bonds in non-ring core" %  len(bonds) << endl
+
+    # GENERATE PATHS ALONG NON-RING CORE BONDS
+    if verbose: osio << osio.mg << "Find connecting non-ring paths (max. length %d)" % path_cutoff_length << endl
+    path_count = 0
+    for c in non_ring_core_atoms:
+        paths = c.find_paths_non_ring(length=path_cutoff_length, exclusion_list=[], start_here=True)
+        if wordy:
+            print "%2d paths of length <= %d from atom %2d" % (len(paths), path_cutoff_length, c.id)
+        if leaky:
+            for p in paths:
+                p.print_info()
+        path_count += len(paths)
+    if verbose: osio << "Generated a total of %d non-ring bond paths" % path_count << endl
+
+    # FROM PATHS FIND NON-RING-CONNECTED ATOMS
+    non_ring_pairs = []
+    for i in range(len(non_ring_core_atoms)):
+        for j in range(i+1, len(non_ring_core_atoms)):
+            a = non_ring_core_atoms[i]
+            b = non_ring_core_atoms[j]
+            
+            paths_ab = []
+            for p in a.paths_non_ring:
+                if p.get_last() == b:
+                    paths_ab.append(p)
+            
+            paths_ba = []
+            for p in b.paths_non_ring:
+                if p.get_last() == a:
+                    paths_ba.append(p)
+            
+            if leaky:
+                print "ID1 %d ID2 %d" % (a.id, b.id)
+                print "a => b: %d" % len(paths_ab)
+                #for p in paths_ab:
+                #    p.print_info()
+                print "b => a: %d" % len(paths_ba)
+                #for p in paths_ba:
+                #    p.print_info()
+            
+            assert len(paths_ab) == len(paths_ba)        
+            assert len(paths_ab) <= 1
+            
+            if len(paths_ab) > 0:
+                pair = [a,b]
+                non_ring_pairs.append(pair)
+                if leaky:
+                    osio << osio.mg << "Non-ring pair:" << a.id-1 << b.id-1 << endl
+
+    # FROM NON-RING PAIRS, FIND NON-RINGS (= CHAINS) VIA SUCCESSIVE ADDITION
+    if verbose: osio << osio.mg << "Find non-ring structures using set of non-ring pairs" << endl
+    chains = []
+
+    # ADD NON-RING CHAINS WITH > 1 CORE ATOM
+    for pair in non_ring_pairs:
+        new_chain = Chain(first_pair=pair)
+        chains.append(new_chain)
+
+    # ADD NON-RING CHAINS WITH ONLY 1 CORE ATOM (HENCE WITHOUT CHAIN PATHS)
+    for a in non_ring_core_atoms:
+        if a.paths_non_ring == []:
+            new_chain = Chain()
+            new_chain.atoms.append(a)
+            chains.append(new_chain)
+
+    i = 0
+    while i <= len(chains)-1:
+        chain = chains[i]
+        rm_chains = []
+        for j in range(i+1, len(chains)):
+            other = chains[j]
+            if chain.intersects(other):
+                rm_chains.append(other)
+                chain.add(other)
+        for r in rm_chains:
+            chains.remove(r)
+        i += 1
+
+    if len(chains) == 0:
+        for atom in non_ring_core_atoms:
+            new_chain = Chain()
+            new_chain.atoms.append(atom)
+            chains.append(new_chain)
+
+    if verbose: osio << "Core chains (# = %d)" % len(chains) << endl
+    if wordy:
+        for c in chains:
+            c.print_info()
+
+    # REORDER STRUCTURAL ELEMENTS (CORE RINGS & CORE CHAINS)
+    molecule = []
+    structures = rings + chains
+    for i in range(len(structures)):
+        for j in range(i+1, len(structures)):
+            s1 = structures[i]
+            s2 = structures[j]
+            bond_count = 0
+            for a in s1.atoms:
+                for b in s2.atoms:
+                    dr = np.dot(a.xyz-b.xyz,a.xyz-b.xyz)**0.5
+                    dv = covalent_cutoff(a.covrad,b.covrad)
+                    if dr < dv:
+                        bond_count += 1
+            assert bond_count <= 1
+            if bond_count:
+                s1.bonded_structures.append(s2)
+                s2.bonded_structures.append(s1)
+
+    start_struct_idx = 0
+    if len(structures) == 1:
+        pass
+    else:
+        structures = sorted(structures, key=lambda s: len(s.bonded_structures))
+        while structures[start_struct_idx].bonded_structures == []:
+            molecule.append(structures[start_struct_idx])
+            if start_struct_idx+1 == len(structures): break
+            start_struct_idx += 1
+    start_struct = structures[start_struct_idx]
+
+    docked_structures = start_struct.get_all_bonded_structures(exclude_list=[])
+    molecule =  molecule + [start_struct] + docked_structures
+
+    # REORDER ATOMS IN EACH STRUCTURE
+    for struct in molecule:
+        if verbose: osio << "Structure type %-10s" % ("'%s'" % struct.type()) << "(bonded to %d)" % len(struct.bonded_structures) << endl
+        struct.order_atoms()
+
+    # ASSIGN SMALL CORE STRUCTURES TO NEIGHBOUR STRUCTURES
+    rm_structs = []
+    for struct in molecule:
+        if len(struct.atoms) < 2:
+            rm_atoms = []
+            for atom in struct.atoms:
+                if atom.bonded_core != []:
+                    rm_atoms.append(atom)
+                    molecule[molecule.index(struct)-1].atoms.append(atom)
+            for r in rm_atoms:
+                struct.atoms.remove(r)
+            if len(struct.atoms) == 0:
+                rm_structs.append(struct)
+    for r in rm_structs:
+        molecule.remove(r)
+
+    # GENERATE ATOM TYPES
+    if verbose: osio << osio.mg << "Assign atom types" << endl
+    frag_atom_type_count = {}
+    for atm in atoms:
+        atm.generate_type()
+        frag_atom_type_count[atm.type] = 0
+
+    # SORT ATOMS AND ASSIGN FRAGMENT NAMES & IDs
+    if verbose: osio << osio.mg << "Sort atoms, assign fragment names & IDs" << endl
+    atoms_ordered = []
+    frag_count = 0
+    core_count = 0
+    ligand_count = 0
+    core_alphabet_index = 0
+    ligand_alphabet_index = 1
+    atoms_top = []
+    top = [] # entries of the form (name, repeats, n_atoms_in_frag)
+    for struct in molecule:
+        # Core atoms
+        frag_count += 1
+        core_count += 1
+        ligand_sets = []
+        # Reset fragment atom-type counter
+        for key in frag_atom_type_count.keys():
+            frag_atom_type_count[key] = 0
+        if verbose: osio << "Core   '%s' (size at core level: %2d)" % (ALPHABET[core_count-1], len(struct.atoms)) << endl
+        # Topology entry
+        atoms_top.append([])
+        top.append(['CO'+ALPHABET[core_count-1],1,0])
+        for atm in struct.atoms:
+            top[-1][2] += 1
+            atoms_top[-1].append(atm)
+            atm.fragid = frag_count
+            atm.fragname = 'CO' + ALPHABET[core_count-1]        
+            atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]]
+            frag_atom_type_count[atm.type] += 1
+            atoms_ordered.append(atm)        
+            docked = atm.get_all_docked_atoms()
+            if len(docked) <= ligand_size_lower:
+                for datm in docked:
+                    top[-1][2] += 1
+                    atoms_top[-1].append(datm)
+                    datm.fragid = frag_count
+                    datm.fragname = 'CO' + ALPHABET[core_count-1]
+                    datm.name = datm.type + NUMBERS[frag_atom_type_count[datm.type]]
+                    atoms_ordered.append(datm)
+            else:
+                ligand_sets.append(docked)    
+        # Reset fragment atom-type counter
+        for key in frag_atom_type_count.keys():
+            frag_atom_type_count[key] = 0
+        # Ligand atoms
+        ligand_count_this_core = 0
+        for lset in ligand_sets:
+            frag_count += 1
+            ligand_count += 1
+            ligand_count_this_core += 1
+            # Topology entry
+            top.append(['L%s%d' % (ALPHABET[core_count-1], ligand_count_this_core),1,0])
+            atoms_top.append([])
+            if verbose: osio  << "Ligand '%s' (size at core level: %2d)" % (ALPHABET[core_count-1], len(lset)) << endl        
+            for atm in lset:
+                top[-1][2] += 1
+                atoms_top[-1].append(atm)
+                atm.fragid = frag_count
+                atm.fragname = 'L%s%d' % (ALPHABET[core_count-1], ligand_count_this_core)
+                atm.name = atm.type + NUMBERS[frag_atom_type_count[atm.type]]
+                frag_atom_type_count[atm.type] += 1
+                atoms_ordered.append(atm)
+    
+    # FIX ATOM IDs        
+    atom_count = 0
+    for atom in atoms_ordered:
+        atom_count += 1
+        atom.id = atom_count
+
+    # DEFINE INTERACTIONS VIA BONDED PATHS
+    # ... Paths of length 2 => bonds, paths of length 3 => angles
+    # ... Atoms with three bonded partners => star impropers
+    # ... Remove dihedrals across ring units (terminal atoms connected by a 3-path and (n>3)-path)
+    if generate_bonded:
+        for atom in atoms_ordered:
+            paths = atom.find_paths(3, [], True)
+            if verbose:
+                for p in paths:
+                    osio << p.info_str() << endl
+        
+    # OUTPUT XYZ
+    if outfile_xyz != '':
+        ofs = open(outfile_xyz, 'w')
+        ofs.write('%d\n\n' % len(atoms_ordered))        
+        for atm in atoms_ordered:
+            if wordy:
+                print atm.e, atm.fragname, atm.fragid, atm.type, atm.name
+            ofs.write('%-2s %+1.7f %+1.7f %+1.7f\n' % (atm.e, atm.x, atm.y, atm.z))
+        ofs.close()
+
+    # OUTPUT GRO
+    if outfile_gro != '':
+        molecule = CreateMolecule(molname, atoms_ordered)
+        molecule.write_gro(outfile_gro)
+
+    return atoms_top, top
+
+        
+        
+        
+        
+
+
+
+
+
+
+