diff --git a/common/python/nomadcore/md_data_access/MDDataAccess.py b/common/python/nomadcore/md_data_access/MDDataAccess.py
index 2ff3241ff74c06c81868f6ac2081374bdcb1951a..60d9ebd5d4dd4a31a38dca608c945ffee0ce016d 100644
--- a/common/python/nomadcore/md_data_access/MDDataAccess.py
+++ b/common/python/nomadcore/md_data_access/MDDataAccess.py
@@ -36,7 +36,7 @@ import copy
 mda_coordinates_modules = tuple(x[1] for x in inspect.getmembers(mda_c,inspect.ismodule))
 logger = logging.getLogger(__name__)
 
-# The info in the table below is taken from MDAnalysis and definitions 
+# The info in the table below is taken from MDAnalysis and definitions
 # are checked with the info in Amber, CHARMM, Gromacs, and GROMOS
 # The units in the table are in atomic masses (u)
 # Please see:
@@ -70,14 +70,14 @@ ELEMENTS_MASS_TABLE = {
         "Sn" : 118.71,    "Ti" : 47.88,     "W"  : 183.85,    "U"  : 238.0289,
         "V"  : 50.9415,   "Xe" : 131.29,    "Yb" : 173.04,    "Y"  : 88.90585,
         "Zn" : 65.37000,  "Zr" : 91.224
-        }  
+        }
 
 TEXTCHARS = bytearray({7,8,9,10,12,13,27} | set(range(0x20, 0x100)) - {0x7f})
 
 def is_file_binary(fName, checkBytes=None):
     testin=None
     if checkBytes is None:
-        checkBytes = 1024 
+        checkBytes = 1024
     try:
         with open(fName, 'rb') as fin:
             testin = fin.read(checkBytes)
@@ -288,7 +288,7 @@ def charmm_coor_reader(fname, ftextmem=None):
                     for line in fin:
                         # If the line is a title line
                         # add it to title list
-                        if(line.startswith('*') or 
+                        if(line.startswith('*') or
                            line.startswith('TITLE')):
                             title.append(line)
                         # else start readinf COOR format
@@ -320,14 +320,14 @@ def charmm_coor_reader(fname, ftextmem=None):
 def get_dir_base_extension(file_name):
     """ Splits directory, file base and file extensions
 
-        Returns: directory without leading '/', 
+        Returns: directory without leading '/',
                  file base name, and file extension without '.'
     """
     file_base, file_extension_with_dot = os.path.splitext(os.path.basename(file_name))
     file_extension = file_extension_with_dot.split(".")[-1]
     file_dir = os.path.dirname(file_name)
     return file_dir, file_base, file_extension
-            
+
 def pmdConvertTopoDict(topoStor, topoPMD):
     """ Function to convert ParmEd topology info
         from CharmmPsfFile and CharmmParameterSet
@@ -339,7 +339,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
        isinstance(topoPMD, pmd.charmm.CharmmPsfFile) or
        isinstance(topoPMD, pmd.tinker.tinkerfiles.DynFile) or
        isinstance(topoPMD, pmd.tinker.tinkerfiles.XyzFile) or
-       (topoPMD is None and topoStor.charmmcoor is not None) or 
+       (topoPMD is None and topoStor.charmmcoor is not None) or
        topoStor.charmmcoortopo is not None):
         topo = topoPMD
 
@@ -375,7 +375,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
                 atmbfac = None
             atm_unique = ''
             if atmname:
-                atm_unique = atm_unique + atmname 
+                atm_unique = atm_unique + atmname
             if atmtyp:
                 atm_unique = atm_unique + "-" + atmtyp
             if atom[16]:
@@ -426,7 +426,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
 
         def atompairids(atom1,atom2):
             return [atom1[0],atom2[0]]
-     
+
         def getX(x, attr, attr2=None):
             try:
                 if attr2 is not None:
@@ -452,23 +452,23 @@ def pmdConvertTopoDict(topoStor, topoPMD):
            isinstance(topoPMD, pmd.tinker.tinkerfiles.XyzFile)):
             unassignedAtomType = False
             try:
-                structure = [[getX(x,'name'), getX(x,'type'), getX(x,'residue','name'), 
-                              getX(x,'residue','_idx'), getX(x,'residue','segid'), 
-                              getX(x,'residue','chain'), getX(x,'altloc'), 
-                              getX(x,'irotat'), getX(x,'occupancy'), getX(x,'bfactor'), 
-                              getX(x,'mass'), getX(x,'_charge'), getX(x,'solvent_radius'), 
-                              getX(x,'atomic_number'), getX(x,'tree'), getX(x,'atom_type','name'), 
-                              getX(x,'atom_type','epsilon'), getX(x,'atom_type','rmin'), 
-                              getX(x,'atom_type','epsilon_14'), getX(x,'atom_type','rmin_14'), 
+                structure = [[getX(x,'name'), getX(x,'type'), getX(x,'residue','name'),
+                              getX(x,'residue','_idx'), getX(x,'residue','segid'),
+                              getX(x,'residue','chain'), getX(x,'altloc'),
+                              getX(x,'irotat'), getX(x,'occupancy'), getX(x,'bfactor'),
+                              getX(x,'mass'), getX(x,'_charge'), getX(x,'solvent_radius'),
+                              getX(x,'atomic_number'), getX(x,'tree'), getX(x,'atom_type','name'),
+                              getX(x,'atom_type','epsilon'), getX(x,'atom_type','rmin'),
+                              getX(x,'atom_type','epsilon_14'), getX(x,'atom_type','rmin_14'),
                               getX(x,'atom_type','nbfix')] for x in topoPMD.atoms]
             except AttributeError:
                 unassignedAtomType = True
-                structure = [[getX(x,'name'), getX(x,'type'), getX(x,'residue.name'), 
-                              getX(x,'residue._idx'), getX(x,'residue.segid'), 
-                              getX(x,'residue.chain'), getX(x,'altloc'), 
-                              getX(x,'irotat'), getX(x,'occupancy'), getX(x,'bfactor'), getX(x,'mass'), 
-                              getX(x,'._charge'), getX(x,'solvent_radius'), getX(x,'atomic_number'), 
-                              getX(x,'tree'), '', None, None, None, None, 
+                structure = [[getX(x,'name'), getX(x,'type'), getX(x,'residue.name'),
+                              getX(x,'residue._idx'), getX(x,'residue.segid'),
+                              getX(x,'residue.chain'), getX(x,'altloc'),
+                              getX(x,'irotat'), getX(x,'occupancy'), getX(x,'bfactor'), getX(x,'mass'),
+                              getX(x,'._charge'), getX(x,'solvent_radius'), getX(x,'atomic_number'),
+                              getX(x,'tree'), '', None, None, None, None,
                               None] for x in topoPMD.atoms]
 
             chainList = [a[5] for a in structure]
@@ -490,16 +490,16 @@ def pmdConvertTopoDict(topoStor, topoPMD):
             #        element = get_element_name(atom[2])
             #        if element is None:
             #            element = atom[1]
-            #    atom_element_list.append(element) 
+            #    atom_element_list.append(element)
         # If topo is ParSet, the parameter file or files might be supplied.
-        # If Psf file is also supplied with the parameters, we can extract 
+        # If Psf file is also supplied with the parameters, we can extract
         # full information of the topology with parameter values.
         # However, if there is no Psf file and user only supplied COOR CARD
-        # or CRD file, than we can only extract atom definitions through 
+        # or CRD file, then we can only extract atom definitions through
         # these two files and we try to fill in the blanks of the topology
         # puzzle in CHARMM with parameter set class in ParmEd.
         elif(isinstance(topoPMD, pmd.charmm.CharmmParameterSet) and
-            (topoStor.charmmcoor is not None or 
+            (topoStor.charmmcoor is not None or
              topoStor.charmmcoortopo is not None)):
             coortopo=False
             #ATOMNO RESNO   RES  TYPE  X     Y     Z   SEGID RESID Weighting
@@ -578,14 +578,14 @@ def pmdConvertTopoDict(topoStor, topoPMD):
                 #        element = get_element_name(atom[2])
                 #        if element is None:
                 #            element = atom[1]
-                #    atom_element_list.append(element) 
-
-        # There is also one possibility that we do not have 
-        # Psf or parameter set but only COOR info. If this is 
-        # the case and the info is not from a binary CRD file,        
-        # we can only extract names of residues, atom kinds 
-        # and types from the coordinate card info and 
-        elif(topoStor.charmmcoor is not None or 
+                #    atom_element_list.append(element)
+
+        # There is also one possibility that we do not have
+        # Psf or parameter set but only COOR info. If this is
+        # the case and the info is not from a binary CRD file,
+        # we can only extract names of residues, atom kinds
+        # and types from the coordinate card info and
+        elif(topoStor.charmmcoor is not None or
              topoStor.charmmcoortopo is not None):
             coortopo=False
             #ATOMNO RESNO   RES  TYPE  X     Y     Z   SEGID RESID Weighting
@@ -642,7 +642,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
                 #        element = get_element_name(atom[2])
                 #        if element is None:
                 #            element = atom[1]
-                #    atom_element_list.append(element) 
+                #    atom_element_list.append(element)
 
         if structure is None:
             return topoStor
@@ -712,11 +712,11 @@ def pmdConvertTopoDict(topoStor, topoPMD):
             atomlabelDict.append(typelabelList)
 
         for ai, atom in enumerate(atomAllList):
-            atom_element_list.append(atom[16]) 
+            atom_element_list.append(atom[16])
 
         for atom in atomAllList:
             atomlabelList.append([atom[0],atom_type_dict[atom[1]]])
-        
+
         system_name = ''
         if atom_element_list:
             system_name = system_name + ''.join([
@@ -748,7 +748,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
                     for bi, bondtype in enumerate(topoPMD.bonds[0].type.list):
                         topoPMD.bonds[0].type.list[bi]._idx=bi
                     topo_bond_list = np.asarray([
-                        [x.atom1._idx, x.atom2._idx, x.type._idx, 
+                        [x.atom1._idx, x.atom2._idx, x.type._idx,
                          x.type.k, x.type.req] for x in topoPMD.bonds
                         ])
                 else:
@@ -759,7 +759,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
             else:
                 topbList = []
         elif(isinstance(topoPMD, pmd.charmm.CharmmParameterSet) and
-            (topoStor.charmmcoor is not None or 
+            (topoStor.charmmcoor is not None or
              topoStor.charmmcoortopo is not None)):
             if topoPMD.bond_types is not None:
                 bi=0
@@ -789,21 +789,21 @@ def pmdConvertTopoDict(topoStor, topoPMD):
                                 continue         # COOR file only includes residue ids for each atom
                             if at1[0] == at2[0]:
                                 continue
-                            if((at1[2] in btyp[0] or at1[3] in btyp[0] or at1[16] in btyp[0]) and 
+                            if((at1[2] in btyp[0] or at1[3] in btyp[0] or at1[16] in btyp[0]) and
                                (at2[2] in btyp[1] or at2[3] in btyp[1] or at2[16] in btyp[1])):
                                 if(str(at1[0])+'-'+str(at2[0]) in bond_found_names or
                                    str(at2[0])+'-'+str(at1[0]) in bond_found_names):
                                        continue
-                                bond_found_list.append([at1[0], at2[0], topoPMD.bond_types[btyp]._idx, 
+                                bond_found_list.append([at1[0], at2[0], topoPMD.bond_types[btyp]._idx,
                                     topoPMD.bond_types[btyp].k,topoPMD.bond_types[btyp].req])
                                 bond_found_names.append(str(at1[0])+'-'+str(at2[0]))
                                 bond_found_names.append(str(at2[0])+'-'+str(at1[0]))
-                            if((at1[2] in btyp[1] or at1[3] in btyp[1] or at1[16] in btyp[1]) and 
+                            if((at1[2] in btyp[1] or at1[3] in btyp[1] or at1[16] in btyp[1]) and
                                (at2[2] in btyp[0] or at2[3] in btyp[0] or at2[16] in btyp[0])):
                                 if(str(at1[0])+'-'+str(at2[0]) in bond_found_names or
                                    str(at2[0])+'-'+str(at1[0]) in bond_found_names):
                                        continue
-                                bond_found_list.append([at1[0], at2[0], topoPMD.bond_types[btyp]._idx, 
+                                bond_found_list.append([at1[0], at2[0], topoPMD.bond_types[btyp]._idx,
                                     topoPMD.bond_types[btyp].k,topoPMD.bond_types[btyp].req])
                                 bond_found_names.append(str(at1[0])+'-'+str(at2[0]))
                                 bond_found_names.append(str(at2[0])+'-'+str(at1[0]))
@@ -843,14 +843,14 @@ def pmdConvertTopoDict(topoStor, topoPMD):
         if topbCont:
             for pair in topbList:
                 topbNames.append(atomAllList[int(pair[0])][1] + '-' + atomAllList[int(pair[1])][1])
-            
+
             topb=list(set(topbNames))
             interNum = 0
             for tb in topb:
                 bondList = []
                 typeList = []
                 noninter = True
-                bc = 0 
+                bc = 0
                 for bb in topbList:
                     b=[int(x) for x in bb]
                     topt=atomAllList[b[0]][1] + '-' + atomAllList[b[1]][1]
@@ -883,7 +883,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
 #                        reslist=atom_respairs(b.atoms[0],b.atoms[1])
 #                        atmlist=atompairs(b.atoms[0],b.atoms[1])
 #                        atmidlist=atompairids(b.atoms[0],b.atoms[1])
-#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or 
+#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or
 #                           (aelm == str(atmlist[1][2]) and belm == str(atmlist[0][2]))):
 #                            bondList.append(atmidlist)
 #                            interDict.update({interNum : bondList})
@@ -901,7 +901,7 @@ def pmdConvertTopoDict(topoStor, topoPMD):
         #atom_to_residue = np.zeros((len(residueList), 2), dtype=int)
         #atom_to_residue[:,0] = atomIndex+1
         #atom_to_residue[:,1] = np.array(residueList)+1
-       
+
 
         topoStor.topoDict.update({"system_name" : system_name})
         topoStor.topoDict.update({"atom_name_list" : atom_name_list})
@@ -997,7 +997,7 @@ def gtoConvertTopoDict(topoStor, topoGTO):
                 element = get_element_name(atom.name)
                 if element is None:
                     element = atom.name
-            atom_element_list.append(element) 
+            atom_element_list.append(element)
 
         system_name = ''
         if atom_element_list:
@@ -1051,7 +1051,7 @@ def gtoConvertTopoDict(topoStor, topoGTO):
 
         for atom in atomAllList:
             atomlabelList.append([atom[0],atom_type_dict[atom[1]]])
-        
+
         massList = list(massDict.values())
         atom_type_list = list(atomtypesDict.values())
         name_list = list(atomnameDict.values())
@@ -1069,7 +1069,7 @@ def gtoConvertTopoDict(topoStor, topoGTO):
             bondList = []
             typeList = []
             noninter = True
-            bc = 0 
+            bc = 0
             for i1, a1 in enumerate(topo.atomtypes):
                 for i2, a2 in enumerate(topo.atomtypes):
                     if i2<i1:
@@ -1104,7 +1104,7 @@ def gtoConvertTopoDict(topoStor, topoGTO):
 #            bondList = []
 #            typeList = []
 #            noninter = True
-#            bc = 0 
+#            bc = 0
 #            for i1, a1 in enumerate(topo.atomtypes):
 #                for i2, a2 in enumerate(topo.atomtypes):
 #                    if((b.type1_name == a1.name and b.type2_name == a2.name) or
@@ -1153,9 +1153,9 @@ def pymConvertTopoDict(topoStor, topoPYM):
         to MDDataAccess
 
         Pymolfile stores structure data in numpy with following fields
-            (name, type, resname, resid, segid, chain, altloc, insertion, 
+            (name, type, resname, resid, segid, chain, altloc, insertion,
              occupancy, bfactor, mass, charge, radius, atomicnumber)
-        Ex.: ['N' 'NH3' 'ASP' '48' 'PRO1' 'P' ' ' '' 
+        Ex.: ['N' 'NH3' 'ASP' '48' 'PRO1' 'P' ' ' ''
               '0.0' '0.0' '14.007' '-0.3' '0.0' '0']
     """
     topo=None
@@ -1194,7 +1194,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
                 atmbfac = None
             atm_unique = ''
             if atmname:
-                atm_unique = atm_unique + atmname 
+                atm_unique = atm_unique + atmname
             if atmtyp:
                 atm_unique = atm_unique + "-" + atmtyp
             #if atmres:
@@ -1251,7 +1251,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
                 element = get_element_name(atom[1])
                 if element is None:
                     element = atom[1]
-            atom_element_list.append(element) 
+            atom_element_list.append(element)
 
         system_name = ''
         if atom_element_list:
@@ -1314,7 +1314,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
 
         for atom in atomAllList:
             atomlabelList.append([atom[0],atom_type_dict[atom[1]]])
-        
+
         massList = list(massDict.values())
         atom_type_list = list(atomtypesDict.values())
         name_list = list(atomnameDict.values())
@@ -1337,7 +1337,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
             bondList = []
             typeList = []
             noninter = True
-            bc = 0 
+            bc = 0
             for b in topbList:
                 topt=atomAllList[b[0]-1][1] + '-' + atomAllList[b[1]-1][1]
                 if topt == tb:
@@ -1369,7 +1369,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
 #                        reslist=atom_respairs(b.atoms[0],b.atoms[1])
 #                        atmlist=atompairs(b.atoms[0],b.atoms[1])
 #                        atmidlist=atompairids(b.atoms[0],b.atoms[1])
-#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or 
+#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or
 #                           (aelm == str(atmlist[1][2]) and belm == str(atmlist[0][2]))):
 #                            bondList.append(atmidlist)
 #                            interDict.update({interNum : bondList})
@@ -1387,7 +1387,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
         #atom_to_residue = np.zeros((len(residueList), 2), dtype=int)
         #atom_to_residue[:,0] = atomIndex+1
         #atom_to_residue[:,1] = np.array(residueList)+1
-       
+
 
         topoStor.topoDict.update({"system_name" : system_name})
         topoStor.topoDict.update({"atom_name_list" : atom_name_list})
@@ -1408,7 +1408,7 @@ def pymConvertTopoDict(topoStor, topoPYM):
     return topoStor
 
 def mdtConvertTopoDict(topoStor, topoMDT):
-    if(isinstance(topoMDT, mdt_Topology) or 
+    if(isinstance(topoMDT, mdt_Topology) or
        isinstance(topoMDT, mdt_Trajectory)):
         topo = topoMDT
 
@@ -1451,7 +1451,7 @@ def mdtConvertTopoDict(topoStor, topoMDT):
                 for bond in topo.bonds:
                     molname1, molatom1 = str(bond[0]).split('-')
                     molname2, molatom2 = str(bond[1]).split('-')
-                    if((aelm == str(molatom1) and belm == str(molatom2)) or 
+                    if((aelm == str(molatom1) and belm == str(molatom2)) or
                         (aelm == str(molatom2) and belm == str(molatom1))):
                         bondList.append(list(topologyBonds[bondid]))
                         interDict.update({interNum : bondList})
@@ -1470,7 +1470,7 @@ def mdtConvertTopoDict(topoStor, topoMDT):
         atom_to_residue = np.zeros((len(residueList), 2), dtype=int)
         atom_to_residue[:,0] = atomIndex+1
         atom_to_residue[:,1] = np.array(residueList)+1
-        
+
         system_name = ''
         if atom_element_list:
             system_name = system_name + ''.join([
@@ -1497,7 +1497,7 @@ def mdtConvertTopoDict(topoStor, topoMDT):
     return topoStor
 
 def mdaConvertTopoDict(topoStor, topoMDA):
-    if(isinstance(topoMDA, mda_u.Universe) or 
+    if(isinstance(topoMDA, mda_u.Universe) or
        isinstance(topoMDA, mda_u.Topology)):
         topo = topoMDA
 
@@ -1508,7 +1508,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
             else:
                 segname = seg.segid
             return [seg.ix,segname]
-        
+
         def getres(res):
             regex = re.compile(r"^b'")
             if isinstance(res.resname, bytes):
@@ -1521,7 +1521,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
 
         def get_atomseg(atom):
             return [atom.segment.ix,atom.segment.segname.decode('utf-8')]
-        
+
         def get_atomres(atom):
             return [atom.resname.decode('utf-8'),atom.resid]
 
@@ -1612,7 +1612,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
                 #    element = get_element_name(atom[3])
                 if element is None:
                     element = atom[2]
-            atom_element_list.append(element) 
+            atom_element_list.append(element)
 
         system_name = ''
         if atom_element_list:
@@ -1693,7 +1693,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
 
         for atom in atomList:
             atomlabelList.append([atom[0],atom_type_dict[atom[1]]])
-        
+
         massList = list(massDict.values())
         atom_type_list = list(atomtypesDict.values())
         name_list = list(atomnameDict.values())
@@ -1742,7 +1742,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
 #                        reslist=atom_respairs(b.atoms[0],b.atoms[1])
 #                        atmlist=atompairs(b.atoms[0],b.atoms[1])
 #                        atmidlist=atompairids(b.atoms[0],b.atoms[1])
-#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or 
+#                        if((aelm == str(atmlist[0][2]) and belm == str(atmlist[1][2])) or
 #                           (aelm == str(atmlist[1][2]) and belm == str(atmlist[0][2]))):
 #                            bondList.append(atmidlist)
 #                            interDict.update({interNum : bondList})
@@ -1760,7 +1760,7 @@ def mdaConvertTopoDict(topoStor, topoMDA):
         #atom_to_residue = np.zeros((len(residueList), 2), dtype=int)
         #atom_to_residue[:,0] = atomIndex+1
         #atom_to_residue[:,1] = np.array(residueList)+1
-       
+
 
         topoStor.topoDict.update({"system_name" : system_name})
         topoStor.topoDict.update({"atom_name_list" : atom_name_list})
@@ -1789,7 +1789,7 @@ class MDDataConverter(object):
 
     def topology_to_dictionary(self):
         """ This function generates self.topologyDict dictionary
-            and converts/stores all the topology information 
+            and converts/stores all the topology information
             according to the meta info definitions
         """
         newDict = {}
@@ -1822,7 +1822,7 @@ class MDDataConverter(object):
         return False, system_name, itemdict
 
     def topology_atom_to_mol(topodict, itemdict):
-        """ Function to generate data for atom_to_molecule 
+        """ Function to generate data for atom_to_molecule
         """
         if "residue_list" in topodict:
             atom_to_residue = topodict["residue_list"]
@@ -1840,7 +1840,7 @@ class MDDataBase(dict):
             if isinstance(arg, dict):
                 for k, v in arg.items():
                     if k in self:
-                        self[k] = v 
+                        self[k] = v
         if kwargs:
             for k, v in kwargs.items():
                 if k in self:
@@ -1862,7 +1862,7 @@ class MDDataCommon(MDDataBase):
 
         time is (1 x n_step) numpy array.
         n_ variables are in numbers.
-        iread is expected to be a generator 
+        iread is expected to be a generator
             that loops all n_steps of given array.
     """
     def __init__(self,
@@ -1874,11 +1874,11 @@ class MDDataCommon(MDDataBase):
         pass
 
 class MDDataTopology(MDDataCommon):
-    """ Stores the topology data in a conveniate 
-        way that individual components of topology 
-        data can be accessed as numpy arrays or 
+    """ Stores the topology data in a conveniate
+        way that individual components of topology
+        data can be accessed as numpy arrays or
         dictionaries.
-        
+
         n_ values are numbers
         _types values are numbers in numpy array
         _labels values are a list of strings
@@ -1919,7 +1919,7 @@ class MDDataTopology(MDDataCommon):
         pass
 
 class MDDataTrajectory(MDDataCommon):
-    """ Stores the trajectory data as well as 
+    """ Stores the trajectory data as well as
         velocities and forces, if they exist.
     """
     def __init__(self,
@@ -1938,10 +1938,10 @@ class MDDataTrajectory(MDDataCommon):
         pass
 
 class MDDataThermo(MDDataCommon):
-    """ Stores the thermostat values (energies) 
+    """ Stores the thermostat values (energies)
         data as well as, if it exists.
 
-        _tensor variables are Nx3x3 numpy arrays 
+        _tensor variables are Nx3x3 numpy arrays
             that hold 3x3 tensors of all N steps (n_steps).
     """
     def __init__(self,
@@ -1973,7 +1973,7 @@ class MDDataThermo(MDDataCommon):
 class MDDataAccess(object):
     """Data access class for various different atomic coordinate files.
 
-    Reading topology and trajectory files using one interface 
+    Reading topology and trajectory files using one interface
     for different MD data analyses packages.
 
     In current version, the following packages are supported:
@@ -1981,21 +1981,21 @@ class MDDataAccess(object):
         mdanalysis   : MDAnalysis 0.16.2 (27 Jun 2017) and 0.17.1 (4 Sep 2017)
         ase          : ASE 3.14.1 (28 Jun 2017)
         usersupplied : self.UserSuppliedInterface
-    
-    If user does not supply a package name or an ordering list for the 
-    interfaces using the names at first column above, the packages will 
+
+    If the user does not supply a package name or an ordering list for the
+    interfaces using the names in the first column above, the packages will
     be used for accessing data in given files with the order above.
 
-    A user defined function can also be supplied to the high-level 
+    A user defined function can also be supplied to the high-level
     data access in MDDataAccess. (See self.user_formats).
 
     Additional Interfaces:
         A common interface to access thermodynamical quantities for GROMACS
-        is added through panedr package. To initiate the file handler use 
-        thermoFileHandler function and to access the values at each step 
+        is added through the panedr package. To initiate the file handler use
+        the thermoFileHandler function and to access the values at each step
         use thermo_iread which returns a dictionary with all properties.
         For a full list of properties see docs of thermo_iread.
-        
+
         Interface is supplied by the following package:
           Panedr 0.2 (15 Jan 2016)
 
@@ -2020,7 +2020,7 @@ class MDDataAccess(object):
         self.outputcoords = None
         self.trajtype = None
         self.forcefield = None # Main storage for force field parameters
-        self.thermostats = None # Main storage for thermodynamical quantities 
+        self.thermostats = None # Main storage for thermodynamical quantities
                                    #  and properties such as energies, temperatures
         self.forcefieldhandler = None # Main storage for force field parameters
         self.access_ui = None
@@ -2056,7 +2056,7 @@ class MDDataAccess(object):
         self.trajiter = None # The object parsing the trajectory file
         self.trajcode = None # To explicitly define the parsing library (pymolfile, mdtraj, ASE ...) for trajectory files
         self.trajfile = None # File name of the trajectory file with path if needed
-        self.trajstream = None # List of lines in a text stream of input trajectory 
+        self.trajstream = None # List of lines in a text stream of input trajectory
         self.trajectory = None
 
     def init_incoord(self):
@@ -2067,7 +2067,7 @@ class MDDataAccess(object):
         self.incoorditer = None # The object parsing the trajectory file
         self.incoordcode = None # To explicitly define the parsing library (pymolfile, mdtraj, ASE ...) for trajectory files
         self.incoordfile = None # File name of the trajectory file with path if needed
-        self.incoordstream = None # List of lines in a text stream of input trajectory 
+        self.incoordstream = None # List of lines in a text stream of input trajectory
         self.inputpositions = None
 
     def init_outcoord(self):
@@ -2078,7 +2078,7 @@ class MDDataAccess(object):
         self.outcoorditer = None # The iteration at the output file
         self.outcoordcode = None # To explicitly define the parsing library (pymolfile, mdtraj, ASE ...) for the files
         self.outcoordfile = None # File name of the file with path if needed
+        self.outcoordstream = None # List of lines in a text stream of output coordinates
+        self.outcoordstream = None # List of lines in a text stream of input trajectory
         self.outputpositions = None
 
     def init_thermo(self):
@@ -2120,7 +2120,7 @@ class MDDataAccess(object):
     def load(self, interface=None):
         """Loads the file handles for trajectory and/or topology
         """
-        
+
         #if self.topohandler is None:
         #    if self.topofile:
         self.check_topology_format_support()
@@ -2129,11 +2129,11 @@ class MDDataAccess(object):
             self.check_trajectory_format_support("traj")
 
         return self.trajhandler
-    
+
     def load_topology(self):
         """Loads the file handles for topology only
         """
-        
+
         #if self.topohandler is None:
         #    if self.topofile:
         #        self.check_topology_format_support()
@@ -2143,31 +2143,31 @@ class MDDataAccess(object):
         self.check_topology_format_support()
 
         return self.topohandler
-    
+
     def load_incoord(self):
         """Loads the file handles for coordinates only
         """
-        
+
         if self.incoordhandler is None:
         #    if self.incoordfile:
             self.check_trajectory_format_support("input")
 
         return self.incoordhandler
-    
+
     def load_outcoord(self):
         """Loads the file handles for coordinates only
         """
-        
+
         if self.outcoordhandler is None:
             if self.outcoordfile:
                 self.check_trajectory_format_support("output")
 
         return self.outcoordhandler
-    
+
     def load_thermo(self):
         """Loads the file handles for topology only
         """
-        
+
         if self.thermohandler is None:
             if self.thermofile:
                 self.thermohandler = panedr.edr_to_df(self.thermofile, verbose=False)
@@ -2181,14 +2181,14 @@ class MDDataAccess(object):
     def get_dir_base_extension(self, file_name):
         """ Splits directory, file base and file extensions
 
-            Returns: directory without leading '/', 
+            Returns: directory without leading '/',
             file base name, and file extension without '.'
         """
         file_base, file_extension_with_dot = os.path.splitext(os.path.basename(file_name))
         file_extension = file_extension_with_dot.split(".")[-1]
         file_dir = os.path.dirname(file_name)
         return file_dir, file_base, file_extension
-    
+
     def get_file_format(self, filename, givenformat):
         """Returns format of a file from extension
         """
@@ -2249,7 +2249,7 @@ class MDDataAccess(object):
                     ziptype = None
                     filetopoformat, zipfile, ziptype = get_zipType(self.topofile)
                     ftopoform = re.sub('[.]', '', filetopoformat)
-                    if('GROMOSTOP' == ftopoform.upper() or 
+                    if('GROMOSTOP' == ftopoform.upper() or
                        'GROMOSCNF' == ftopoform.upper(),
                        'CNF' == ftopoform.upper(),
                        'TOP' == ftopoform.upper()):
@@ -2262,14 +2262,14 @@ class MDDataAccess(object):
                     topohandler_check = None
                     charmmcoor_dict = None
                     filetopoformat = re.sub('[.]', '', fileloadformat)
-                    if('CHARMMCOR' == filetopoformat.upper() or 
-                       'CHARMMCOOR' == filetopoformat.upper() or 
-                       #'COR' == file_format.upper() or 
-                       #'CRD' == file_format.upper() or 
+                    if('CHARMMCOR' == filetopoformat.upper() or
+                       'CHARMMCOOR' == filetopoformat.upper() or
+                       #'COR' == file_format.upper() or
+                       #'CRD' == file_format.upper() or
                        'COOR' == filetopoformat.upper()):
                         if self.topofile:
                             charmmcoor_dict = charmm_coor_reader(self.topofile)
-                    if('CHARMMSTRCOR' == filetopoformat.upper() or 
+                    if('CHARMMSTRCOR' == filetopoformat.upper() or
                        'CHARMMSTRCRD' == filetopoformat.upper() or
                        'CHARMMSTREAM' == filetopoformat.upper()):
                         if self.topostream is not None:
@@ -2355,7 +2355,7 @@ class MDDataAccess(object):
                 if self.topohandler:
                     self.topocode = "mdtraj"
 
-            # If MDTraj does not have support for the format 
+            # If MDTraj does not have support for the format
             # or can not load the topology, use MDAnalysis and ASE.
             if self.topohandler is None:
                 self.topohandler = self.load_mdanalysis_topology(fileloadformat)
@@ -2366,7 +2366,7 @@ class MDDataAccess(object):
             if self.topohandler is None:
                 ase_support = False
                 ase_support = self.get_ase_format_support(fileloadformat)
-                # May still have chance that ASE can recognize the 
+                # There may still be a chance that ASE can recognize the
                 # format with its filetype checking function
                 if ase_support is None:
                     if self.topofile:
@@ -2426,7 +2426,7 @@ class MDDataAccess(object):
                     ziptype = None
                     filetrajformat, zipfile, ziptype = get_zipType(chkfile)
                     ftrajform = re.sub('[.]', '', filetrajformat)
-                    if('GROMOSCNF' == ftrajform.upper() or 
+                    if('GROMOSCNF' == ftrajform.upper() or
                        'GROMOSTRC' == ftrajform.upper(),
                        'GROMOSTRV' == ftrajform.upper(),
                        'CNF' == ftrajform.upper(),
@@ -2468,20 +2468,20 @@ class MDDataAccess(object):
                     trajhandler_check = None
                     charmmcoor_dict = None
                     filetrajformat = re.sub('[.]', '', fileloadformat)
-                    if('CHARMMCOOR' == filetrajformat.upper() or 
-                       'CHARMMCOR' == filetrajformat.upper() or 
-                       #'COR' == file_format.upper() or 
-                       #'CRD' == file_format.upper() or 
-                       #'COORBIN' == file_format.upper() or 
+                    if('CHARMMCOOR' == filetrajformat.upper() or
+                       'CHARMMCOR' == filetrajformat.upper() or
+                       #'COR' == file_format.upper() or
+                       #'CRD' == file_format.upper() or
+                       #'COORBIN' == file_format.upper() or
                        'COOR' == filetrajformat.upper()):
                         charmmcoor_dict = charmm_coor_reader(chkfile)
-                    if('CHARMMSTRCOR' == filetrajformat.upper() or 
+                    if('CHARMMSTRCOR' == filetrajformat.upper() or
                        'CHARMMSTRCRD' == filetrajformat.upper() or
                        'CHARMMSTREAM' == filetrajformat.upper()):
-                        if('CHARMMSTR' in filetrajformat.upper() and 
-                           self.topostream is not None and 
-                           (self.trajstream is None and 
-                            self.incoordstream is None and 
+                        if('CHARMMSTR' in filetrajformat.upper() and
+                           self.topostream is not None and
+                           (self.trajstream is None and
+                            self.incoordstream is None and
                             self.outcoordstream is None )):
                             if 'cor' in self.topostream:
                                 charmmcoor_dict = charmm_coor_reader(chkfile, ftextmem=self.topostream['cor'])
@@ -2655,7 +2655,7 @@ class MDDataAccess(object):
                     mdanalysis_format = re.sub('[.]', '', fileloadformat)
                     mdanalysis_format = mdanalysis_format.upper()
                     if self.topohandler is not None and chkfile is not None:
+                        # if the topology handler is an MDAnalysis universe,
+                        # if the topology handler is a MDAnalysis universe,
                         # we may try replacing the trajectory data in it.
                         if isinstance(self.topohandler, mda_u.Universe):
                             try:
@@ -2677,7 +2677,7 @@ class MDDataAccess(object):
                                 usedefault=False
                                 break
                             except (AttributeError, IOError, OSError, ValueError, TypeError):
-                                try: 
+                                try:
                                     universe = mda_u.Universe(self.topohandler, self.trajfile, format=mdanalysis_format)
                                     trajhandler = None
                                     if isinstance(universe, mda_u.Universe):
@@ -2697,7 +2697,7 @@ class MDDataAccess(object):
                                             self.natoms = numatoms
                                         usedefault=False
                                         break
-                                    elif self.is_class_of_module(universe, 
+                                    elif self.is_class_of_module(universe,
                                             mda_c.Trajectory, mda_coordinates_modules):
                                         trajhandler = self.mdanalysis_iread(mdanalysis_handler=universe.trajectory)
                                         if "input" in filetype:
@@ -2717,12 +2717,12 @@ class MDDataAccess(object):
                                 #except (AttributeError, IOError, OSError, ValueError, TypeError):
                                 except IOError:
                                     pass
-                    # if topology handler is not a MDAnalysis Universe 
-                    # or is not initialized, we can try accessing only 
-                    # trajectory file. (A stub/converted topology might be supplied 
+                    # if the topology handler is not an MDAnalysis Universe
+                    # or is not initialized, we can try accessing only the
+                    # trajectory file. (A stub/converted topology might be supplied
                     # for MDAnalysis to solve the needed topology data later)
                     # Hang on it: PDB has both topology and trajectory data.
-                    # DuckTyper: If we can load only trajectory file, 
+                    # DuckTyper: If we can load only trajectory file,
                     #            we may access the data.
                     try:
                         universe = mda_u.Universe(self.trajfile, format=mdanalysis_format)
@@ -2766,7 +2766,7 @@ class MDDataAccess(object):
                                     self.natoms = numatoms
                                 usedefault=False
                                 break
-                        elif self.is_class_of_module(universe, 
+                        elif self.is_class_of_module(universe,
                                 mda_c.Trajectory, mda_coordinates_modules):
                             if self.interfacematch:
                                 if interface in self.topocode:
@@ -2923,11 +2923,11 @@ class MDDataAccess(object):
 
         if usedefault:
             if "input" in filetype:
-                trajhandler = self.incoordhandler 
+                trajhandler = self.incoordhandler
             elif "output" in filetype:
-                trajhandler = self.outcoordhandler 
+                trajhandler = self.outcoordhandler
             else:
-                trajhandler = self.trajhandler 
+                trajhandler = self.trajhandler
             if trajhandler is None:
                 filetrajformat = re.sub('[.]', '', chkformat)
                 # First check whether pymolfile has support for the file type
@@ -3043,11 +3043,11 @@ class MDDataAccess(object):
                                     self.natoms = numatoms
 
             if "input" in filetype:
-                trajhandler = self.incoordhandler 
+                trajhandler = self.incoordhandler
             elif "output" in filetype:
-                trajhandler = self.outcoordhandler 
+                trajhandler = self.outcoordhandler
             else:
-                trajhandler = self.trajhandler 
+                trajhandler = self.trajhandler
             if trajhandler is None:
                 # Second,check whether MDtraj has support for the file type
                 # trajhandler_check = mdt_FormatRegistry.loaders[file_format]
@@ -3064,9 +3064,9 @@ class MDDataAccess(object):
                             handlerresult = iter(trajhandler)
                         except:
                             trajhandler = None
-                        if (trajhandler is not None and 
+                        if (trajhandler is not None and
                             handlerresult is not None):
-                            # Now, we know that trajhandler works. We can ignore it at memory 
+                            # Now we know that trajhandler works. We can discard this local copy
                             # and set trajhandler again in its original home at self.
                             trajhandler = None
                             if self.interfacematch:
@@ -3087,14 +3087,14 @@ class MDDataAccess(object):
                                 self.trajcode = "mdtraj"
                                 self.natoms = numatoms
 
-            # If MDTraj does not have support for the format 
+            # If MDTraj does not have support for the format
             # or can not load the trajectory, use MDAnalysis or ASE.
             if "input" in filetype:
-                trajhandler = self.incoordhandler 
+                trajhandler = self.incoordhandler
             elif "output" in filetype:
-                trajhandler = self.outcoordhandler 
+                trajhandler = self.outcoordhandler
             else:
-                trajhandler = self.trajhandler 
+                trajhandler = self.trajhandler
             if trajhandler is None:
                 mdanalysis_format = re.sub('[.]', '', fileloadformat)
                 if self.topohandler is not None and chkfile is not None:
@@ -3115,7 +3115,7 @@ class MDDataAccess(object):
                                 self.trajcode = "mdanalysis"
                                 self.natoms = numatoms
                         except (AttributeError, IOError, OSError, ValueError, TypeError):
-                            try: 
+                            try:
                                 universe = mda_u.Universe(self.topohandler, self.trajfile, format=mdanalysis_format)
                                 if isinstance(universe, mda_u.Universe):
                                     self.topohandler = universe
@@ -3132,7 +3132,7 @@ class MDDataAccess(object):
                                         self.trajhandler = trajhandler
                                         self.trajcode = "mdanalysis"
                                         self.natoms = numatoms
-                                elif self.is_class_of_module(self.universe, 
+                                elif self.is_class_of_module(self.universe,
                                         mda_c.Trajectory, mda_coordinates_modules):
                                     trajhandler = self.mdanalysis_iread(mdanalysis_handler=universe)
                                     if "input" in filetype:
@@ -3186,7 +3186,7 @@ class MDDataAccess(object):
                                 self.trajhandler = trajhandler
                                 self.trajcode = "mdanalysis"
                                 self.natoms = numatoms
-                    elif self.is_class_of_module(self.universe, 
+                    elif self.is_class_of_module(self.universe,
                             mda_c.Trajectory, mda_coordinates_modules):
                         trajhandler = None
                         trajcode = None
@@ -3215,16 +3215,16 @@ class MDDataAccess(object):
                     pass
 
             if "input" in filetype:
-                trajhandler = self.incoordhandler 
+                trajhandler = self.incoordhandler
             elif "output" in filetype:
-                trajhandler = self.outcoordhandler 
+                trajhandler = self.outcoordhandler
             else:
-                trajhandler = self.trajhandler 
+                trajhandler = self.trajhandler
             if trajhandler is None:
                 ase_support = None
                 ase_support = self.get_ase_format_support(fileloadformat)
                 trajcode=None
+                # There may still be a chance that ASE can recognize the
+                # May still have chance that ASE can recognize the
                 # format with its filetype checking function
                 if ase_support is None:
                     if chkfile is not None:
@@ -3287,11 +3287,11 @@ class MDDataAccess(object):
                     self.natoms = numatoms
 
         if "input" in filetype:
-            trajhandler = self.incoordhandler 
+            trajhandler = self.incoordhandler
         elif "output" in filetype:
-            trajhandler = self.outcoordhandler 
+            trajhandler = self.outcoordhandler
         else:
-            trajhandler = self.trajhandler 
+            trajhandler = self.trajhandler
         if trajhandler is None:
             return False
         else:
@@ -3383,7 +3383,7 @@ class MDDataAccess(object):
         configuration at a time.
         """
         return self.trajhandler.cell_lengths
-    
+
     def load_pymolfile_topology(self, file_format=None):
         """ function to call pymolfile topology reader
 
@@ -3608,9 +3608,9 @@ class MDDataAccess(object):
         return topology
 
     def load_mdtraj_topology(self, file_format, **kwargs):
-        """Borrowed from mdtraj.trajectory to remove 
+        """Borrowed from mdtraj.trajectory to remove
            IOError and TypeError warnings
-    
+
         Get the topology from an argument of indeterminate type.
         If top is a string, we try loading a pdb, if its a trajectory
         we extract its topology.
@@ -3700,7 +3700,7 @@ class MDDataAccess(object):
             handler = None
             try:
                 handler = ase_io.read(file_name, format=ioformat)
-            except (ModuleNotFoundError, ImportError, AttributeError, 
+            except (ModuleNotFoundError, ImportError, AttributeError,
                 IOError, OSError, ValueError):
                 pass
             if handler is None:
@@ -3710,10 +3710,10 @@ class MDDataAccess(object):
                         handler_iter = iter(handler_iread)
                         handler_iread = None
                         handler = ase_io.iread(file_name, index=":", format=ioformat)
-                    except (ModuleNotFoundError, ImportError, AttributeError, 
+                    except (ModuleNotFoundError, ImportError, AttributeError,
                         IOError, OSError, ValueError):
                         pass
-                except (ModuleNotFoundError, ImportError, AttributeError, 
+                except (ModuleNotFoundError, ImportError, AttributeError,
                     IOError, OSError, ValueError):
                     pass
             return handler
@@ -3758,7 +3758,7 @@ class MDDataAccess(object):
         handler = None
         try:
             handler = ase_io.iread(filename, index=":", format=fileformat)
-        except (AttributeError, IOError, OSError, 
+        except (AttributeError, IOError, OSError,
                 ValueError, ImportError):
             return handler
 
@@ -3766,12 +3766,12 @@ class MDDataAccess(object):
             try:
                 for value in handler:
                     yield value.get_positions()
-            except (AttributeError, IOError, OSError, 
+            except (AttributeError, IOError, OSError,
                     ValueError, ImportError):
                 return handler
         else:
             return handler
-    
+
     def charmm_coor_toporead(self, coorDict=None):
         """Returns the atom list for CHARMM COOR input
         """
@@ -3894,17 +3894,17 @@ class MDDataAccess(object):
                     if "has_velocities" in positions:
                         if positions["has_velocities"]>0:
                             traj.velocities = positions["velocities"]
-                    if("A" in positions and 
-                       "B" in positions and 
+                    if("A" in positions and
+                       "B" in positions and
                        "C" in positions):
                         traj.unitcell_lengths = np.asarray(
                                 [positions["A"],positions["B"],positions["C"]])
                     if("alpha" in positions and
-                       "beta" in positions and 
+                       "beta" in positions and
                        "gamma" in positions):
                         traj.unitcell_angles = np.asarray(
                                 [positions["alpha"],positions["beta"],positions["gamma"]])
-                    if("A" in positions and "B" in positions and 
+                    if("A" in positions and "B" in positions and
                        "C" in positions and "alpha" in positions and
                        "beta" in positions and "gamma" in positions):
                         a = positions["A"]
@@ -4089,7 +4089,7 @@ class MDDataAccess(object):
     def topologyFileHandler(self, filename, fileformatlist=None, interfacelist=None):
         self.init_topo()
         self.set_topo(filename)
-        if (self.interfaceorder is None and 
+        if (self.interfaceorder is None and
             interfacelist is not None):
             self.interfaceorder = interfacelist
         self.load_topology()
@@ -4150,7 +4150,7 @@ class MDDataAccess(object):
     def thermoFileHandler(self, filename, fileformatlist=None, interfacelist=None):
         self.init_thermo()
         self.set_thermo(filename)
-        if (self.interfaceorder is None and 
+        if (self.interfaceorder is None and
             interfacelist is not None):
             self.interfaceorder = interfacelist
         self.load_thermo()
@@ -4264,7 +4264,7 @@ class MDDataAccess(object):
                             if parser_ui.fileDict[fileItem].strDict:
                                 self.trajstream.update(parser_ui.fileDict[fileItem].strDict)
                         traj_loaded = self.load()
-                        try: 
+                        try:
                             self.atompositions = self.iread()
                         except TypeError:
                             pass
@@ -4341,12 +4341,12 @@ class MDDataAccess(object):
         self.topology.topoDict = {}
         if("gromosread" in self.topocode):
             self.topology = gtoConvertTopoDict(self.topology, self.topohandler)
-        if("parmed" in self.topocode or 
+        if("parmed" in self.topocode or
            "charmmcoor" in self.topocode):
             self.topology = pmdConvertTopoDict(self.topology, self.topohandler)
-        if((self.trajcode is not None or 
-            self.incoordcode is not None or 
-            self.outcoordcode is not None) and 
+        if((self.trajcode is not None or
+            self.incoordcode is not None or
+            self.outcoordcode is not None) and
             self.topology is None):
             if "charmmcoor" in self.trajcode:
                 self.topology = pmdConvertTopoDict(self.topology, self.trajhandler)
@@ -4357,13 +4357,13 @@ class MDDataAccess(object):
         if "pymolfile" in self.topocode:
             self.topology = pymConvertTopoDict(self.topology, self.topohandler)
         if(self.trajcode and not self.topology):
-            if ("pymolfile" in self.topocode and 
+            if ("pymolfile" in self.topocode and
                 "pymolfile" in self.trajcode):
                 self.topology = pymConvertTopoDict(self.topology, self.trajhandler.Topology)
         if "mdtraj" in self.topocode:
             self.topology = mdtConvertTopoDict(self.topology, self.topohandler)
         if(self.trajcode and not self.topology):
-            if ("mdtraj" in self.topocode and 
+            if ("mdtraj" in self.topocode and
                 "mdtraj" in self.trajcode):
                 self.topology = mdtConvertTopoDict(self.topology, self.trajhandler.get_topology())
         if "mdanalysis" in self.topocode:
@@ -4439,7 +4439,7 @@ class MDDataAccess(object):
         try:
             self.thermohandler.keys()
             self.thermostats = MDDataThermo()
-            self.thermostats.thermoDict = {} 
+            self.thermostats.thermoDict = {}
         except (AttributeError, IOError, OSError, ValueError, TypeError):
             return
 
@@ -4504,7 +4504,7 @@ class MDDataAccess(object):
                     ]).reshape((3, 3))
 
         # pandas DataFrame columns to keys
-        dfkeys = self.thermohandler.columns.get_values()
+        dfkeys = self.thermohandler.columns.values
         dfid = 0
         for dfkey, val in self.thermohandler.items():
             self.thermostats.thermoDict.update({dfkeys[dfid]:np.asarray(val)})
@@ -4522,7 +4522,7 @@ class MDDataAccess(object):
         """Read the first configuration of the coordinate file to extract the
            number of atoms in it.
         """
-        if(topocode is None and 
+        if(topocode is None and
             self.topocode is not None):
             topocode = self.topocode
         n_atoms=None
@@ -4553,7 +4553,7 @@ class MDDataAccess(object):
         """Read the first configuration of the coordinate file to extract the
            number of atoms in it.
         """
-        if(trajcode is None and 
+        if(trajcode is None and
             self.trajcode is not None):
             trajcode = self.trajcode
         n_atoms=None
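
Beyond the whitespace cleanup, this patch carries a couple of functional changes: the deprecated pandas call columns.get_values() is replaced by columns.values, and (in the MetaInfoStorage.py hunks below) the *.nomadmetainfo.json paths are resolved from the installed metainfo package rather than a path relative to the repository. A minimal sketch of both idioms, assuming a pandas DataFrame of thermodynamic columns and a metainfo package that ships the JSON files alongside its __init__.py:

    import os
    import numpy as np
    import pandas as pd

    # pandas: Index.get_values() was deprecated in 0.25 and removed in 1.0;
    # .values returns the column labels as a plain numpy array.
    df = pd.DataFrame({"Time": [0.0, 1.0], "Potential": [-10.5, -10.7]})
    dfkeys = df.columns.values            # array(['Time', 'Potential'], dtype=object)
    thermo = {key: np.asarray(df[key]) for key in dfkeys}

    # Package data: locate a file shipped with an installed package via its
    # __file__ attribute instead of a path relative to this source tree.
    import metainfo  # assumed to bundle the *.nomadmetainfo.json files
    COMMON_META_INFO_PATH = os.path.normpath(os.path.join(
        os.path.dirname(os.path.abspath(metainfo.__file__)),
        "common.nomadmetainfo.json"))
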
diff --git a/common/python/nomadcore/metainfo_storage/MetaInfoStorage.py b/common/python/nomadcore/metainfo_storage/MetaInfoStorage.py
index b6c641f697cc1dadaa5177627c065412a970ea15..5d0a6fe31d97cbd41200cd2ff1b8d81e4bd23c47 100644
--- a/common/python/nomadcore/metainfo_storage/MetaInfoStorage.py
+++ b/common/python/nomadcore/metainfo_storage/MetaInfoStorage.py
@@ -1,4 +1,3 @@
-import setup_paths
 import numpy as np
 from contextlib import contextmanager
 import logging
@@ -8,16 +7,18 @@ import re
 import ast
 from collections import namedtuple
 
+import metainfo
+
 COMMON_META_INFO_PATH = os.path.normpath(os.path.join(
-    os.path.dirname(os.path.abspath(__file__)), 
-    "../../../../../nomad-meta-info/meta_info/nomad_meta_info/common.nomadmetainfo.json"))
+    os.path.dirname(os.path.abspath(metainfo.__file__)),
+    "common.nomadmetainfo.json"))
 
 PUBLIC_META_INFO_PATH = os.path.normpath(os.path.join(
-    os.path.dirname(os.path.abspath(__file__)), 
-    "../../../../../nomad-meta-info/meta_info/nomad_meta_info/public.nomadmetainfo.json"))
+    os.path.dirname(os.path.abspath(metainfo.__file__)),
+    "public.nomadmetainfo.json"))
 
 NOTEXCEPT = re.compile(r'[a-cf-zA-CF-Z!\?,{}\[\]]')
-    
+
 def is_number(val):
         try:
             float(val)
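Dropping setup_paths and resolving the meta-info JSON files next to the installed `metainfo` package, as the hunk above does, can also be written with importlib.resources; a sketch assuming the package ships the JSON files as package data:

    # Assumes a `metainfo` package that ships common.nomadmetainfo.json as package data.
    from importlib.resources import files  # Python 3.9+

    common_path = str(files("metainfo") / "common.nomadmetainfo.json")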
@@ -26,9 +27,9 @@ def is_number(val):
             return False
 
 def strcleaner(val):
-        unwantedkeys = [ 
+        unwantedkeys = [
                 "system", "eval", "return",
-                "ctypes", "setup", "import", 
+                "ctypes", "setup", "import",
                 "git", "swig", "cython"]
         for keyword in unwantedkeys:
             val = val.replace(keyword, '')
@@ -44,9 +45,9 @@ def strisinstance(val, typ):
         anytype=None
         for t in typlist:
             try:
-                if("list" in t and 
-                   "[" in val and 
-                   "]" in val and 
+                if("list" in t and
+                   "[" in val and
+                   "]" in val and
                    "," in val):
                     if isinstance(literal_eval(strcleaner(val)), list):
                         anytype="list"
@@ -54,9 +55,9 @@ def strisinstance(val, typ):
                     elif isinstance(literal_eval(strcleaner(val)), np.ndarray):
                         anytype="np.ndarray"
                         break
-                elif("tuple" in t and 
-                   "(" in val and 
-                   ")" in val and 
+                elif("tuple" in t and
+                   "(" in val and
+                   ")" in val and
                    "," in val):
                     if isinstance(literal_eval(strcleaner(val)), tuple):
                         anytype="tuple"
@@ -97,7 +98,7 @@ class Container(object):
     def add(self, *args):
         for arg in args:
             if isinstance(arg, Container):
-                self.Containers.append(arg) 
+                self.Containers.append(arg)
             if isinstance(arg, Storage):
                 if self.Storage:
                     self.Storage(arg.__dict__)
@@ -113,7 +114,7 @@ class Container(object):
                     self.Storage = Storage(arg)
             if isinstance(arg, JsonMetaInfo):
                 for item in arg.jsonList:
-                    self.add(item) 
+                    self.add(item)
 
     def build(self, metadata, startsection, umaskdict):
         if isinstance(metadata, JsonMetaInfo):
@@ -144,7 +145,7 @@ class Container(object):
     def updaterefs(self, *args):
         for arg in args:
             if isinstance(arg, Container):
-                self.References.extend(arg.Name) 
+                self.References.extend(arg.Name)
 
     def update(self, *args):
         for arg in args:
@@ -260,7 +261,7 @@ class Container(object):
                 if "muteSections" in arg:
                     if self.Name in arg["muteSections"]:
                         self.Active = False
-                        
+
     def resetValues(self, *args):
         for arg in args:
             if isinstance(arg, dict):
@@ -275,23 +276,23 @@ class Container(object):
         """ Updating value with the rules given in depends
 
          Updating values follows the following order:
-          1) If 'depends' is supplied (not empty or not None), 
+          1) If 'depends' is supplied (not empty or not None),
              the tests in the depends list will be checked in order.
-             If one of the tests is successful than the one of the 
+             If one of the tests is successful, then one of the
              values in 'assign' or 'value' will be updated for the item.
              Here 'assign' will assign a new value in the given string and
              'value' will assign the value of the given key item.
-             (Ex. : 'assign' : 'CG' will update the item with 'CG' while 
-             'value' : 'NATOM' will update the item with number of atoms 
+             (Ex. : 'assign' : 'CG' will update the item with 'CG' while
+             'value' : 'NATOM' will update the item with number of atoms
              returned by the value of the NATOM key, which is stored in the lookup dict.)
-          2) If 'depends' is supplied but a lookup dictionary is not than 
+          2) If 'depends' is supplied but a lookup dictionary is not, then
              only the values of attributes in the sections can be used for test.
              The rest of the tests and assignments are updated as in case (1).
           3) If 'depends' is not supplied, subfunction is used to update value.
-          4) If 'depends' and subfunction are not supplied but value of 
-             MetaInfoMap is supplied, the value will be assign directly from the value 
+          4) If 'depends' and subfunction are not supplied but value of
+             MetaInfoMap is supplied, the value will be assigned directly from the value
              item.
-          5) If none of the above items are supplied, this function will return None 
+          5) If none of the above items are supplied, this function will return None
              to not update any values for the selected item.
         """
         # Check whether depends is supplied in the item.
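A compact sketch of the precedence spelled out in the docstring above; the item fields and test callables are hypothetical stand-ins, not the actual MetaInfoMap/Container API:

    def resolve_update(item, lookup=None):
        lookup = lookup or {}
        for dep in item.get("depends") or []:        # (1)-(2): depends tests checked in order
            if dep["test"](lookup):
                if "assign" in dep:
                    return dep["assign"]             # assign the given literal
                if "value" in dep:
                    return lookup.get(dep["value"])  # value of another key, e.g. NATOM
        if item.get("subfunction"):                  # (3): fall back to the subfunction
            return item["subfunction"](item)
        if item.get("value") is not None:            # (4): plain value of the map
            return item["value"]
        return None                                  # (5): nothing to update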
@@ -376,8 +377,8 @@ class Container(object):
         return storeValue, updateValue, localdict
 
     def convertToNumber(self, updateValue, valtype):
-        acceptvals = ["float", "int", "list", "tuple", 
-                      "np.asarray", "np.array", 
+        acceptvals = ["float", "int", "list", "tuple",
+                      "np.asarray", "np.array",
                       "set", "boolean", "bytes"]
         if valtype in acceptvals:
             if(isinstance(updateValue, list) or isinstance(updateValue, tuple)):
@@ -405,7 +406,7 @@ class Container(object):
             except (TypeError,ValueError):
                 newUpdateValue = updateValue
         return newUpdateValue
-    
+
     def convertUnits(self, updateValue, unit, unitdict):
         if(isinstance(updateValue, list) or isinstance(updateValue, tuple)):
             updateValue = [float(ival) * self.unitConverter(
@@ -435,7 +436,7 @@ class Container(object):
 
             The unit names are converted to numbers and the resulting
             expression will be evaluated by python.
-            Ex.: unit = 'electron-volt/Angstrom^3' 
+            Ex.: unit = 'electron-volt/Angstrom^3'
                  will be converted to
                  unit = '1.602176565e-19*1.0/1.0e-10**3'
                  factor = eval(unit) = 160.2176565e+9 Joule/meter^3 (Pascal)
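The substitution-and-eval conversion described above can be reproduced with a short sketch; the factor table is illustrative, not the parser's full unit dictionary:

    UNIT_FACTORS = {"electron-volt": 1.602176565e-19, "Angstrom": 1.0e-10}

    def unit_factor(unit):
        # Replace each known unit name by its SI factor, map '^' to '**',
        # then evaluate the remaining arithmetic expression.
        for name, factor in UNIT_FACTORS.items():
            unit = unit.replace(name, repr(factor))
        return eval(unit.replace("^", "**"))

    print(unit_factor("electron-volt/Angstrom^3"))   # ~1.602e+11 J/m^3, i.e. Pascal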
@@ -482,7 +483,7 @@ class Container(object):
                         if depdict['value'] in localdict:
                             checkval = localdict[depdict['value']]
                         else:
-                            accessName, checkval = self.findNameInLookupDict(depdict['value'], 
+                            accessName, checkval = self.findNameInLookupDict(depdict['value'],
                                     item.lookupdict)
                             localdict.update({depdict['value'] : checkval})
                         return storeValue, checkval, localdict
@@ -537,7 +538,7 @@ class Container(object):
 
     def updateBackendStorage(self, backend):
         for itemk in self.Storage.__dict__:
-            if(self.Storage.__dict__[itemk]["act"] or 
+            if(self.Storage.__dict__[itemk]["act"] or
                 self.Storage.__dict__[itemk]["stor"]):
                 self.Storage.__dict__[itemk]["act"] = False
                 self.Storage.__dict__[itemk]["stor"] = False
@@ -559,8 +560,8 @@ class Container(object):
             if updateValue is not None:
                 itemkinlist = True if itemk in self.Storage.__dict__ else False
                 if itemkinlist is False:
-                    if (itemk.startswith('x_') and 
-                        itemv.activeSections): 
+                    if (itemk.startswith('x_') and
+                        itemv.activeSections):
                         attr_update=False
                         for actSect in itemv.activeSections:
                             if actSect in self.Name:
@@ -568,10 +569,10 @@ class Container(object):
                         if attr_update:
                             attr_act = False
                             attrvalues = {
-                                'act' : attr_act, 
-                                'val' : None, 
-                                'stor': False, 
-                                'kind': None, 
+                                'act' : attr_act,
+                                'val' : None,
+                                'stor': False,
+                                'kind': None,
                                 'dtyp': "C",
                                 'unit': None,
                                 'size': [],
@@ -608,8 +609,8 @@ class Container(object):
                         #No need to store, assign the updated value
                         self.Storage.__dict__[itemk]["val"] = updateValue
                     if itemv.activeInfo:
-                        if (itemk.startswith('x_') and 
-                            itemv.activeSections): 
+                        if (itemk.startswith('x_') and
+                            itemv.activeSections):
                             attr_update=False
                             for actSect in itemv.activeSections:
                                 if actSect in self.Name:
@@ -665,16 +666,16 @@ class Container(object):
         else:
             if printok:
                 if color:
-                    string = '%s\033[9%sm-->[' % (decorate + self.Indent, str(color%6 + 1)) 
+                    string = '%s\033[9%sm-->[' % (decorate + self.Indent, str(color%6 + 1))
                     string = string + ','.join(['%s' % (name) for name in self.Name]) + ']\n\033[0m'
                 else:
-                    string = '%s-->[' % (decorate + self.Indent) 
+                    string = '%s-->[' % (decorate + self.Indent)
                     string = string + ','.join(['%s' % (name) for name in self.Name]) + ']\n'
         if printok:
             if color:
-                string = string + '%s\033[9%sm|\033[0m   `.\n' % (decorate + self.Indent, str(color%6 + 1)) 
+                string = string + '%s\033[9%sm|\033[0m   `.\n' % (decorate + self.Indent, str(color%6 + 1))
             else:
-                string = string + '%s|   `.\n' % (decorate + self.Indent) 
+                string = string + '%s|   `.\n' % (decorate + self.Indent)
         if self.Storage:
             for key in self.Storage.__dict__:
                 printattrok = False
@@ -686,19 +687,19 @@ class Container(object):
                 if printattrok and printok:
                     if color:
                         if onlynames:
-                            string = string + '%s\033[9%sm|\033[0m    |__.%s\n' % (decorate 
+                            string = string + '%s\033[9%sm|\033[0m    |__.%s\n' % (decorate
                                     + self.Indent, str(color%6 + 1), key)
                         else:
-                            string = string + '%s\033[9%sm|\033[0m    |__.%s : Active=%s Value=%s\n' % (decorate 
-                                    + self.Indent, str(color%6 + 1), key, self.Storage.__dict__[key]["act"], 
+                            string = string + '%s\033[9%sm|\033[0m    |__.%s : Active=%s Value=%s\n' % (decorate
+                                    + self.Indent, str(color%6 + 1), key, self.Storage.__dict__[key]["act"],
                                     self.Storage.__dict__[key]["val"])
                     else:
                         if onlynames:
-                            string = string + '%s|    |__.%s\n' % (decorate + 
+                            string = string + '%s|    |__.%s\n' % (decorate +
                                     self.Indent, key)
                         else:
-                            string = string + '%s|    |__.%s : Active=%s Value=%s\n' % (decorate + 
-                                    self.Indent, key, self.Storage.__dict__[key]["act"], 
+                            string = string + '%s|    |__.%s : Active=%s Value=%s\n' % (decorate +
+                                    self.Indent, key, self.Storage.__dict__[key]["act"],
                                     self.Storage.__dict__[key]["val"])
             if color:
                 string = string + '%s\033[9%sm|\033[0m\n' % (decorate + self.Indent, str(color%6 + 1))
@@ -707,13 +708,13 @@ class Container(object):
         if self.Containers:
             for module in self.Containers:
                 if color:
-                    string = string + '%s\033[9%sm|\033[0m\n' % (decorate + self.Indent, 
-                            str(color%6 + 1)) + module.__str__(self.Name, 
-                            '%s\033[9%sm|\033[0m' % (decorate + self.Indent, str(color%6 + 1)), 
+                    string = string + '%s\033[9%sm|\033[0m\n' % (decorate + self.Indent,
+                            str(color%6 + 1)) + module.__str__(self.Name,
+                            '%s\033[9%sm|\033[0m' % (decorate + self.Indent, str(color%6 + 1)),
                             color, printactive, onlynames)
                 else:
-                    string = string + '%s|\n' % (decorate + self.Indent) + module.__str__(self.Name, 
-                            '%s|' % (decorate + self.Indent), 
+                    string = string + '%s|\n' % (decorate + self.Indent) + module.__str__(self.Name,
+                            '%s|' % (decorate + self.Indent),
                             color, printactive, onlynames)
         return string
 
@@ -752,7 +753,7 @@ class Storage(dict):
 
 class JsonMetaInfo(object):
     """ Json file loader for meta info data of NOMAD.
-        Loads data and extracts values of items 
+        Loads data and extracts values of items
         with specified superNames
     """
     def __init__(self, *args):
@@ -800,14 +801,14 @@ class JsonMetaInfo(object):
                 refs = item['referencedSections']
             except:
                 refs = []
-            if ('type_section' in kindname or 
+            if ('type_section' in kindname or
                 sectionname not in superlist):
                 continue
             attrvalues = {
-                    'act' : False, 
-                    'stor': False, 
-                    'val' : None, 
-                    'kind': kindname, 
+                    'act' : False,
+                    'stor': False,
+                    'val' : None,
+                    'kind': kindname,
                     'dtyp': dtyp,
                     'unit': unit,
                     'size': size,
@@ -828,14 +829,14 @@ class JsonMetaInfo(object):
                 kindname = item['kindStr']
             except:
                 kindname = []
-            if ('type_section' in kindname or 
-                'type_abstract_document_content' in kindname): 
+            if ('type_section' in kindname or
+                'type_abstract_document_content' in kindname):
                 if sectionname in superlist:
                     searchList.append(itemname)
                 if itemname not in nameList:
                     nameList.append(itemname)
         for name in searchList:
-            if (set([name]) not in set(nameList) and 
+            if (set([name]) not in set(nameList) and
                 self.isparent(name)):
                 siblings.append(name)
         return siblings
@@ -880,10 +881,10 @@ class JsonMetaInfo(object):
 
 if __name__ == "__main__":
     run = Container('section_run')
-    exclude_dict = { 
+    exclude_dict = {
             'section_run' : [
-            'section_processor_info', 
-            'section_processor_log', 
+            'section_processor_info',
+            'section_processor_log',
             'section_springer_material',
             'section_repository_info'
             ]}
@@ -899,7 +900,7 @@ if __name__ == "__main__":
                 'topology_force_field_name' : {'depends' : [[]], 'assign' : "ReaxFF"}
                 }
             }
-    
+
     run.populate(jsonmetadata, 'section_run', exclude_dict, updateDict)
     run.Color = 4
     for container in run.Containers:
diff --git a/common/python/nomadcore/smart_parser/SmartParserCommon.py b/common/python/nomadcore/smart_parser/SmartParserCommon.py
index 3dcb371fb07a7aa05dd23935f0f6528e63aecbc4..401b9ae105ea67cf85e5921c0f58ff40957128a9 100644
--- a/common/python/nomadcore/smart_parser/SmartParserCommon.py
+++ b/common/python/nomadcore/smart_parser/SmartParserCommon.py
@@ -1,4 +1,3 @@
-import setup_paths
 from nomadcore.local_meta_info import loadJsonFile, InfoKindEl
 from nomadcore.unit_conversion.unit_conversion import convert_unit
 from nomadcore.caching_backend import CachingLevel
@@ -6,7 +5,7 @@ from nomadcore.simple_parser import mainFunction
 from nomadcore.simple_parser import SimpleMatcher as SM
 from nomadcore.metainfo_storage.MetaInfoStorage import COMMON_META_INFO_PATH, PUBLIC_META_INFO_PATH
 from nomadcore.metainfo_storage import MetaInfoStorage as mStore
-from nomadcore.metainfo_storage.MetaInfoStorage import strcleaner, strisinstance, literal_eval 
+from nomadcore.metainfo_storage.MetaInfoStorage import strcleaner, strisinstance, literal_eval
 from nomadcore.smart_parser.SmartParserDictionary import getDict_MetaStrInDict, getList_MetaStrInDict, get_unitDict
 from contextlib import contextmanager
 import numpy as np
@@ -46,9 +45,9 @@ def conv_float(n, default=None):
             return None
 
 def get_metaInfo(self):
-    metaInfoEnv, warnings = loadJsonFile(filePath=self.META_INFO_PATH, 
-                                         dependencyLoader=None, 
-                                         extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS, 
+    metaInfoEnv, warnings = loadJsonFile(filePath=self.META_INFO_PATH,
+                                         dependencyLoader=None,
+                                         extraArgsHandling=InfoKindEl.ADD_EXTRA_ARGS,
                                          uri=None)
     metaInfoEnv = set_section_metaInfoEnv(self.PARSERTAG, metaInfoEnv, self.sectionDict)
     metaInfoEnv = setDict_metaInfoEnv(metaInfoEnv, self.metaDicts)
@@ -71,7 +70,7 @@ def set_section_metaInfoEnv(parsertag, infoEnv, sectionDict):
                 name=parsertag+'_%s_%s' % (nameDict["metaNameTag"], newName),
                 kindStr=nameDict["listTypStr"],
                 repeats=nameDict["repeatingSection"],
-                superNames=nameDict["supraNames"])) 
+                superNames=nameDict["supraNames"]))
 
     return infoEnv
 
@@ -93,7 +92,7 @@ def setDict_metaInfoEnv(infoEnv, metaDicts):
                     description='auto generated meta info data',
                     dtypeStr=nameDict[keyName].metaInfoType,
                     shape=[],
-                    superNames=nameDict[keyName].activeSections)) 
+                    superNames=nameDict[keyName].activeSections))
 
     return infoEnv
 
@@ -114,7 +113,7 @@ def set_metaInfoEnv(parsertag, infoEnv, metaNameTag, newList, listTypStr, supraN
                 description='auto generated meta info data',
                 dtypeStr=listTypStr,
                 shape=[],
-                superNames=supraNames)) 
+                superNames=supraNames))
 
     return infoEnv
 
@@ -128,7 +127,7 @@ def namelist_matcher(cText, key, matchWith):
         delimeter = matchWith.replace('UD', '')
         matchThisParsy = re.compile(r"(?:%s)\s*(?:\s|=|:)\s*(?:'|\")?"
                                      "(?P<%s>[\-+0-9.a-zA-Z:]+)"
-                                     "(?:'|\")?\s*[%s]" 
+                                     "(?:'|\")?\s*[%s]"
                                      % (cText, key, delimeter))
     elif 'BOL' in matchWith:
         matchThisParsy = re.compile(r"(?:'|\")?(?P<%s>.*)(?:'|\")?"
@@ -139,7 +138,7 @@ def namelist_matcher(cText, key, matchWith):
                                      "(?:'|\")?\s*(?:%s)\s*"
                                      % (key, cText))
     elif 'MW' in matchWith:
-        matchThisParsy = re.compile(r"(?P<%s>[\S]*(?=%s)[\S]*)" 
+        matchThisParsy = re.compile(r"(?P<%s>[\S]*(?=%s)[\S]*)"
                                      % (key, cText))
     elif 'FD' in matchWith:
         delimeter = matchWith.replace('FD', '')
@@ -150,7 +149,7 @@ def namelist_matcher(cText, key, matchWith):
     else: # Default matchWith 'NW'
         matchThisParsy = re.compile(r"(?:%s)\s*(?:\s|=|:)\s*(?:'|\"|{|\[|\()?"
                                      "(?P<%s>[\S]+)"
-                                     "(?:'|\"|}|\]|\))?\s*" 
+                                     "(?:'|\"|}|\]|\))?\s*"
                                      % (cText, key))
     return matchThisParsy
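For the default 'NW' style, the pattern returned by namelist_matcher behaves roughly like the sketch below; the control name and group key are illustrative:

    import re

    cText, key = "nsteps", "nsteps_val"   # hypothetical control name and group key
    matchThisParsy = re.compile(r"(?:%s)\s*(?:\s|=|:)\s*(?:'|\"|{|\[|\()?"
                                r"(?P<%s>[\S]+)"
                                r"(?:'|\"|}|\]|\))?\s*" % (cText, key))

    m = matchThisParsy.search("nsteps = 5000")
    print(m.group("nsteps_val"))          # 5000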
 
@@ -162,17 +161,17 @@ class ParserBase(object):
         self.PARSERTAG = parsertag
         self.metaStorage = mStore.Container('section_run')
         self.metaStorageRestrict = mStore.Container('section_restricted_uri')
-        exclude_dict = { 
+        exclude_dict = {
             'section_run' : [
-            'section_processor_info', 
-            'section_processor_log', 
+            'section_processor_info',
+            'section_processor_log',
             'section_springer_material',
             'section_repository_info'
             ]}
         self.META_INFO_PATH = metainfopath
         PARSER_INFO_DEFAULT = parserinfodef
         jsonmetadata = mStore.JsonMetaInfo(
-                COMMON_META_INFO_PATH, 
+                COMMON_META_INFO_PATH,
                 PUBLIC_META_INFO_PATH,
                 self.META_INFO_PATH
                 )
@@ -199,8 +198,8 @@ class ParserBase(object):
 
     def parse(self):
         self.coverageIgnore = re.compile(r"^(?:" + r"|".join(self.coverageIgnoreList) + r")$")
-        mainFunction(mainFileDescription=self.mainFileDescription(), 
-                     metaInfoEnv=self.metaInfoEnv, 
+        mainFunction(mainFileDescription=self.mainFileDescription(),
+                     metaInfoEnv=self.metaInfoEnv,
                      parserInfo=self.parserInfo,
                      cachingLevelForMetaName=self.cachingLevelForMetaName,
                      superContext=self)
@@ -229,10 +228,10 @@ class ParserBase(object):
             if "system_name" in topoDict:
                 system_name = topoDict["system_name"]
         if system_name is not None:
-            supB.addValue('system_name', str(system_name))    
+            supB.addValue('system_name', str(system_name))
 
     def topology_atom_type_and_interactions(self, backend, gIndex):
-        """ Function to generate data for atom_to_molecule 
+        """ Function to generate data for atom_to_molecule
         """
         sO = open_section
         supB = backend.superBackend
@@ -286,63 +285,63 @@ class ParserBase(object):
                 interTypeDict = []
             for elm in range(len(types_list)):
                 with sO(supB, 'section_atom_type'):
-                    # Atom Type NOMAD Name 
-                    # Q: Why different than name or type? 
-                    # A: Because atom name in topology is used to define a specific atom 
-                    #    with properties may be same or different with the other name-sharers. 
-                    #    On the other hand, atom type defines a type that mostly fits 
-                    #    with the expectations of a Force Field parameter set where a 
-                    #    specific atom-atom pair or atom-mutliatom set have an interaction. 
-                    #    A unique identification of an atom may be in both worlds not solely 
-                    #    atom type or atom name. While there are cases where atom name, 
-                    #    atom element symbol and even atom type are same. Such cases happen 
+                    # Atom Type NOMAD Name
+                    # Q: Why is this different from the atom name or type?
+                    # A: Because the atom name in a topology identifies a specific atom
+                    #    whose properties may or may not match those of other name-sharers.
+                    #    On the other hand, the atom type defines a type that mostly fits
+                    #    the expectations of a Force Field parameter set, where a
+                    #    specific atom-atom pair or atom/multi-atom set has an interaction.
+                    #    A unique identification of an atom may be in both worlds, not solely
+                    #    the atom type or the atom name, while there are cases where atom name,
+                    #    atom element symbol, and even atom type are the same. Such cases happen
                     #    quite often in ab initio calculations.
-                    #    But this is not the case that we are talking about here in MD 
-                    #    calculations since what we expect from a NOMAD meta info that 
+                    #    But that is not the case we are dealing with here in MD
+                    #    calculations, since all we expect from a NOMAD meta info entry is
                     #    a lossless translation of the topology, nothing more.
                     #
-                    supB.addValue('atom_type_name', str(types_list[elm]) + ':' + str(elm+1))    
+                    supB.addValue('atom_type_name', str(types_list[elm]) + ':' + str(elm+1))
                     # Atom Name (Topology Name)
-                    supB.addValue(self.PARSERTAG + '_atom_name', name_list[elm]) 
+                    supB.addValue(self.PARSERTAG + '_atom_name', name_list[elm])
                     # Atom Type Name (Force Field Name)
-                    supB.addValue(self.PARSERTAG + '_atom_type', atom_type_list[elm]) 
+                    supB.addValue(self.PARSERTAG + '_atom_type', atom_type_list[elm])
                     # Atomic element/symbol
-                    supB.addValue(self.PARSERTAG + '_atom_element', elementList[elm]) 
+                    supB.addValue(self.PARSERTAG + '_atom_element', elementList[elm])
                     # Atomic mass
                     if massesList[elm] is not None:
-                        supB.addValue('atom_type_mass', massesList[elm])             
+                        supB.addValue('atom_type_mass', massesList[elm])
                     if radiusList[elm]:
                         # Atomic van der Waals radius
-                        supB.addValue(self.PARSERTAG + '_atom_radius', radiusList[elm])   
+                        supB.addValue(self.PARSERTAG + '_atom_radius', radiusList[elm])
                     if chargeList[elm]:
                         # Atomic charge
                         supB.addValue('atom_type_charge', chargeList[elm])
 #                    if atomlabelList[elm]:
-#                        supB.addArrayValues(self.PARSERTAG + '_atom_to_atom_type_ref', 
-#                                np.asarray(atomlabelList[elm]))   
+#                        supB.addArrayValues(self.PARSERTAG + '_atom_to_atom_type_ref',
+#                                np.asarray(atomlabelList[elm]))
                     pass
 
             with sO(supB, self.PARSERTAG + '_section_atom_to_atom_type_ref'):
                 if atomlabelList:
-                    supB.addArrayValues(self.PARSERTAG + '_atom_to_atom_type_ref', 
-                            np.asarray(atomlabelList))   
+                    supB.addArrayValues(self.PARSERTAG + '_atom_to_atom_type_ref',
+                            np.asarray(atomlabelList))
 
             for inum in range(len(interDict.keys())):
                 with sO(supB, 'section_interaction'):
                     # atom indexes of bound pairs for a specific atom type
-                    supB.addArrayValues('interaction_atoms', np.asarray(interDict[inum]))     
+                    supB.addArrayValues('interaction_atoms', np.asarray(interDict[inum]))
                     # number of bonds for this type
-                    supB.addValue('number_of_interactions', len(interDict[inum]))             
+                    supB.addValue('number_of_interactions', len(interDict[inum]))
                     # number of atoms involved (2 for bonds, 3 for angles, and so on)
-                    supB.addValue('number_of_atoms_per_interaction', len(interDict[inum][0])) 
-                
+                    supB.addValue('number_of_atoms_per_interaction', len(interDict[inum][0]))
+
                     #if bondFunctional:
                     #    self.superP.addValue('interaction_kind', bondFunctional)  # functional form of the interaction
                     # this points to the relative section_atom_type
-                    supB.addArrayValues(self.PARSERTAG + '_interaction_atom_to_atom_type_ref', 
-                            np.asarray(interTypeDict[inum]))  
+                    supB.addArrayValues(self.PARSERTAG + '_interaction_atom_to_atom_type_ref',
+                            np.asarray(interTypeDict[inum]))
                     # interaction parameters for the functional
-                    #self.superP.addValue('interaction_parameters', bondParameters)  
+                    #self.superP.addValue('interaction_parameters', bondParameters)
 
 
 #            for i in range(len(moleculeTypeInfo)):
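The interaction values written above follow directly from the shape of each interaction list; a tiny illustration with a hypothetical bond list:

    import numpy as np

    # Hypothetical bond list for one interaction type: pairs of atom indices.
    bonds = [[0, 1], [1, 2], [2, 3]]

    interaction_atoms = np.asarray(bonds)                # 'interaction_atoms' array
    number_of_interactions = len(bonds)                  # 3 bonds of this type
    number_of_atoms_per_interaction = len(bonds[0])      # 2 for bonds, 3 for angles, ...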
@@ -365,7 +364,7 @@ class ParserBase(object):
 #    for res in mytopology.bonds:
 #        molname1, molatom1 = str(res[0]).split('-')
 #        molname2, molatom2 = str(res[1]).split('-')
-#        if molname1 in molname2: 
+#        if molname1 in molname2:
 #            atom_in_mol_bond.append(res)
 #            atom_in_mol_bond_num.append(bonds[index])
 #        else:
@@ -384,8 +383,8 @@ class ParserBase(object):
                         parser.lastMatch[self.PARSERTAG+'_program_name'],
                         type(self).__name__))
 
-    def dictionary_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList, 
-            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, 
+    def dictionary_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList,
+            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions,
             entryline=None, parserID=None, parsername=None, globalDict=None, localDict=None, parent=None):
         record = None
         replay = None
@@ -436,9 +435,9 @@ class ParserBase(object):
                 rnewline = True
         if "dictionary" in parserOptions.keys():
             if isinstance(parserOptions["dictionary"], dict):
-                dictionary = parserOptions["dictionary"] 
+                dictionary = parserOptions["dictionary"]
             if isinstance(parserOptions["dictionary"], str):
-                dictionary = getattr(self,parserOptions["dictionary"]) 
+                dictionary = getattr(self,parserOptions["dictionary"])
             else:
                 dictionary = None
         if "dicttype" in parserOptions.keys():
@@ -663,8 +662,8 @@ class ParserBase(object):
                     self.recordList.write(lastLine)
         return lastLine, parserSuccess, globalDict, localDict
 
-    def readline_control_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList, 
-            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, entryline=None, 
+    def readline_control_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList,
+            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, entryline=None,
             parserID=None, parsername=None, globalDict=None, localDict=None, parent=None):
         record = None
         replay = None
@@ -734,7 +733,7 @@ class ParserBase(object):
                     controlin = getattr(self,parserOptions["controlin"])
                 else:
                     controlin = parserOptions["controlin"]
-            if("controlskip" in parserOptions and 
+            if("controlskip" in parserOptions and
                 controlin is not None):
                 controlskip = [
                         controlin[cs] for cs in parserOptions[
@@ -799,7 +798,7 @@ class ParserBase(object):
 
         #print("PRINTING nextlogsteps, MDnextstep, targetstep:",controlwait,controlnextattr,controlcounter)
         #print("PRINTING continuenewline:",continuenewline)
-        
+
         if continuenewline:
             #print("PRINTING: readline_control lastLine:",lastLine)
             parserSuccess=True
@@ -854,8 +853,8 @@ class ParserBase(object):
             setattr(self,parserOptions["controldict"],controldict)
         return lastLine, parserSuccess, globalDict, localDict
 
-    def section_control_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList, 
-            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, entryline=None, 
+    def section_control_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList,
+            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, entryline=None,
             parserID=None, parsername=None, globalDict=None, localDict=None, parent=None):
         if parent is None:
             parent = 0
@@ -955,8 +954,8 @@ class ParserBase(object):
                 if sectionname in activatesection:
                     activate = activatesection[sectionname]
             if activate:
-                if(sectionopen is not None and 
-                    sectionopenattr is not None and 
+                if(sectionopen is not None and
+                    sectionopenattr is not None and
                     sectionopenrecord is not None):
                     if str(sectionopenattr) in sectionopenrecord:
                         if sectionopenrecord[str(sectionopenattr)] is False:
@@ -964,8 +963,8 @@ class ParserBase(object):
                             gIndex = backend.openSection(sectionname)
                             self.secGIndexDict.update({sectionname : [1, gIndex]})
                             sectionopenrecord[str(sectionopenattr)] = True
-                if(sectionclose is not None and 
-                    sectioncloseattr is not None and 
+                if(sectionclose is not None and
+                    sectioncloseattr is not None and
                     sectioncloserecord is not None):
                     #print("PRINTING: section close:",str(sectioncloseattr))
                     if str(sectioncloseattr) in sectioncloserecord:
@@ -992,17 +991,17 @@ class ParserBase(object):
 
         return lastLine, parserSuccess, globalDict, localDict
 
-    def table_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-            metaNameStart, matchNameList, matchNameDict, updateMatchDict, 
-            onlyCaseSensitive, stopOnFirstLine, parserDict, parserOptions, 
+    def table_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+            metaNameStart, matchNameList, matchNameDict, updateMatchDict,
+            onlyCaseSensitive, stopOnFirstLine, parserDict, parserOptions,
             globalDict=None, localDict=None):
         """
             header = False     if True, the header titles will be read
             wrap = False       if True, table titles and values are assumed to be wrapped
-            tablelines = 0     Number of lines that are cycled in the table (Default: 0= process 
+            tablelines = 0     Number of lines that are cycled in the table (Default: 0= process
                                every line) Ex.: 1 = process after 1 line, useful if table is wrapped
             lineFilter = None  A pass through (converter) filter before parsing
-            headerList = None  if header is not True, values will be assigned to 
+            headerList = None  if header is not True, values will be assigned to
                                titles in the headerList in the order they appear in the line
         """
         record = None
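The options documented in the docstring above are looked up from parserOptions; a hypothetical dictionary for a simple table with a title row could look like this (key names mirror the docstring, values are illustrative):

    parserOptions = {
        "header": True,       # read column titles from the first table line
        "wrap": False,        # titles and values are not wrapped over several lines
        "tablelines": 0,      # process every line of the table
        "lineFilter": None,   # no pass-through converter before parsing
        "headerList": None,   # only needed when header is not True
    }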
@@ -1032,7 +1031,7 @@ class ParserBase(object):
             if stopOnMatchRe.findall(lastLine):
                 stopOnMatch = True
                 if parserDict["firstLine"] == 0:
-                    if stopOnFirstLine: 
+                    if stopOnFirstLine:
                         stopOnMatch = True
                     else:
                         stopOnMatch = False
@@ -1048,23 +1047,23 @@ class ParserBase(object):
         if stopOnMatch:
             return True, parserDict, globalDict, localDict
         else:
-            # Check parser options to determine 
+            # Check parser options to determine
             # the table properties
-            header = False 
-            wrap = False 
+            header = False
+            wrap = False
             storeLines = 0
-            lineFilter = None 
-            headerList = None 
-            headerListCheck = None 
-            headersave = None 
+            lineFilter = None
+            headerList = None
+            headerListCheck = None
+            headersave = None
             skipLines = 0
             if "header" in parserOptions.keys():
-                header = parserOptions["header"] if (parserOptions["header"] is not None or 
+                header = parserOptions["header"] if (parserOptions["header"] is not None or
                         parserOptions["header"] is not False) else False
             if "headersave" in parserOptions.keys():
                 if parserOptions["headersave"]:
                     try:
-                        headersave = getattr(self, parserOptions["headersave"]) 
+                        headersave = getattr(self, parserOptions["headersave"])
                     except(AttributeError):
                         headersave = {}
                         setattr(self, parserOptions["headersave"], headersave)
@@ -1072,7 +1071,7 @@ class ParserBase(object):
                     headersave = None
             if "headerList" in parserOptions.keys():
                 if parserOptions["headerList"]:
-                    headerListCheck = getattr(self, parserOptions["headerList"]) 
+                    headerListCheck = getattr(self, parserOptions["headerList"])
                     if headerListCheck is not None:
                         headerList = headerListCheck
                     else:
@@ -1122,7 +1121,7 @@ class ParserBase(object):
                 hlist = []
                 vlist = []
                 for line in storedText.splitlines():
-                    if header: 
+                    if header:
                         if lcount<1:
                             hlist.extend(line.split())
                         else:
@@ -1236,8 +1235,8 @@ class ParserBase(object):
                 setattr(self, parserOptions["headersave"], headersave)
             return False, parserDict, globalDict, localDict
 
-    def table_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList, 
-            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, 
+    def table_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList,
+            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions,
             entryline=None, parserID=None, parsername=None, globalDict=None, localDict=None, parent=None):
         record = None
         replay = None
@@ -1308,12 +1307,12 @@ class ParserBase(object):
             quitOnMatchRe = None
             if quitOnMatchStr is not None:
                 quitOnMatchRe = re.compile(quitOnMatchStr)
-            rtn, parserDict, globalDict, localDict = self.table_store(parser, lastLine, 
+            rtn, parserDict, globalDict, localDict = self.table_store(parser, lastLine,
                     stopOnMatchRe, quitOnMatchRe,
-                    metaNameStart, matchNameList, 
-                    matchNameDict, updateMatchDict, 
-                    onlyCaseSensitive, stopOnFirstLine, 
-                    parserDict, parserOptions, globalDict, localDict) 
+                    metaNameStart, matchNameList,
+                    matchNameDict, updateMatchDict,
+                    onlyCaseSensitive, stopOnFirstLine,
+                    parserDict, parserOptions, globalDict, localDict)
             if maxLines is not None:
                 if maxLines >= parserDict["numStoredLines"]:
                     rtn = True
@@ -1327,15 +1326,15 @@ class ParserBase(object):
                     if not lastLine:
                         break
                     else:
-                        # Matched with stopOnMatch. Discarding the line and 
-                        # return SimpleMatcher context. Can this line be 
-                        # discarded since it is the end of line for input 
+                        # Matched with stopOnMatch. Discard the line and
+                        # return to the SimpleMatcher context. Can this line be
+                        # discarded, since it is the end of line for input
                         # control variables or end of namelist ?
-                        rtn, parserDict, globalDict, localDict = self.table_store(parser, lastLine, 
-                                stopOnMatchRe, quitOnMatchRe, 
-                                metaNameStart, matchNameList, 
-                                matchNameDict, updateMatchDict, 
-                                onlyCaseSensitive, stopOnFirstLine, 
+                        rtn, parserDict, globalDict, localDict = self.table_store(parser, lastLine,
+                                stopOnMatchRe, quitOnMatchRe,
+                                metaNameStart, matchNameList,
+                                matchNameDict, updateMatchDict,
+                                onlyCaseSensitive, stopOnFirstLine,
                                 parserDict, parserOptions, globalDict, localDict)
                         if maxLines is not None:
                             if maxLines >= parserDict["numStoredLines"]:
@@ -1362,8 +1361,8 @@ class ParserBase(object):
                                     self.recordList.write(lastLine)
         return lastLine, parserDict["parserSuccess"], globalDict, localDict
 
-    def namelist_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-            metaNameStart, matchNameList, matchNameDict, updateMatchDict, 
+    def namelist_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+            metaNameStart, matchNameList, matchNameDict, updateMatchDict,
             onlyCaseSensitive, stopOnFirstLine, parserDict, parserOptions,
             globalDict=None, localDict=None):
         skipThis = False
@@ -1385,7 +1384,7 @@ class ParserBase(object):
         if stopOnMatchRe.findall(lastLine):
             stopOnMatch = True
             if self.firstLine==0:
-                if stopOnFirstLine: 
+                if stopOnFirstLine:
                     stopOnMatch = True
                 else:
                     stopOnMatch = False
@@ -1401,7 +1400,7 @@ class ParserBase(object):
         else:
             anythingmatched = False
             if skipThis is False:
-                # If there is at least one namelist in the line, 
+                # If there is at least one namelist in the line,
                 # search for all others in the dictionary.
                 mNameDict = getattr(self, matchNameDict)
                 for cName, key in getDict_MetaStrInDict(mNameDict).items():
@@ -1472,7 +1471,7 @@ class ParserBase(object):
                             reDict=None
                     if reDict:
                         for k,v in reDict.items():
-                            if k == key: 
+                            if k == key:
                                 if isinstance(v, str):
                                     if replaceDict is not None:
                                         for repK, repV in replaceDict.items():
@@ -1548,8 +1547,8 @@ class ParserBase(object):
                     parserDict["skipThis"] = False
             return False, parserDict, globalDict, localDict
 
-    def namelist_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList, 
-            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions, 
+    def namelist_parser(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, matchNameList,
+            matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, parserOptions,
             entryline=None, parserID=None, parsername=None, globalDict=None, localDict=None, parent=None):
         record = None
         replay = None
@@ -1658,11 +1657,11 @@ class ParserBase(object):
                 parserDict["oneLine"] = None
                 parserDict["oneLining"] = False
                 parserDict["oneLineId"] = None
-        rtn, parserDict, globalDict, localDict = self.namelist_store(parser, lastLine, 
+        rtn, parserDict, globalDict, localDict = self.namelist_store(parser, lastLine,
                 stopOnMatchRe, quitOnMatchRe,
-                metaNameStart, matchNameList, 
-                matchNameDict, updateMatchDict, 
-                onlyCaseSensitive, stopOnFirstLine, 
+                metaNameStart, matchNameList,
+                matchNameDict, updateMatchDict,
+                onlyCaseSensitive, stopOnFirstLine,
                 parserDict, parserOptions, globalDict, localDict)
         lastLine = parserDict["saveLastLine"]
         if rtn is not True:
@@ -1730,11 +1729,11 @@ class ParserBase(object):
                             parserDict["oneLine"] = None
                             parserDict["oneLining"] = False
                             parserDict["oneLineId"] = None
-                    rtn, parserDict, globalDict, localDict = self.namelist_store(parser, lastLine, 
-                            stopOnMatchRe, quitOnMatchRe, 
-                            metaNameStart, matchNameList, 
-                            matchNameDict, updateMatchDict, 
-                            onlyCaseSensitive, stopOnFirstLine, 
+                    rtn, parserDict, globalDict, localDict = self.namelist_store(parser, lastLine,
+                            stopOnMatchRe, quitOnMatchRe,
+                            metaNameStart, matchNameList,
+                            matchNameDict, updateMatchDict,
+                            onlyCaseSensitive, stopOnFirstLine,
                             parserDict, parserOptions, globalDict, localDict)
                     lastLine = parserDict["saveLastLine"]
                     if rtn:
@@ -1759,13 +1758,13 @@ class ParserBase(object):
                                 self.recordList.write(lastLine)
         return lastLine, parserDict["parserSuccess"], globalDict, localDict
 
-    def check_namelist_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, metaNameStart, 
+    def check_namelist_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, metaNameStart,
             matchNameList, matchNameDict, onlyCaseSensitive, stopOnFirstLine, parserDict):
         stopOnMatch = False
         if stopOnMatchRe.findall(lastLine):
             stopOnMatch = True
             if self.firstLine==0:
-                if stopOnFirstLine: 
+                if stopOnFirstLine:
                     stopOnMatch = True
                 else:
                     stopOnMatch = False
@@ -1775,25 +1774,25 @@ class ParserBase(object):
         if stopOnMatch:
             return True
         else:
-            # If there is at least one namelist in the line, 
+            # If there is at least one namelist in the line,
             # search for all others in the dictionary.
             if self.MD is not True:
                 newLine = parser.fIn.readline()
                 lastLine = ' = '.join([ "%s" % str(line) for line in zip(lastLine, newLine)])
             for cName, key in getDict_MetaStrInDict(matchNameDict).items():
-                reDict={key:value for value in 
+                reDict={key:value for value in
                         re.compile(r"(?:\s%s|^%s|,%s)\s*=\s*(?:'|\")?"
-                                    "(?P<%s>[\-+0-9.a-zA-Z:]+)(?:'|\")?\s*,?" 
+                                    "(?P<%s>[\-+0-9.a-zA-Z:]+)(?:'|\")?\s*,?"
                         % (cName, cName, cName, key)).findall(lastLine)}
                 if onlyCaseSensitive is not True:
-                    reDict.update({key:value for value in 
+                    reDict.update({key:value for value in
                                    re.compile(r"(?:\s%s|^%s|,%s)\s*=\s*(?:'|\")?"
-                                               "(?P<%s>[\-+0-9.a-zA-Z:]+)(?:'|\")?\s*,?" 
-                                   % (cName.upper(), cName.upper(), 
+                                               "(?P<%s>[\-+0-9.a-zA-Z:]+)(?:'|\")?\s*,?"
+                                   % (cName.upper(), cName.upper(),
                                       cName.upper(), key)).findall(lastLine)})
                 if reDict:
                     for k,v in reDict.items():
-                        if k == key: 
+                        if k == key:
                             if k in list(parser.lastMatch.keys()):
                                 parser.lastMatch[k]=v
                             else:
@@ -1801,7 +1800,7 @@ class ParserBase(object):
                                 matchNameDict[k].activeInfo=True
             return False
 
-    def adHoc_read_namelist_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr, 
+    def adHoc_read_namelist_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr,
             metaNameStart, matchNameList, matchNameDict, onlyCaseSensitive, stopOnFirstLine):
         lastLine = parser.fIn.fInLine
         parserDict = {
@@ -1815,10 +1814,10 @@ class ParserBase(object):
         quitOnMatchRe = None
         if quitOnMatchStr is not None:
             quitOnMatchRe = re.compile(quitOnMatchStr)
-        if self.check_namelist_store(parser, lastLine, 
+        if self.check_namelist_store(parser, lastLine,
                 stopOnMatchRe, quitOnMatchRe,
-                metaNameStart, matchNameList, 
-                matchNameDict, onlyCaseSensitive, 
+                metaNameStart, matchNameList,
+                matchNameDict, onlyCaseSensitive,
                 stopOnFirstLine, parserDict) is not True:
             while True:
                 lastLine = self.peekline(parser)
@@ -1829,30 +1828,30 @@ class ParserBase(object):
                     # Matched with stopOnMatch. Discarding the line and return SimpleMatcher context.
                     # Can this line be discarded since it is the end of line for input control
                     # variables or end of namelist ?
-                    if self.check_namelist_store(parser, lastLine, 
-                            stopOnMatchRe, quitOnMatchRe, 
-                            metaNameStart, matchNameList, 
+                    if self.check_namelist_store(parser, lastLine,
+                            stopOnMatchRe, quitOnMatchRe,
+                            metaNameStart, matchNameList,
                             matchNameDict, onlyCaseSensitive,
                             stopOnFirstLine, parserDict):
                         break
                     else:
                         lastLine = parser.fIn.readline()
 
-    def check_commandline_args(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
+    def check_commandline_args(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe,
             metaNameStart, matchNameList, matchNameDict, onlyCaseSensitive, stopOnFirstLine,
             parserDict, commandLineMatchRe, commandLineFunction):
         stopOnMatch = False
         if stopOnMatchRe.findall(lastLine):
             stopOnMatch = True
             if parserDict["firstLine"]==0:
-                if stopOnFirstLine: 
+                if stopOnFirstLine:
                     stopOnMatch = True
                 else:
                     stopOnMatch = False
         if quitOnMatchRe is not None:
             if quitOnMatchRe.findall(lastLine):
                 stopOnMatch = True
-        # If there is at least one namelist in the line, 
+        # If there is at least one namelist in the line,
         # search for all others in the dictionary.
         checkDict = getDict_MetaStrInDict(matchNameDict, matchNameList)
         cmdMatch = commandLineMatchRe.findall(lastLine)
@@ -1862,7 +1861,7 @@ class ParserBase(object):
                 cmdName = cName.replace("-", "")
                 if cmdName in cmdDict:
                     for k,v in cmdDict.items():
-                        if k == cmdName: 
+                        if k == cmdName:
                             if k in list(parser.lastMatch.keys()):
                                 parser.lastMatch[key]=v
                             else:
@@ -1873,7 +1872,7 @@ class ParserBase(object):
         else:
             return False
 
-    def adHoc_read_commandline_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr, 
+    def adHoc_read_commandline_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr,
             metaNameStart, matchNameList, matchNameDict, onlyCaseSensitive, stopOnFirstLine,
             commandLineMatchStr, commandLineFunction):
         lastLine = parser.fIn.fInLine
@@ -1889,11 +1888,11 @@ class ParserBase(object):
         quitOnMatchRe = None
         if quitOnMatchStr is not None:
             quitOnMatchRe = re.compile(quitOnMatchStr)
-        if self.check_commandline_args(parser, lastLine, 
+        if self.check_commandline_args(parser, lastLine,
                 stopOnMatchRe, quitOnMatchRe,
-                metaNameStart, matchNameList, 
-                matchNameDict, onlyCaseSensitive, 
-                stopOnFirstLine, parserDict, 
+                metaNameStart, matchNameList,
+                matchNameDict, onlyCaseSensitive,
+                stopOnFirstLine, parserDict,
                 commandLineMatchRe, commandLineFunction) is not True:
             while True:
                 lastLine = self.peekline(parser)
@@ -1904,17 +1903,17 @@ class ParserBase(object):
                     # Matched with stopOnMatch. Discarding the line and return SimpleMatcher context.
                     # Can this line be discarded since it is the end of line for input control
                     # variables or end of namelist ?
-                    if self.check_commandline_args(parser, lastLine, 
-                            stopOnMatchRe, quitOnMatchRe, 
-                            metaNameStart, matchNameList, 
+                    if self.check_commandline_args(parser, lastLine,
+                            stopOnMatchRe, quitOnMatchRe,
+                            metaNameStart, matchNameList,
                             matchNameDict, onlyCaseSensitive,
-                            stopOnFirstLine, parserDict, 
+                            stopOnFirstLine, parserDict,
                             commandLineMatchRe, commandLineFunction):
                         break
                     else:
                         lastLine = parser.fIn.readline()
 
-    def adHoc_find_textfile_store(self, parser, 
+    def adHoc_find_textfile_store(self, parser,
             commandLineMatchStr, commandLineFunction):
         lastLine = parser.fIn.fInLine
         parserDict = {
@@ -1929,11 +1928,11 @@ class ParserBase(object):
         quitOnMatchRe = None
         if quitOnMatchStr is not None:
             quitOnMatchRe = re.compile(quitOnMatchStr)
-        if self.check_commandline_args(parser, lastLine, 
+        if self.check_commandline_args(parser, lastLine,
                 stopOnMatchRe, quitOnMatchRe,
-                metaNameStart, matchNameList, 
-                matchNameDict, onlyCaseSensitive, 
-                stopOnFirstLine, parserDict, 
+                metaNameStart, matchNameList,
+                matchNameDict, onlyCaseSensitive,
+                stopOnFirstLine, parserDict,
                 commandLineMatchRe, commandLineFunction) is not True:
             while True:
                 lastLine = self.peekline(parser)
@@ -1944,18 +1943,18 @@ class ParserBase(object):
                     # Matched with stopOnMatch. Discarding the line and return SimpleMatcher context.
                     # Can this line be discarded since it is the end of line for input control
                     # variables or end of namelist ?
-                    if self.check_commandline_args(parser, lastLine, 
-                            stopOnMatchRe, quitOnMatchRe, 
-                            metaNameStart, matchNameList, 
+                    if self.check_commandline_args(parser, lastLine,
+                            stopOnMatchRe, quitOnMatchRe,
+                            metaNameStart, matchNameList,
                             matchNameDict, onlyCaseSensitive,
-                            stopOnFirstLine, parserDict, 
+                            stopOnFirstLine, parserDict,
                             commandLineMatchRe, commandLineFunction):
                         break
                     else:
                         lastLine = parser.fIn.readline()
 
-    def check_text_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-            metaNameStart, metaNameStore, matchNameList, matchNameDict, onlyCaseSensitive, 
+    def check_text_store(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+            metaNameStart, metaNameStore, matchNameList, matchNameDict, onlyCaseSensitive,
             stopOnFirstLine, storeFirstLine, storeStopQuitLine, parserDict):
         stopOnMatch = False
         key = metaNameStore
@@ -1963,7 +1962,7 @@ class ParserBase(object):
         if stopOnMatchRe.findall(lastLine):
             stopOnMatch = True
             if parserDict["firstLine"]==0:
-                if stopOnFirstLine: 
+                if stopOnFirstLine:
                     stopOnMatch = True
                 else:
                     stopOnMatch = False
@@ -1985,8 +1984,8 @@ class ParserBase(object):
             # If parsing is not stop or quit,
             # store the parsed string in text
             # with the given meta info name.
-            # This will also copy the value to 
-            # matchNameDict if the metaNameStore 
+            # This will also copy the value to
+            # matchNameDict if the metaNameStore
             # exists in the dictionary keys.
             if(parserDict["firstLine"]==0 and storeFirstLine):
                 parserDict.update({"storedLines" : conv_str(lastLine)})
@@ -2009,8 +2008,8 @@ class ParserBase(object):
                         matchNameDict[key].activeInfo=True
             return False
 
-    def adHoc_read_store_text_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr, 
-            metaNameStart, metaNameStore, matchNameList, matchNameDict, onlyCaseSensitive, 
+    def adHoc_read_store_text_stop_parsing(self, parser, stopOnMatchStr, quitOnMatchStr,
+            metaNameStart, metaNameStore, matchNameList, matchNameDict, onlyCaseSensitive,
             stopOnFirstLine, storeFirstLine, storeStopQuitLine, onQuitRunFunction):
         lastLine = parser.fIn.fInLine
         parserDict = {
@@ -2024,11 +2023,11 @@ class ParserBase(object):
         if quitOnMatchStr is not None:
             quitOnMatchRe = re.compile(quitOnMatchStr)
         # Check first line to store and stop/quit
-        if self.check_text_store(parser, lastLine, 
+        if self.check_text_store(parser, lastLine,
                 stopOnMatchRe, quitOnMatchRe,
-                metaNameStart, metaNameStore, 
-                matchNameList, matchNameDict, 
-                onlyCaseSensitive, stopOnFirstLine, 
+                metaNameStart, metaNameStore,
+                matchNameList, matchNameDict,
+                onlyCaseSensitive, stopOnFirstLine,
                 storeFirstLine, storeStopQuitLine,
                 parserDict) is not True:
             # Check all other lines to store and stop/quit
@@ -2038,13 +2037,13 @@ class ParserBase(object):
                 if not lastLine:
                     break
                 else:
-                    # Matched with stopOnMatch. The line will be procesed and 
+                    # Matched with stopOnMatch. The line will be processed and
                     # parsing will continue by returning SimpleMatcher context.
-                    if self.check_text_store(parser, lastLine, 
-                            stopOnMatchRe, quitOnMatchRe, 
-                            metaNameStart, metaNameStore, 
-                            matchNameList, matchNameDict, 
-                            onlyCaseSensitive, stopOnFirstLine, 
+                    if self.check_text_store(parser, lastLine,
+                            stopOnMatchRe, quitOnMatchRe,
+                            metaNameStart, metaNameStore,
+                            matchNameList, matchNameDict,
+                            onlyCaseSensitive, stopOnFirstLine,
                             storeFirstLine, storeStopQuitLine,
                             parserDict):
                         break
@@ -2053,8 +2052,8 @@ class ParserBase(object):
         if onQuitRunFunction is not None:
             onQuitRunFunction(parser)
 
-    def check_subparsers(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, subParsers, 
-            ordered, parserDict, secDict, onStartRunFunction, onQuitRunFunction, 
+    def check_subparsers(self, parser, lastLine, stopOnMatchRe, quitOnMatchRe, subParsers,
+            ordered, parserDict, secDict, onStartRunFunction, onQuitRunFunction,
             onlySubParsersReadLine, maxcycle=None, parent=None):
         record = None
         replay = None
@@ -2091,8 +2090,8 @@ class ParserBase(object):
                 if ordered:
                     for parser_id in range(len(subParsers)):
                         ismatched=False
-                        if(subParsers[parser_id]["startReStr"] is None or 
-                            "AlwaysMatch" in subParsers[parser_id]["startReStr"] or 
+                        if(subParsers[parser_id]["startReStr"] is None or
+                            "AlwaysMatch" in subParsers[parser_id]["startReStr"] or
                             "AlwaysRun" in subParsers[parser_id]["startReStr"]):
                             ismatched=True
                         else:
@@ -2111,8 +2110,8 @@ class ParserBase(object):
                                         if False not in waitcheck:
                                             skipme = True
                                             break
-                            if(parserDict["parserCheckList"][parser_id] is False and 
-                                False not in parserDict["parserCheckList"][0:parser_id] and 
+                            if(parserDict["parserCheckList"][parser_id] is False and
+                                False not in parserDict["parserCheckList"][0:parser_id] and
                                 skipme is False):
                                 parserSuccess = False
                                 parserName = None
@@ -2149,7 +2148,7 @@ class ParserBase(object):
                     parser_num = 0
                     for subParser in subParsers:
                         ismatched=False
-                        if(subParser["startReStr"] is None or 
+                        if(subParser["startReStr"] is None or
                             "AlwaysMatch" in subParser["startReStr"] or
                             "AlwaysRun" in subParser["startReStr"]):
                             ismatched=True
@@ -2215,7 +2214,7 @@ class ParserBase(object):
                 # if stop match is reached quit loop
                 if stopOnMatch:
                     break
-                # or if maximum loop cycle is 
+                # or if maximum loop cycle is
                 # reached than quit loop
                 if cyclenum > maxcycle:
                     break
@@ -2234,8 +2233,8 @@ class ParserBase(object):
         else:
             return False, lastLine, parserDict, secDict
 
-    def subparser_caller(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart, 
-            matchNameList, matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine, 
+    def subparser_caller(self, parser, stopOnMatchStr, quitOnMatchStr, metaNameStart,
+            matchNameList, matchNameDict, updateMatchDict, onlyCaseSensitive, stopOnFirstLine,
             parserOptions, entryline=None, parserID=None, parsername=None, globalDict=None,
             localDict=None, parent=None):
         record = None
@@ -2275,7 +2274,7 @@ class ParserBase(object):
         parserSuccess = False
         ordered=False
         onStartRunFunction=None
-        onQuitRunFunction=None 
+        onQuitRunFunction=None
         onlySubParsersReadLine=False
         lookupdict = None
         lookupvals = None
@@ -2355,31 +2354,31 @@ class ParserBase(object):
                                     with sO(supB, sectionAct[0]):
                                         rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                                 parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                                subParsers, ordered, globalDict, parserDict, 
-                                                onStartRunFunction, onQuitRunFunction, 
-                                                onlySubParsersReadLine, parent=rank) 
+                                                subParsers, ordered, globalDict, parserDict,
+                                                onStartRunFunction, onQuitRunFunction,
+                                                onlySubParsersReadLine, parent=rank)
                                 else:
                                     sectionGIndex = supB.openSection(sectionAct[0])
                                     setattr(self,sectionAct[3], sectionGIndex)
                                     rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                             parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                            subParsers, ordered, globalDict, parserDict, 
-                                            onStartRunFunction, onQuitRunFunction, 
-                                            onlySubParsersReadLine, parent=rank) 
+                                            subParsers, ordered, globalDict, parserDict,
+                                            onStartRunFunction, onQuitRunFunction,
+                                            onlySubParsersReadLine, parent=rank)
                             elif "OPEN" in sectionOpenClose.upper():
                                 sectionGIndex = supB.openSection(sectionAct[0])
                                 setattr(self,sectionAct[3], sectionGIndex)
                                 rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                         parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                        subParsers, ordered, globalDict, parserDict, 
-                                        onStartRunFunction, onQuitRunFunction, 
-                                        onlySubParsersReadLine, parent=rank) 
+                                        subParsers, ordered, globalDict, parserDict,
+                                        onStartRunFunction, onQuitRunFunction,
+                                        onlySubParsersReadLine, parent=rank)
                             else:
                                 rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                         parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                        subParsers, ordered, globalDict, parserDict, 
-                                        onStartRunFunction, onQuitRunFunction, 
-                                        onlySubParsersReadLine, parent=rank) 
+                                        subParsers, ordered, globalDict, parserDict,
+                                        onStartRunFunction, onQuitRunFunction,
+                                        onlySubParsersReadLine, parent=rank)
                         else:
                             if "AUTO" in sectionOpenClose.upper():
                                 if sectionMatchCount>0:
@@ -2389,16 +2388,16 @@ class ParserBase(object):
                                         setattr(self,sectionAct[3], sectionGIndex)
                             rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                     parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                    subParsers, ordered, globalDict, parserDict, 
-                                    onStartRunFunction, onQuitRunFunction, 
-                                    onlySubParsersReadLine, parent=rank) 
+                                    subParsers, ordered, globalDict, parserDict,
+                                    onStartRunFunction, onQuitRunFunction,
+                                    onlySubParsersReadLine, parent=rank)
                             if "CLOSE" in sectionOpenClose.upper():
                                 supB.closeSection(sectionAct[0], sectionGIndex)
                             if "AUTO" in sectionOpenClose.upper():
                                 if sectionOnDict[sectionAct[0]][0] is False:
                                     supB.closeSection(sectionAct[0], sectionGIndex)
 
-                        # Infuse the correct gIndex to SimpleMatcher parsers to prevent 
+                        # Infuse the correct gIndex to SimpleMatcher parsers to prevent
                         # them fail before leaving the SmartParser.
                         #     Check if GIndex changed so far for the selected section
                         #     and update it if there is an open section left.
@@ -2408,9 +2407,9 @@ class ParserBase(object):
                     else:
                         rtn, lastLine, globalDict, parserDict = self.check_subparsers(
                                 parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                                subParsers, ordered, globalDict, parserDict, 
-                                onStartRunFunction, onQuitRunFunction, 
-                                onlySubParsersReadLine, parent=rank) 
+                                subParsers, ordered, globalDict, parserDict,
+                                onStartRunFunction, onQuitRunFunction,
+                                onlySubParsersReadLine, parent=rank)
                         parserDict.update({"firstLine" : parserDict["firstLine"] + 1})
                         if rtn:
                             break
@@ -2427,26 +2426,26 @@ class ParserBase(object):
 #        if stopOnMatchRe.findall(lastLine):
 #            stopOnMatch = True
 #            if parserDict["firstLine"] == 0:
-#                if stopOnFirstLine: 
+#                if stopOnFirstLine:
 #                    stopOnMatch = True
 #                else:
 #                    stopOnMatch = False
 #        if quitOnMatchRe is not None:
 #            if quitOnMatchRe.findall(lastLine):
-#                stopOnMatch = True 
+#                stopOnMatch = True
 
         if globalDict is not None:
             globalDict.update({"sectionOnDict":sectionOnDict})
-       
+
 
         if onQuitRunFunction is not None:
             onQuitRunFunction(parser)
 
         return lastLine, parserSuccess, globalDict, localDict
 
-    def adHoc_takingover_parsing(self, parser, stopOnMatchStr, quitOnMatchStr, stopControl=None, 
-            record=False, replay=None, parseOnlyRecorded=False, subParsers=None, ordered=None, 
-            onStartRunFunction=None, onQuitRunFunction=None, onlySubParsersReadLine=False, 
+    def adHoc_takingover_parsing(self, parser, stopOnMatchStr, quitOnMatchStr, stopControl=None,
+            record=False, replay=None, parseOnlyRecorded=False, subParsers=None, ordered=None,
+            onStartRunFunction=None, onQuitRunFunction=None, onlySubParsersReadLine=False,
             onStartReplayRunFunction=None, onQuitReplayRunFunction=None, parent=None):
         if onStartRunFunction is not None:
             onStartRunFunction(parser)
@@ -2499,9 +2498,9 @@ class ParserBase(object):
             # Check first line to store and stop/quit
             rtn, lastLine, parserDict, secDict = self.check_subparsers(
                 parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                subParsers, ordered, parserDict, secDict, 
-                onStartRunFunction, onQuitRunFunction, 
-                onlySubParsersReadLine, parent=rank) 
+                subParsers, ordered, parserDict, secDict,
+                onStartRunFunction, onQuitRunFunction,
+                onlySubParsersReadLine, parent=rank)
         else:
             rtn = False
             if stopOnMatchRe is not None:
@@ -2521,12 +2520,12 @@ class ParserBase(object):
                     break
                 else:
                     if parseOnlyRecorded is False:
-                        # Matched with stopOnMatch. The line will be procesed and 
+                        # Matched with stopOnMatch. The line will be processed and
                         # parsing will continue by returning SimpleMatcher context.
                         rtn, lastLine, parserDict, secDict = self.check_subparsers(
-                            parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-                            subParsers, ordered, parserDict, secDict, 
-                            onStartRunFunction, onQuitRunFunction, 
+                            parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+                            subParsers, ordered, parserDict, secDict,
+                            onStartRunFunction, onQuitRunFunction,
                             onlySubParsersReadLine, parent=rank)
                     else:
                         rtn = False
@@ -2563,9 +2562,9 @@ class ParserBase(object):
                     parserDict["replayCount"]=replayCount
                     rtn, lastLine, parserDict, secDict = self.check_subparsers(
                             parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                            subParsers, ordered, parserDict, secDict, 
-                            onStartRunFunction, onQuitRunFunction, 
-                            onlySubParsersReadLine, parent=rank) 
+                            subParsers, ordered, parserDict, secDict,
+                            onStartRunFunction, onQuitRunFunction,
+                            onlySubParsersReadLine, parent=rank)
                     if secDict is not None:
                         parserDict.update(secDict)
                     if rtn is not True:
@@ -2576,9 +2575,9 @@ class ParserBase(object):
                                 break
                             else:
                                 rtn, lastLine, parserDict, secDict = self.check_subparsers(
-                                        parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-                                        subParsers, ordered, parserDict, secDict, 
-                                        onStartRunFunction, onQuitRunFunction, 
+                                        parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+                                        subParsers, ordered, parserDict, secDict,
+                                        onStartRunFunction, onQuitRunFunction,
                                         onlySubParsersReadLine, parent=rank)
                                 if secDict is not None:
                                     parserDict.update(secDict)
@@ -2608,9 +2607,9 @@ class ParserBase(object):
                     parserDict["replayCount"]=repeat+1
                     rtn, lastLine, parserDict, secDict = self.check_subparsers(
                             parser, lastLine, stopOnMatchRe, quitOnMatchRe,
-                            subParsers, ordered, parserDict, secDict, 
-                            onStartRunFunction, onQuitRunFunction, 
-                            onlySubParsersReadLine, parent=rank) 
+                            subParsers, ordered, parserDict, secDict,
+                            onStartRunFunction, onQuitRunFunction,
+                            onlySubParsersReadLine, parent=rank)
                     if secDict is not None:
                         parserDict.update(secDict)
                     if rtn is not True:
@@ -2621,9 +2620,9 @@ class ParserBase(object):
                                 break
                             else:
                                 rtn, lastLine, parserDict, secDict = self.check_subparsers(
-                                        parser, lastLine, stopOnMatchRe, quitOnMatchRe, 
-                                        subParsers, ordered, parserDict, secDict, 
-                                        onStartRunFunction, onQuitRunFunction, 
+                                        parser, lastLine, stopOnMatchRe, quitOnMatchRe,
+                                        subParsers, ordered, parserDict, secDict,
+                                        onStartRunFunction, onQuitRunFunction,
                                         onlySubParsersReadLine, parent=rank)
                                 if secDict is not None:
                                     parserDict.update(secDict)
@@ -2644,8 +2643,8 @@ class ParserBase(object):
         """
         return [r"(?:\||\s*)\s*(?:"
                 "|".join(
-                    ["%s(?:=|:)\s*(?P<%s>[0-9a-zA-Z_./\-]+)" % (fileNL.matchStr, 
-                        fileNL.metaHeader + '_' + fileNL.metaNameTag + '_' + fileNL.metaName) 
+                    [r"%s(?:=|:)\s*(?P<%s>[0-9a-zA-Z_./\-]+)" % (fileNL.matchStr,
+                        fileNL.metaHeader + '_' + fileNL.metaNameTag + '_' + fileNL.metaName)
                         for fileNL in self.fileDict.values()
                     ]) + ")"
                 ]
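
For reference, each entry of self.fileDict contributes one alternative of the form matchStr(=|:)value with a named capture group built from its meta name parts. A minimal sketch of how one such alternative behaves; the matchStr "-s" and group name "x_md_inout_file_topology" below are hypothetical, not taken from this module:

    import re

    # One hypothetical alternative of the generated pattern.
    pattern = re.compile(
        r"(?:\||\s*)\s*(?:"
        r"-s(?:=|:)\s*(?P<x_md_inout_file_topology>[0-9a-zA-Z_./\-]+)"
        r")")

    m = pattern.search("-s: topol.tpr -o traj.trr")
    if m:
        print(m.group("x_md_inout_file_topology"))  # prints: topol.tpr
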
diff --git a/common/python/nomadcore/smart_parser/SmartParserDictionary.py b/common/python/nomadcore/smart_parser/SmartParserDictionary.py
index 1b8818dc6e67c0ea62ab746d854609c1568218af..b25aa5af9de47e77c2489cf9d4f5fc9f85a0a90c 100644
--- a/common/python/nomadcore/smart_parser/SmartParserDictionary.py
+++ b/common/python/nomadcore/smart_parser/SmartParserDictionary.py
@@ -1,19 +1,18 @@
-import setup_paths
 
 def get_unitDict(keyname):
     """ Unit dictionary for convertions
 
         Unit names will be converted to values.
-        When defining units in translator dictionary, 
+        When defining units in the translator dictionary,
         the unit names in the dictionary should be used.
         The unit convertion values are written for SI units.
-        If you would like to change it, just add another key 
+        If you would like to change it, just add another key
         to the dictionary and change the key at parser base.
         Usage:
-           Natively support python language in definitions. 
-           You can use any python math operator and function. 
-           Moreover, you can use space or - for multiplication 
-           and ^ for power of values. 
+           Python syntax is supported natively in definitions:
+           you can use any Python math operator and function.
+           Moreover, space or - can be used for multiplication
+           and ^ for exponentiation.
         Example:
             kilogram/meter^2 can be written as
             kilo-gram/meter^2 or kilo-gram/meter**2
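
As a rough illustration of the unit-string syntax described in this docstring; the translation below is only a sketch under assumed unit factors, not the module's actual implementation:

    # Hypothetical unit factors; the real mapping is returned by get_unitDict.
    unit_dict = {"kilo": 1.0e3, "gram": 1.0e-3, "meter": 1.0}

    expr = "kilo-gram/meter^2"            # same string as kilo*gram/meter**2
    expr = expr.replace("^", "**").replace("-", "*").replace(" ", "*")
    value = eval(expr, {"__builtins__": {}}, unit_dict)
    print(value)                          # 1.0, i.e. kg/m^2 in SI base units
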
@@ -133,18 +132,18 @@ class MetaInfoMap(dict):
     matchStr=None # this string will be matched (can be altered after initialized)
     matchNames=None # Control list/tuple/dict/str for matchStr (See getDict_MetaStrInDict)
     nextMatch=None # Do not alter the match of item until the nextMatch string is matched
-    concatMatch=None # If there is an other match in list (Ex.:nextMatch) concatanate it 
+    concatMatch=None # If there is another match in the list (Ex.: nextMatch) concatenate it
     replaceTag=None # Replace the original tag with this value but still use the original matchStr
     matchWith=None # match control: Next word (NW), Previous word (PW), End of line(EOL), Until delimeter (UD)
     removeText=None # remove the given text to generate meta info name
     alsoMatch=None # also match the strings in this list with the given key in same line (any where)
     metaHeader=None # the general meta name header (See xxxDictionary for usage)
     metaName=None # this will be the meta info name that will be passed to backend and written output
-    metaNameTag=None # the meta info name body 
+    metaNameTag=None # the meta info name body
     metaInfoType=None # Float, string or other that have to be defined in meta info core
     value=None # The final value that will be passed to backend and written to output
     valueSize=None # The size of the value that will be pased to backend and written to output
-    sizeMetaName=None 
+    sizeMetaName=None
     depends=None # list/dict for altering values in this or other dictionaries
     lookupdict=None # the lookup dict for values in depends (can also be itself)
     subfunction=None # an extra update function before passing value to backend or write output
@@ -157,7 +156,7 @@ class MetaInfoMap(dict):
             if isinstance(arg, dict):
                 for k, v in arg.items():
                     if k in self:
-                        self[k] = v 
+                        self[k] = v
         if kwargs:
             for k, v in kwargs.items():
                 if k in self:
@@ -179,19 +178,19 @@ class FileInfoMap(dict):
     activeInfo=False
     infoPurpose=None
     fileName=None
-    fileFormat=None 
+    fileFormat=None
     fileSupplied=False
     fileHolder=None
-    nameTranslate=None 
-    matchStr=None 
-    matchNames=None 
-    metaHeader=None 
-    metaName=None 
+    nameTranslate=None
+    matchStr=None
+    matchNames=None
+    metaHeader=None
+    metaName=None
     metaNameTag=None
     metaInfoType=None
-    value=None 
-    valueSize=None 
-    sizeMetaName=None 
+    value=None
+    valueSize=None
+    sizeMetaName=None
     depends=None
     lookupdict=None
     subfunction=None
@@ -203,7 +202,7 @@ class FileInfoMap(dict):
             if isinstance(arg, dict):
                 for k, v in arg.items():
                     if k in self:
-                        self[k] = v 
+                        self[k] = v
         if kwargs:
             for k, v in kwargs.items():
                 if k in self:
@@ -292,30 +291,30 @@ class MapDictionary(dict):
         return [val.metaName for val in self.__dict__.values()]
 
 def getList_MetaStrInDict(sourceDict):
-    """Returns a list that includes all meta name 
+    """Returns a list that includes all meta name
        strings for the given dictionary.
-       Meta name strings are not actual meta names but 
+       Meta name strings are not actual meta names but
        used as the keywords in the parsing.
     """
     return [sourceDict[item].matchStr for item in sourceDict]
 
-def copyMetaDictToDict(sourceObj, sourceDict, sourceType=None, targetObj=None, 
+def copyMetaDictToDict(sourceObj, sourceDict, sourceType=None, targetObj=None,
                        targetDict=None, targetType=None, copyDict=None):
-    """ Copy values of a source dictionary to a target one 
+    """ Copy values of a source dictionary to a target one
         by matching the keys in bith dictionaries.
         Dictionary types can be python or SmartParser
 
         Inputs:
             sourceObj  (object) : python object that holds sourceDict
-            sourceDict (string) : the name of the source dictionary 
+            sourceDict (string) : the name of the source dictionary
             sourceType (string) : type can be 'standard' or 'smartparser'
             targetObj  (object) : python object that holds targetDict
                                   if None, sourceObj will be used
-            targetDict (string) : the name of the target dictionary 
+            targetDict (string) : the name of the target dictionary
                                   if None, sourceDict will be used
             targetType (string) : type can be 'standard' or 'smartparser'
                                   if targetDict is None, this equals to sourceType
-            copyDict (dict)     : includes items for mapping 
+            copyDict (dict)     : includes items for mapping
                                   keys from source to target
                                   Ex. : {"ener0" : "Energy", "step" : "Timestep"}
 
@@ -386,32 +385,32 @@ def copyMetaDictToDict(sourceObj, sourceDict, sourceType=None, targetObj=None,
             setattr(targetObj, tDict, targetDict)
     return rtn
 
-def setMetaStrInDict(objectName, sourceDict, mStr, mVal, matchExact=False, 
+def setMetaStrInDict(objectName, sourceDict, mStr, mVal, matchExact=False,
         caseSensitive=False, multipleMatch=True, matchFirst=None, matchLast=None):
     """Find mStr in a given sourceDict and set it to mVal.
-       If the value is set in a dictionary item, activeInfo of the 
+       If the value is set in a dictionary item, activeInfo of the
            item is also set to True
-     
-       Inputs: 
-           objectName (String) : Python object that holds this attribute 
+
+       Inputs:
+           objectName (String) : Python object that holds this attribute
                                  Ex. : self
            sourceDict (String) : The name string of the SmartParser dictionary
                                  Ex. : 'myDict'  (this will be self.myDict)
            mStr (String) : The string to be matched with the matchStr in dict.
            mVal (Any type that is accepted by metaInfoStorage) : The value
-       Output 
+       Output:
            A list with [Success, MatchKey, Status]:
                Success:  True : there is at least one match
                          False: there is an error
-               MatchKey       : a list with the matched 
+               MatchKey       : a list with the matched
                                   keys in the dict
-               Status :  -1   : there is no dictionary 
+               Status :  -1   : there is no dictionary
                                   with the given name
-                          0   : there is dict but there is no 
+                          0   : there is a dict but there is no
                                   item that matchs with mStr
-                          1   : there is a match in the dict 
+                          1   : there is a match in the dict
                                   with a successful value set
-                          2-N : there are multiple matches 
+                          2-N : there are multiple matches
                                   with successful value settings
                                   (Only available if multipleMatch is True)
     """
@@ -430,7 +429,7 @@ def setMetaStrInDict(objectName, sourceDict, mStr, mVal, matchExact=False,
                 matchLast = None
             else:
                 macthExact = False
-        for k,v in theDict.items(): 
+        for k,v in theDict.items():
             assign = False
             if matchExact is True:
                 if caseSensitive:
@@ -482,13 +481,13 @@ def setMetaStrInDict(objectName, sourceDict, mStr, mVal, matchExact=False,
     return rtnList
 
 def isMetaStrInDict(nameStr, sourceDict):
-    """Returns a list that includes all meta name 
+    """Returns a list that includes all meta name
        strings for the given dictionary.
-       Meta name strings are not actual meta names but 
+       Meta name strings are not actual meta names but
        used as the keywords in the parsing.
     """
     val = None
-    for k,v in sourceDict.items(): 
+    for k,v in sourceDict.items():
         if nameStr in v.matchStr:
             val = k
             break
@@ -496,9 +495,9 @@ def isMetaStrInDict(nameStr, sourceDict):
     #return [k for k,v in sourceDict.items() if nameStr in v.matchStr][0]
 
 def getDict_MetaStrInDict(sourceDict, nameList=None):
-    """Returns a dict that includes all meta name 
+    """Returns a dict that includes all meta name
        strings and corresponding values for the given dictionary.
-       Meta name strings are not actual meta names but 
+       Meta name strings are not actual meta names but
        used as the keywords in the parsing.
     """
     newDict = {}
@@ -506,26 +505,26 @@ def getDict_MetaStrInDict(sourceDict, nameList=None):
         if isinstance(nameList, (list, tuple)):
             for key in nameList:
                 if key in sourceDict:
-                    newDict.update({sourceDict[key].matchStr : key}) 
+                    newDict.update({sourceDict[key].matchStr : key})
         elif isinstance(nameList, dict):
             for key, value in nameList:
                 if value in sourceDict:
-                    newDict.update({key : value}) 
+                    newDict.update({key : value})
         elif isinstance(nameList, str):
             for key, value in sourceDict.items():
                 if nameList in sourceDict[key]:
                     if isinstance(sourceDict[key][nameList], (list, tuple)):
                         for item in sourceDict[key][nameList]:
-                            newDict.update({item : key}) 
+                            newDict.update({item : key})
                     elif isinstance(sourceDict[key][nameList], str):
-                        newDict.update({sourceDict[key][nameList] : key}) 
+                        newDict.update({sourceDict[key][nameList] : key})
                     else:
-                        newDict.update({sourceDict[key].matchStr : key}) 
+                        newDict.update({sourceDict[key].matchStr : key})
                 else:
-                    newDict.update({sourceDict[key].matchStr : key}) 
+                    newDict.update({sourceDict[key].matchStr : key})
     else:
         for key, value in sourceDict.items():
-            newDict.update({sourceDict[key].matchStr : key}) 
+            newDict.update({sourceDict[key].matchStr : key})
     return newDict
 
 
diff --git a/setup.py b/setup.py
index f96d5c253876eeb6e5ed1b78dccf7bcc08136630..551fdea0f4087d2b8d48f3f849be9efaae00561c 100644
--- a/setup.py
+++ b/setup.py
@@ -4,6 +4,7 @@ also just setup the paths and necessary dependencies in any other way you wish.
 """
 from setuptools import setup, find_packages
 
+print('###', find_packages("common/python"))
 
 #===============================================================================
 def main():