diff --git a/tfields/core.py b/tfields/core.py
index 2eef22e83a7b2d8bbc81e884782152e91eda40a6..9c30abb37d3174e8dceca379f41b64c45e5d467f 100644
--- a/tfields/core.py
+++ b/tfields/core.py
@@ -1,4 +1,4 @@
-#!/usr/bin/env
+#!/usr/bin/env python
+# pylint: disable=too-many-lines,super-with-arguments
 # encoding: utf-8
 """
 Author:     Daniel Boeckenhoff
@@ -15,16 +15,16 @@ Notes:
 # builtin
 import warnings
 import pathlib
-from six import string_types
 from contextlib import contextmanager
 from collections import Counter
 from copy import deepcopy
 import logging
 
 # 3rd party
 import numpy as np
 import sympy
-import scipy as sp
+import scipy.spatial
 import sortedcontainers
 import rna
+from six import string_types
 
@@ -51,15 +51,18 @@ def dim(tensor):
     return tensor.shape[1]
 
 
-class AbstractObject(object):
+class AbstractObject(object):  # pylint: disable=useless-object-inheritance
+    """
+    Abstract base class for all tfields objects, implementing polymorphic
+    save/load and dict (de)serialization behaviour
+    """
+
     def save(self, path, *args, **kwargs):
         """
         Saving by redirecting to the correct save method depending on path
 
         Args:
             path (str or buffer)
-            *args:
-                forwarded to extension specific method
+            *args: additional path parts, joined with path
             **kwargs:
                 extension (str): only needed if path is buffer
                 ... remaining:forwarded to extension specific method
@@ -73,14 +76,14 @@ class AbstractObject(object):
 
         # get the save method
         try:
-            save_method = getattr(self, "_save_{extension}".format(**locals()))
-        except AttributeError:
+            save_method = getattr(self, "_save_" + extension)
+        except AttributeError as err:
             raise NotImplementedError(
                 "Can not find save method for extension: "
                 "{extension}.".format(**locals())
-            )
+            ) from err
 
-        path = rna.path.resolve(path)
+        path = rna.path.resolve(path, *args)
         rna.path.mkdir(path)
         return save_method(path, **kwargs)
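+    # Usage sketch (file name illustrative): tensors.save("out/t.npz")
+    # resolves the extension to "npz" and dispatches to _save_npz; a
+    # subclass adds a format by defining _save_<extension>/_load_<extension>.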
 
@@ -91,8 +94,7 @@ class AbstractObject(object):
 
         Args:
             path (str or buffer)
-            *args:
-                forwarded to extension specific method
+            *args: forwarded to the extension specific load method
             **kwargs:
                 extension (str): only needed if path is buffer
                 ... remaining:forwarded to extension specific method
@@ -106,14 +108,14 @@ class AbstractObject(object):
 
         try:
             load_method = getattr(cls, "_load_{e}".format(e=extension))
-        except AttributeError:
+        except AttributeError as err:
             raise NotImplementedError(
                 "Can not find load method for extension: "
                 "{extension}.".format(**locals())
-            )
+            ) from err
         return load_method(path, *args, **kwargs)
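+    # Round-trip sketch (path illustrative): obj.save("data.npz") followed
+    # by type(obj).load("data.npz") restores an equal instance via the
+    # _as_dict/_from_dict machinery below.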
 
-    def _save_npz(self, path, **kwargs):
+    def _save_npz(self, path):
         """
         Args:
             path (open file or str/unicode): destination to save file to.
@@ -164,27 +166,40 @@ class AbstractObject(object):
         Factory method
         Given a path to a npz file, construct the object
         """
-        # TODO: think about allow_pickle, wheter it really should be True or
+        # Note: think about allow_pickle, whether it really should be True or
         # whether we could avoid pickling (potential security issue)
         load_kwargs.setdefault("allow_pickle", True)
         np_file = np.load(path, **load_kwargs)
-        d = dict(np_file)
-        d.pop("tfields_version", None)
-        return cls._from_dict(d)
+        content = dict(np_file)
+        content.pop("tfields_version", None)
+        return cls._from_dict(content)
 
-    def _args(self) -> tuple:
+    def _args(self) -> tuple:  # pylint: disable=no-self-use
+        """
+    Used to allow the polymorphic signature Class(obj) as a copy/cast constructor
+        """
         return tuple()
 
-    def _kwargs(self) -> dict:
+    def _kwargs(self) -> dict:  # pylint: disable=no-self-use
+        """
+    Used to allow the polymorphic signature Class(obj) as a copy/cast constructor
+        """
         return dict()
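+    # Copy/cast-constructor sketch (illustrative): type(obj)(obj) rebuilds
+    # the instance, since __new__ consumes what _args()/_kwargs() expose.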
 
     _HIERARCHY_SEPARATOR = "::"
 
-    def _as_dict(self):
-        d = {}
+    def _as_dict(self) -> dict:
+        """
+        Get an object representation in a dict format. This is necessary e.g. for
+        saving the full object uniquely in the npz format
+
+        Returns:
+            dict: object packed as nested dictionary
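+
+        Example (sketch; exact keys depend on the subclass's _args/_kwargs):
+            {"type": "Tensors", "args::0": <bulk>, ...} with the hierarchy
+            levels joined by the "::" separator.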
+        """
+        content = {}
 
         # type
-        d["type"] = type(self).__name__
+        content["type"] = type(self).__name__
 
         # args and kwargs
         for base_attr, iterable in [
@@ -194,24 +209,26 @@ class AbstractObject(object):
             for attr, value in iterable:
                 attr = base_attr + self._HIERARCHY_SEPARATOR + attr
                 if hasattr(value, "_as_dict"):
-                    part_dict = value._as_dict()
+                    part_dict = value._as_dict()  # pylint: disable=protected-access
                     for part_attr, part_value in part_dict.items():
-                        d[attr + self._HIERARCHY_SEPARATOR + part_attr] = part_value
+                        content[
+                            attr + self._HIERARCHY_SEPARATOR + part_attr
+                        ] = part_value
                 else:
-                    d[attr] = value
-        return d
+                    content[attr] = value
+        return content
 
     @classmethod
-    def _from_dict(cls, d: dict):
+    def _from_dict(cls, content: dict):
         try:
-            d.pop("type")
+            content.pop("type")
         except KeyError:
             # legacy
-            return cls._from_dict_legacy(**d)
+            return cls._from_dict_legacy(**content)
 
         here = {}
-        for string in d:  # TOO no sortelist
-            value = d[string]
+        for string in content:  # TODO: no sorted list
+            value = content[string]
 
             attr, _, end = string.partition(cls._HIERARCHY_SEPARATOR)
             key, _, end = end.partition(cls._HIERARCHY_SEPARATOR)
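+            # e.g. "args::0::type" -> attr "args", key "0", end "type";
+            # a leaf key like "args::0" leaves end == "" (resolved below).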
@@ -221,9 +238,7 @@ class AbstractObject(object):
                 here[attr][key] = {}
             here[attr][key][end] = value
 
-        """
-        Do the recursion
-        """
+        # Do the recursion
         for attr in here:
             for key in here[attr]:
                 if "type" in here[attr][key]:
@@ -235,14 +250,14 @@ class AbstractObject(object):
                         # Found under nt. ???
                         obj_type = obj_type.decode("UTF-8")
                     obj_type = getattr(tfields, obj_type)
-                    attr_value = obj_type._from_dict(here[attr][key])
+                    attr_value = obj_type._from_dict(  # noqa: E501 pylint: disable=protected-access
+                        here[attr][key]
+                    )
                 else:  # if len(here[attr][key]) == 1:
                     attr_value = here[attr][key].pop("")
                 here[attr][key] = attr_value
 
-        """
-        Build the generic way
-        """
+        # Build the generic way
         args = here.pop("args", tuple())
         args = tuple(args[key] for key in sorted(args))
         kwargs = here.pop("kwargs", {})
@@ -251,17 +266,15 @@ class AbstractObject(object):
         return obj
 
     @classmethod
-    def _from_dict_legacy(cls, **d):
+    def _from_dict_legacy(cls, **content):
         """
         legacy method of _from_dict - Opposite of old _as_dict method
         which is overridden in this version
         """
         list_dict = {}
         kwargs = {}
-        """
-        De-Flatten the first layer of lists
-        """
-        for key in sorted(list(d)):
+        # De-Flatten the first layer of lists
+        for key in sorted(list(content)):
             if "::" in key:
                 attr, _, end = key.partition("::")
                 if attr not in list_dict:
@@ -273,13 +286,11 @@ class AbstractObject(object):
                 index = int(index)
                 if index not in list_dict[attr]:
                     list_dict[attr][index] = {}
-                list_dict[attr][index][end] = d[key]
+                list_dict[attr][index][end] = content[key]
             else:
-                kwargs[key] = d[key]
+                kwargs[key] = content[key]
 
-        """
-        Build the lists (recursively)
-        """
+        # Build the lists (recursively)
         for key in list(list_dict):
             sub_dict = list_dict[key]
             list_dict[key] = []
@@ -291,19 +302,19 @@ class AbstractObject(object):
                     # Found under nt. ???
                     bulk_type = bulk_type.decode("UTF-8")
                 bulk_type = getattr(tfields, bulk_type)
-                list_dict[key].append(bulk_type._from_dict_legacy(**sub_dict[index]))
-
-        with cls._bypass_setters("fields", demand_existence=False):
-            """
-            Build the normal way
-            """
+                list_dict[key].append(
+                    bulk_type._from_dict_legacy(**sub_dict[index])  # noqa: E501 pylint: disable=protected-access
+                )
+
+        with cls._bypass_setters(  # noqa: E501 pylint: disable=protected-access,no-member
+            "fields", demand_existence=False
+        ):
+            # Build the normal way
             bulk = kwargs.pop("bulk")
             bulk_type = kwargs.pop("bulk_type")
             obj = cls.__new__(cls, bulk, **kwargs)
 
-            """
-            Set list attributes
-            """
+            # Set list attributes
             for attr, list_value in list_dict.items():
                 setattr(obj, attr, list_value)
         return obj
@@ -354,8 +365,10 @@ class AbstractNdarray(np.ndarray, AbstractObject):
         for attr in self._iter_slots():
             setattr(self, attr, getattr(obj, attr, None))
 
-    def __array_wrap__(self, out_arr, context=None):
-        return np.ndarray.__array_wrap__(self, out_arr, context)
+    def __array_wrap__(self, out_arr, context=None):  # pylint: disable=arguments-differ
+        return np.ndarray.__array_wrap__(  # noqa: E501 pylint: disable=too-many-function-args
+            self, out_arr, context
+        )
 
     @classmethod
     def _iter_slots(cls):
@@ -384,7 +397,7 @@ class AbstractNdarray(np.ndarray, AbstractObject):
                 except Exception as err:
                     raise ValueError(
                         str(attr) + str(dtype) + str(kwargs[attr]) + str(err)
-                    )
+                    ) from err
 
     def __setattr__(self, name, value):
         if name in self.__slots__:
@@ -460,11 +473,9 @@ class AbstractNdarray(np.ndarray, AbstractObject):
 
         # set the __slot__ attributes
         valid_slot_attrs = list(self._iter_slots())
-        """
-        attributes that have been added later have not been pickled with the
-        full information and thus need to be excluded from the __setstate__
-        need to be in the same order as they have been added to __slots__
-        """
+        # attributes that have been added later have not been pickled with the full
+        # information and thus need to be excluded from the __setstate__. They need
+        # to be in the same order as they have been added to __slots__.
         added_slot_attrs = ["name"]
         n_np = 5  # number of numpy array states
         n_old = len(valid_slot_attrs) - len(state[n_np:])
@@ -528,7 +539,7 @@ class AbstractNdarray(np.ndarray, AbstractObject):
         for slot_index, setter in zip(slot_indices, setters):
             cls.__slot_setters__[slot_index] = setter
 
-    def copy(self, *args, **kwargs):
+    def copy(self, **kwargs):  # pylint: disable=arguments-differ
         """
         The standard ndarray copy does not copy slots. Correct for this.
 
@@ -555,11 +566,15 @@ class AbstractNdarray(np.ndarray, AbstractObject):
             False
 
         """
+        if kwargs:
+            raise NotImplementedError(
+                "Copying with arguments {kwargs} not yet supported".format(
+                    kwargs=kwargs
+                )
+            )
         # works with __reduce__ / __setstate__
         return deepcopy(self)
 
 
-class Tensors(AbstractNdarray):
+class Tensors(AbstractNdarray):  # pylint: disable=too-many-public-methods
     """
     Set of tensors with the same basis.
 
@@ -646,12 +661,12 @@ class Tensors(AbstractNdarray):
     __slot_defaults__ = ["cartesian"]
     __slot_setters__ = [tfields.bases.get_coord_system_name]
 
-    def __new__(cls, tensors, **kwargs):
+    def __new__(cls, tensors, **kwargs):  # pylint: disable=too-many-branches
         dtype = kwargs.pop("dtype", None)
         order = kwargs.pop("order", None)
-        dim = kwargs.pop("dim", None)
+        dim = kwargs.pop("dim", None)  # pylint: disable=redefined-outer-name
 
-        """ copy constructor extracts the kwargs from tensors"""
+        # copy constructor extracts the kwargs from tensors
         if issubclass(type(tensors), Tensors):
             if dim is not None:
                 dim = tensors.dim
@@ -669,15 +684,15 @@ class Tensors(AbstractNdarray):
                 else:
                     dtype = np.float64
 
-        """ demand iterable structure """
+        # demand iterable structure
         try:
             len(tensors)
-        except TypeError:
+        except TypeError as err:
             raise TypeError(
                 "Iterable structure necessary." " Got {tensors}".format(**locals())
-            )
+            ) from err
 
-        """ process empty inputs """
+        # process empty inputs
         if len(tensors) == 0:
             if issubclass(type(tensors), tfields.Tensors):
                 tensors = np.empty(tensors.shape, dtype=tensors.dtype)
@@ -694,9 +709,9 @@ class Tensors(AbstractNdarray):
         tensors = np.asarray(tensors, dtype=dtype, order=order)
         obj = tensors.view(cls)
 
-        """ check dimension(s) """
-        for d in obj.shape[1:]:
-            if not d == obj.dim:
+        # check dimension(s)
+        for obj_dim in obj.shape[1:]:
+            if obj_dim != obj.dim:
                 raise ValueError(
                     "Dimensions are inconstistent. "
                     "Manifold dimension is {obj.dim}. "
@@ -711,10 +726,10 @@ class Tensors(AbstractNdarray):
                     " {dim} demanded.".format(**locals())
                 )
 
-        """ update kwargs with defaults from slots """
+        # update kwargs with defaults from slots
         cls._update_slot_kwargs(kwargs)
 
-        """ set kwargs to slots attributes """
+        # set kwargs to slots attributes
         for attr in kwargs:
             if attr not in cls._iter_slots():
                 raise AttributeError(
@@ -809,14 +824,14 @@ class Tensors(AbstractNdarray):
 
         """
 
-        """ get most frequent coord_sys or predefined coord_sys """
+        # get most frequent coord_sys or predefined coord_sys
         coord_sys = kwargs.get("coord_sys", None)
         return_templates = kwargs.pop("return_templates", False)
         if coord_sys is None:
             bases = []
-            for t in objects:
+            for tensors in objects:
                 try:
-                    bases.append(t.coord_sys)
+                    bases.append(tensors.coord_sys)
                 except AttributeError:
                     pass
             if bases:
@@ -827,22 +842,22 @@ class Tensors(AbstractNdarray):
                 default = cls.__slot_defaults__[cls.__slots__.index("coord_sys")]
                 kwargs["coord_sys"] = default
 
-        """ transform all raw inputs to cls type with correct coord_sys. Also
-        automatically make a copy of those instances that are of the correct
-        type already."""
+        # transform all raw inputs to cls type with correct coord_sys. Also
+        # automatically make a copy of those instances that are of the correct
+        # type already.
         objects = [cls.__new__(cls, t, **kwargs) for t in objects]
 
-        """ check rank and dimension equality """
-        if not len(set([t.rank for t in objects])) == 1:
+        # check rank and dimension equality
+        if len(set(t.rank for t in objects)) != 1:
             raise TypeError("Tensors must have the same rank for merging.")
-        if not len(set([t.dim for t in objects])) == 1:
+        if len(set(t.dim for t in objects)) != 1:
             raise TypeError("Tensors must have the same dimension for merging.")
 
-        """ merge all objects """
-        remainingObjects = objects[1:] or []
+        # merge all objects
+        remaining_objects = objects[1:]
         tensors = objects[0]
 
-        for i, obj in enumerate(remainingObjects):
+        for obj in remaining_objects:
             tensors = np.append(tensors, obj, axis=0)
 
         if len(tensors) == 0 and not kwargs.get("dim", None):
@@ -851,7 +866,7 @@ class Tensors(AbstractNdarray):
             kwargs["dim"] = dim(objects[0])
 
         inst = cls.__new__(cls, tensors, **kwargs)
-        if not return_templates:
+        if not return_templates:  # pylint: disable=no-else-return
             return inst
         else:
             tensor_lengths = [len(o) for o in objects]
@@ -1030,14 +1045,16 @@ class Tensors(AbstractNdarray):
             >>> assert t_cyl[0, 0] == 3
 
         """
-        #           scalars                 empty             already there
-        if self.rank == 0 or self.shape[0] == 0 or self.coord_sys == coord_sys:
-            self.coord_sys = coord_sys
+        # scalars, empty, or already in the target system
+        if (
+            self.rank == 0 or self.shape[0] == 0 or self.coord_sys == coord_sys  # noqa: E501 pylint: disable=access-member-before-definition
+        ):
+            self.coord_sys = coord_sys  # pylint: disable=attribute-defined-outside-init
             return
 
         tfields.bases.transform(self, self.coord_sys, coord_sys)
         # self[:] = tfields.bases.transform(self, self.coord_sys, coord_sys)
-        self.coord_sys = coord_sys
+        self.coord_sys = coord_sys  # pylint: disable=attribute-defined-outside-init
 
     @contextmanager
     def tmp_transform(self, coord_sys):
@@ -1057,15 +1074,15 @@ class Tensors(AbstractNdarray):
             >>> assert p.coord_sys == tfields.bases.SPHERICAL
 
         """
-        baseBefore = self.coord_sys
-        if baseBefore == coord_sys:
+        base_before = self.coord_sys
+        if base_before == coord_sys:
             yield
         else:
             self.transform(coord_sys)
 
             yield
 
-            self.transform(baseBefore)
+            self.transform(base_before)
 
     def mirror(self, coordinate, condition=None):
         """
@@ -1102,15 +1119,15 @@ class Tensors(AbstractNdarray):
             condition = np.array([True for i in range(len(self))])
         elif isinstance(condition, sympy.Basic):
             condition = self.evalf(condition)
-        if isinstance(coordinate, list) or isinstance(coordinate, tuple):
-            for c in coordinate:
-                self.mirror(c, condition=condition)
+        if isinstance(coordinate, (list, tuple)):
+            for coord in coordinate:
+                self.mirror(coord, condition=condition)
         elif isinstance(coordinate, int):
             self[:, coordinate][condition] *= -1
         else:
             raise TypeError()
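+        # e.g. mirror([0, 2]) flips the x and z coordinates; a sympy
+        # condition restricts the flip to the points where it holds (sketch).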
 
-    def to_segment(
+    def to_segment(  # pylint: disable=too-many-arguments
         self,
         segment,
         num_segments,
@@ -1162,7 +1179,9 @@ class Tensors(AbstractNdarray):
                 + segment * periodicity / num_segments
             )
 
-    def equal(self, other, rtol=None, atol=None, equal_nan=False, return_bool=True):
+    def equal(  # pylint: disable=too-many-arguments
+        self, other, rtol=None, atol=None, equal_nan=False, return_bool=True
+    ):
         """
         Evaluate, whether the instance has the same content as other.
 
@@ -1176,23 +1195,25 @@ class Tensors(AbstractNdarray):
         if issubclass(type(other), Tensors) and self.coord_sys != other.coord_sys:
             other = other.copy()
             other.transform(self.coord_sys)
-        x, y = np.asarray(self), np.asarray(other)
+        self_array, other_array = np.asarray(self), np.asarray(other)
         if rtol is None and atol is None:
-            mask = x == y
+            mask = self_array == other_array
             if equal_nan:
-                both_nan = np.isnan(x) & np.isnan(y)
+                both_nan = np.isnan(self_array) & np.isnan(other_array)
                 mask[both_nan] = both_nan[both_nan]
         else:
             if rtol is None:
                 rtol = 0.0
             if atol is None:
                 atol = 0.0
-            mask = np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
+            mask = np.isclose(
+                self_array, other_array, rtol=rtol, atol=atol, equal_nan=equal_nan
+            )
         if return_bool:
             return bool(np.all(mask))
         return mask
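+    # e.g. t.equal(t2, atol=1e-9) returns a bool; return_bool=False yields
+    # the element-wise mask instead (usage sketch).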
 
-    def contains(self, other, **kwargs):
+    def contains(self, other):
         """
         Inspired by a speed argument @
         stackoverflow.com/questions/14766194/testing-whether-a-numpy-array-contains-a-given-row
@@ -1231,20 +1252,20 @@ class Tensors(AbstractNdarray):
             array([1, 3])
 
         """
-        x, y = np.asarray(self), np.asarray(tensor)
+        self_array, other_array = np.asarray(self), np.asarray(tensor)
         if rtol is None and atol is None:
             equal_method = np.equal
         else:
             equal_method = lambda a, b: np.isclose(a, b, rtol=rtol, atol=atol)  # NOQA
 
-        """
-        inspired by https://stackoverflow.com/questions/19228295/\
-find-ordered-vector-in-numpy-array
-        """
+        # inspired by
+        # https://stackoverflow.com/questions/19228295/find-ordered-vector-in-numpy-array
         if self.rank == 0:
-            indices = np.where(equal_method((x - y), 0))[0]
+            indices = np.where(equal_method((self_array - other_array), 0))[0]
         elif self.rank == 1:
-            indices = np.where(np.all(equal_method((x - y), 0), axis=1))[0]
+            indices = np.where(
+                np.all(equal_method((self_array - other_array), 0), axis=1)
+            )[0]
         else:
             raise NotImplementedError()
         return indices
@@ -1318,7 +1339,9 @@ find-ordered-vector-in-numpy-array
         """
         with other.tmp_transform(self.coord_sys):
             # balanced_tree option gives huge speedup!
-            kd_tree = sp.spatial.cKDTree(other, 1000, balanced_tree=False)
+            kd_tree = scipy.spatial.cKDTree(  # pylint: disable=no-member
+                other, 1000, balanced_tree=False
+            )
             res = kd_tree.query(self, **kwargs)
             array = res[1]
 
@@ -1390,22 +1413,15 @@ find-ordered-vector-in-numpy-array
         points, extending the object it should cut. This becomes relevant for
         Mesh3D when adding vertices at the edge of the cut is necessary.
         """
-        # TODO: in merged return np.empty() for tensors to save space.
-        # use tensors only when adding. I think this function already suits the
-        # needs
-
         # Redirect fields
         fields = []
         if template.fields and issubclass(type(self), TensorFields):
             template_field = np.array(template.fields[0])
             if len(self) > 0:
-                """
-                if new vertices have been created in the template, it is
-                in principle unclear what fields we have to refer to.
-                Thus in creating the template, we gave np.nan.
-                To make it fast, we replace nan with 0 as a dummy and correct
-                the field entries afterwards with np.nan.
-                """
+                # if new vertices have been created in the template, it is in principle unclear
+                # what fields we have to refer to. Thus in creating the template, we gave np.nan.
+                # To make it fast, we replace nan with 0 as a dummy and correct the field entries
+                # afterwards with np.nan.
                 nan_mask = np.isnan(template_field)
                 template_field[nan_mask] = 0  # dummy reference to index 0.
                 template_field = template_field.astype(int)
@@ -1491,7 +1507,7 @@ find-ordered-vector-in-numpy-array
         Args:
             other(Iterable)
             **kwargs:
-                ... is forwarded to sp.spatial.distance.cdist
+                ... is forwarded to scipy.spatial.distance.cdist
 
         Examples:
             >>> import tfields
@@ -1510,7 +1526,7 @@ find-ordered-vector-in-numpy-array
         if issubclass(type(other), Tensors) and self.coord_sys != other.coord_sys:
             other = other.copy()
             other.transform(self.coord_sys)
-        return sp.spatial.distance.cdist(self, other, **kwargs)
+        return scipy.spatial.distance.cdist(self, other, **kwargs)
 
     def min_dists(self, other=None, **kwargs):
         """
@@ -1519,7 +1535,7 @@ find-ordered-vector-in-numpy-array
             **kwargs:
                 memory_saving (bool): for very large array comparisons
                     default False
-                ... rest is forwarded to sp.spatial.distance.cdist
+                ... rest is forwarded to scipy.spatial.distance.cdist
 
         Returns:
             np.array: minimal distances of self to other
@@ -1554,13 +1570,13 @@ find-ordered-vector-in-numpy-array
         try:
             if memory_saving:
                 raise MemoryError()
-            d = self.distances(other, **kwargs)
-            return d[d > 0].reshape(d.shape[0], -1).min(axis=1)
+            dists = self.distances(other, **kwargs)
+            return dists[dists > 0].reshape(dists.shape[0], -1).min(axis=1)
         except MemoryError:
             min_dists = np.empty(self.shape[0])
             for i, point in enumerate(np.array(other)):
-                d = self.distances([point], **kwargs)
-                min_dists[i] = d[d > 0].reshape(-1).min()
+                dists = self.distances([point], **kwargs)
+                min_dists[i] = dists[dists > 0].reshape(-1).min()
             return min_dists
 
     def epsilon_neighbourhood(self, epsilon):
@@ -1583,8 +1599,8 @@ find-ordered-vector-in-numpy-array
         """
         indices = np.arange(self.shape[0])
         dists = self.distances(self)  # this takes long
-        distsInEpsilon = dists <= epsilon
-        indices = [indices[die] for die in distsInEpsilon]  # this takes long
+        dists_in_epsilon = dists <= epsilon
+        indices = [indices[die] for die in dists_in_epsilon]  # this takes long
         return indices
 
     def _weights(self, weights, rigid=True):
@@ -1623,8 +1639,8 @@ find-ordered-vector-in-numpy-array
         idx = evalfs.argsort()[::-1]
         evalfs = evalfs[idx]
         evecs = evecs[:, idx]
-        e = np.concatenate((evecs, evalfs.reshape(1, 3)))
-        return e.T.reshape(
+        res = np.concatenate((evecs, evalfs.reshape(1, 3)))
+        return res.T.reshape(
             12,
         )
 
@@ -1646,7 +1662,7 @@ find-ordered-vector-in-numpy-array
         """
         Forwarding to rna.plotting.plot_array
         """
-        artist = rna.plotting.plot_array(self, **kwargs)
+        artist = rna.plotting.plot_array(self, **kwargs)  # pylint: disable=no-member
         return artist
 
 
@@ -1787,13 +1803,13 @@ class TensorFields(Tensors):
                 )
         return obj
 
-    def _args(self):
+    def _args(self) -> tuple:
         return super()._args() + tuple(self.fields)
 
-    def _kwargs(self):
-        d = super()._kwargs()
-        d.pop("fields")
-        return d
+    def _kwargs(self) -> dict:
+        content = super()._kwargs()
+        content.pop("fields")
+        return content
 
     def __getitem__(self, index):
         """
@@ -1844,7 +1860,7 @@ class TensorFields(Tensors):
                         item.fields = [
                             field.__getitem__(index) for field in item.fields
                         ]
-        except IndexError as err:  # noqa: F841
+        except IndexError as err:  # noqa: F841 pylint: disable=possibly-unused-variable
             warnings.warn(
                 "Index error occured for field.__getitem__. Error "
                 "message: {err}".format(**locals())
@@ -1890,8 +1906,10 @@ class TensorFields(Tensors):
     @classmethod
     def merged(cls, *objects, **kwargs):
         if not all([isinstance(o, cls) for o in objects]):
-            types = [type(o) for o in objects]
-            # TODO: could allow if all face_fields are none
+            types = [  # pylint: disable=possibly-unused-variable
+                type(o) for o in objects
+            ]
+            # Note: could allow if all map_fields are none
             raise TypeError(
                 "Merge constructor only accepts {cls} instances."
                 "Got objects of types {types} instead.".format(**locals())
@@ -1912,7 +1930,7 @@ class TensorFields(Tensors):
                 )
                 fields.append(field)
         inst = cls.__new__(cls, inst, *fields)
-        if return_templates:
+        if return_templates:  # pylint: disable=no-else-return
             return inst, templates
         else:
             return inst
@@ -1946,7 +1964,7 @@ class TensorFields(Tensors):
         for i, name in enumerate(names):
             self.fields[i].name = name
 
-    def equal(self, other, **kwargs):
+    def equal(self, other, **kwargs):  # pylint: disable=arguments-differ
         """
         Test, whether the instance has the same content as other.
 
@@ -1957,16 +1975,15 @@ class TensorFields(Tensors):
         """
         if not issubclass(type(other), Tensors):
             return super(TensorFields, self).equal(other, **kwargs)
-        else:
-            with other.tmp_transform(self.coord_sys):
-                mask = super(TensorFields, self).equal(other, **kwargs)
-                if issubclass(type(other), TensorFields):
-                    if len(self.fields) != len(other.fields):
-                        mask &= False
-                    else:
-                        for i, field in enumerate(self.fields):
-                            mask &= field.equal(other.fields[i], **kwargs)
-                return mask
+        with other.tmp_transform(self.coord_sys):
+            mask = super(TensorFields, self).equal(other, **kwargs)
+            if issubclass(type(other), TensorFields):
+                if len(self.fields) != len(other.fields):
+                    mask &= False
+                else:
+                    for i, field in enumerate(self.fields):
+                        mask &= field.equal(other.fields[i], **kwargs)
+            return mask
 
     def _weights(self, weights, rigid=True):
         """
@@ -1999,7 +2016,9 @@ class TensorFields(Tensors):
                     "different dimension. No coord_sys check performed."
                 )
             if field.dim <= 3:
-                artist = rna.plotting.plot_tensor_field(self, field, **kwargs)
+                artist = rna.plotting.plot_tensor_field(  # noqa: E501 pylint: disable=no-member
+                    self, field, **kwargs
+                )
             else:
                 raise NotImplementedError(
                     "Field of dimension {field.dim}".format(**locals())
@@ -2008,6 +2027,11 @@ class TensorFields(Tensors):
 
 
 class Fields(list, AbstractObject):
+    """
+    Container for fields which should be attached to tensors with the <>.fields attribute to make
+    them tensor fields
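+
+    Sketch: TensorFields(tensors, field_a, field_b) stores both fields in
+    such a container, retrievable as <instance>.fields.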
+    """
+
     def _args(self):
         return super()._args() + tuple(self)
 
@@ -2049,6 +2073,9 @@ class Container(Fields):
 
     @property
     def items(self):
+        """
+        items of the container as a list
+        """
         return list(self)
 
     @items.setter
@@ -2095,25 +2122,25 @@ class Maps(sortedcontainers.SortedDict, AbstractObject):
                 if issubclass(type(entry), tuple):
                     if np.issubdtype(type(entry[0]), np.integer):
                         # Maps([(key, value), ...]), Maps({key: value, ...})
-                        mp = self.to_map(entry[1], copy=True)
+                        maps = self.to_map(entry[1], copy=True)
                         dimension = entry[0]
                     else:
                         # Maps([(tensors, field1, field2), ...])
-                        mp = self.to_map(*entry, copy=True)
+                        maps = self.to_map(*entry, copy=True)
                 else:
                     # Maps([mp, mp, ...])
-                    mp = self.to_map(entry, copy=True)
+                    maps = self.to_map(entry, copy=True)
 
                 if dimension is None:
-                    dimension = dim(mp)
-                new_args.append((dimension, mp))
+                    dimension = dim(maps)
+                new_args.append((dimension, maps))
 
             args = (new_args,)
 
         super().__init__(*args, **kwargs)
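+    # Accepted forms (sketch): Maps({3: mp}), Maps([(3, mp)]), Maps([mp]);
+    # an omitted dimension key defaults to dim(mp).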
 
     @staticmethod
-    def to_map(mp, *fields, copy=False, **kwargs):
+    def to_map(mp, *fields, copy=False, **kwargs):  # pylint: disable=invalid-name
         """
         Args:
             mp (TensorFields)
@@ -2137,7 +2164,7 @@ class Maps(sortedcontainers.SortedDict, AbstractObject):
         super().__setitem__(dimension, mp)
 
     def _args(self):
-        return super()._args() + ([(k, v) for k, v in self.items()],)
+        return super()._args() + (list(self.items()),)
 
     def equal(self, other, **kwargs):
         """
@@ -2272,17 +2299,19 @@ class TensorMaps(TensorFields):
                     map_delete_mask = np.full(
                         (len(self.maps[map_dim]),), False, dtype=bool
                     )
-                    for i, mp in enumerate(self.maps[map_dim]):
-                        for index in mp:
-                            if index in delete_indices:
+                    for i, mp in enumerate(  # pylint: disable=invalid-name
+                        self.maps[map_dim]
+                    ):
+                        for node_index in mp:
+                            if node_index in delete_indices:
                                 map_delete_mask[i] = True
                                 break
                     map_mask = ~map_delete_mask
 
                     # build the correction counters
                     move_up_counter = np.zeros(self.maps[map_dim].shape, dtype=int)
-                    for p in delete_indices:
-                        move_up_counter[self.maps[map_dim] > p] -= 1
+                    for delete_index in delete_indices:
+                        move_up_counter[self.maps[map_dim] > delete_index] -= 1
 
                     item.maps[map_dim] = (self.maps[map_dim] + move_up_counter)[
                         map_mask
@@ -2293,7 +2322,7 @@ class TensorMaps(TensorFields):
     @classmethod
     def merged(cls, *objects, **kwargs):
         if not all([isinstance(o, cls) for o in objects]):
-            # TODO: could allow if all face_fields are none
+            # Note: could allow if all face_fields are none
             raise TypeError(
                 "Merge constructor only accepts {cls} instances.".format(**locals())
             )
@@ -2309,8 +2338,8 @@ class TensorMaps(TensorFields):
 
         dim_maps_dict = {}  # {dim: {i: mp}}
         for i, obj in enumerate(objects):
-            for dimension, mp in obj.maps.items():
-                mp = mp + cum_tensor_lengths[i]
+            for dimension, mp in obj.maps.items():  # pylint: disable=invalid-name
+                mp = mp + cum_tensor_lengths[i]  # pylint: disable=invalid-name
                 if dimension not in dim_maps_dict:
                     dim_maps_dict[dimension] = {}
                 dim_maps_dict[dimension][i] = mp
@@ -2326,17 +2355,20 @@ class TensorMaps(TensorFields):
                 return_templates=return_templates,
             )
             if return_templates:
-                mp, dimension_map_templates = return_value
+                mp, dimension_map_templates = return_value  # noqa: E501 pylint: disable=invalid-name
                 for i in range(len(objects)):
                     template_maps_list[i].append(
                         (dimension, dimension_map_templates[i])
                     )
             else:
-                mp = return_value
+                mp = return_value  # pylint: disable=invalid-name
             maps.append(mp)
 
         inst.maps = maps
-        if return_templates:
+        if return_templates:  # pylint: disable=no-else-return
             for i, template_maps in enumerate(template_maps_list):
                 # template maps will not have dimensions according to their
                 # tensors which are indices
@@ -2388,11 +2420,15 @@ class TensorMaps(TensorFields):
             # bulk was cut so we need to correct the map references.
             index_lut = np.full(len(self), np.nan)  # float type
             index_lut[template.fields[0]] = np.arange(len(template.fields[0]))
-        for mp_dim, mp in self.maps.items():
-            mp = mp._cut_template(template.maps[mp_dim])
+        for mp_dim, mp in self.maps.items():  # pylint: disable=invalid-name
+            mp = mp._cut_template(  # noqa: E501 pylint: disable=invalid-name,protected-access
+                template.maps[mp_dim]
+            )
             if template.fields:
                 # correct
-                mp = Maps.to_map(index_lut[mp], *mp.fields)
+                mp = Maps.to_map(  # pylint: disable=invalid-name
+                    index_lut[mp], *mp.fields
+                )
             inst.maps[mp_dim] = mp
         return inst
 
@@ -2423,12 +2459,11 @@ class TensorMaps(TensorFields):
         """
         if not issubclass(type(other), Tensors):
             return super(TensorMaps, self).equal(other, **kwargs)
-        else:
-            with other.tmp_transform(self.coord_sys):
-                mask = super(TensorMaps, self).equal(other, **kwargs)
-                if issubclass(type(other), TensorMaps):
-                    mask &= self.maps.equal(other.maps, **kwargs)
-                return mask
+        with other.tmp_transform(self.coord_sys):
+            mask = super(TensorMaps, self).equal(other, **kwargs)
+            if issubclass(type(other), TensorMaps):
+                mask &= self.maps.equal(other.maps, **kwargs)
+            return mask
 
     def stale(self):
         """
@@ -2445,12 +2480,12 @@ class TensorMaps(TensorFields):
             >>> assert np.array_equal(tm.stale(), [False, False, False, True])
 
         """
-        staleMask = np.full(self.shape[0], False, dtype=bool)
-        used = set([ind for mp in self.maps.values() for ind in mp.flatten()])
+        stale_mask = np.full(self.shape[0], False, dtype=bool)
+        used = set(ind for mp in self.maps.values() for ind in mp.flatten())
         for i in range(self.shape[0]):
             if i not in used:
-                staleMask[i] = True
-        return staleMask
+                stale_mask[i] = True
+        return stale_mask
 
     def cleaned(self, stale=True, duplicates=True):
         """
@@ -2503,11 +2538,15 @@ class TensorMaps(TensorFields):
                     # redirect maps. Note: work on inst.maps instead of
                     # self.maps in case stale vertices where removed
                     for map_dim in inst.maps:
-                        for f in range(len(inst.maps[map_dim])):  # face index
-                            mp = np.array(inst.maps[map_dim], dtype=int)
-                            if tensor_index in mp[f]:
-                                index = tfields.lib.util.index(mp[f], tensor_index)
-                                inst.maps[map_dim][f][index] = duplicate_index
+                        for face_index in range(len(inst.maps[map_dim])):
+                            mp = np.array(  # pylint: disable=invalid-name
+                                inst.maps[map_dim], dtype=int
+                            )
+                            if tensor_index in mp[face_index]:
+                                index = tfields.lib.util.index(
+                                    mp[face_index], tensor_index
+                                )
+                                inst.maps[map_dim][face_index][index] = duplicate_index
             if remove_mask.any():
                 # prevent another copy
                 inst = inst.removed(remove_mask)
@@ -2591,10 +2630,7 @@ class TensorMaps(TensorFields):
                 obj = self.copy()
                 map_indices = set(map_indices)  # for speed up
                 map_delete_mask = np.array(
-                    [
-                        True if i not in map_indices else False
-                        for i in range(len(self.maps[map_dim]))
-                    ]
+                    [i not in map_indices for i in range(len(self.maps[map_dim]))]
                 )
                 obj.maps[map_dim] = obj.maps[map_dim][~map_delete_mask]
                 obj = obj.cleaned(duplicates=False)
@@ -2682,7 +2718,7 @@ class TensorMaps(TensorFields):
 
         flat_map = np.array(obj.maps[map_dim].flat)
         values, counts = np.unique(flat_map, return_counts=True)
-        counts = {v: n for v, n in zip(values, counts)}
+        counts = dict(zip(values, counts))
 
         # last is a helper
         last = np.full(max(flat_map) + 1, -3, dtype=int)
@@ -2721,9 +2757,9 @@ class TensorMaps(TensorFields):
             values, counts = np.unique(path.maps[map_dim].flat, return_counts=True)
 
             first_node = None
-            for v, c in zip(values, counts):
-                if c == 1:
-                    first_node = v
+            for value, count in zip(values, counts):
+                if count == 1:
+                    first_node = value
                     break
             edges = [list(edge) for edge in path.maps[map_dim]]
             if first_node is None:
@@ -2735,8 +2771,7 @@ class TensorMaps(TensorFields):
                         if found_first_node:
                             edge[edge.index(first_node)] = len(path) - 1
                             break
-                        else:
-                            found_first_node = True
+                        found_first_node = True
 
             # follow the edges until you hit the end
             chain = [first_node]