Commit f22b88c5 authored by dboe's avatar dboe
Browse files

linting core

parent 16528828
Pipeline #84872 passed with stages
in 59 seconds
#!/usr/bin/env
#!/usr/bin/env # pylint: disable=too-many-lines,super-with-arguments
# encoding: utf-8
"""
Author: Daniel Boeckenhoff
......@@ -15,16 +15,16 @@ Notes:
# builtin
import warnings
import pathlib
from six import string_types
from contextlib import contextmanager
from collections import Counter
from copy import deepcopy
import logging
from six import string_types
# 3rd party
import numpy as np
import sympy
import scipy as sp
import scipy
import sortedcontainers
import rna
......@@ -51,15 +51,18 @@ def dim(tensor):
return tensor.shape[1]
class AbstractObject(object):
class AbstractObject(object): # pylint: disable=useless-object-inheritance
"""
Abstract base class for all tfields objects implementing polymorphisms
"""
def save(self, path, *args, **kwargs):
"""
Saving by redirecting to the correct save method depending on path
Args:
path (str or buffer)
*args:
forwarded to extension specific method
*args: joined with path
**kwargs:
extension (str): only needed if path is buffer
... remaining:forwarded to extension specific method
......@@ -73,14 +76,14 @@ class AbstractObject(object):
# get the save method
try:
save_method = getattr(self, "_save_{extension}".format(**locals()))
except AttributeError:
save_method = getattr(self, "_save_" + extension)
except AttributeError as err:
raise NotImplementedError(
"Can not find save method for extension: "
"{extension}.".format(**locals())
)
) from err
path = rna.path.resolve(path)
path = rna.path.resolve(path, *args)
rna.path.mkdir(path)
return save_method(path, **kwargs)
......@@ -91,8 +94,7 @@ class AbstractObject(object):
Args:
path (str or buffer)
*args:
forwarded to extension specific method
*args: joined with path
**kwargs:
extension (str): only needed if path is buffer
... remaining:forwarded to extension specific method
......@@ -106,14 +108,14 @@ class AbstractObject(object):
try:
load_method = getattr(cls, "_load_{e}".format(e=extension))
except AttributeError:
except AttributeError as err:
raise NotImplementedError(
"Can not find load method for extension: "
"{extension}.".format(**locals())
)
) from err
return load_method(path, *args, **kwargs)
def _save_npz(self, path, **kwargs):
def _save_npz(self, path):
"""
Args:
path (open file or str/unicode): destination to save file to.
......@@ -164,27 +166,40 @@ class AbstractObject(object):
Factory method
Given a path to a npz file, construct the object
"""
# TODO: think about allow_pickle, whether it really should be True or
# Note: think about allow_pickle, whether it really should be True or
# whether we could avoid pickling (potential security issue)
load_kwargs.setdefault("allow_pickle", True)
np_file = np.load(path, **load_kwargs)
d = dict(np_file)
d.pop("tfields_version", None)
return cls._from_dict(d)
content = dict(np_file)
content.pop("tfields_version", None)
return cls._from_dict(content)
def _args(self) -> tuple:
def _args(self) -> tuple: # pylint: disable=no-self-use
"""
Used for allowing the polymorphic signature Class(obj) as a copy/casting constructor
"""
return tuple()
def _kwargs(self) -> dict:
def _kwargs(self) -> dict: # pylint: disable=no-self-use
"""
Used for allowing the polymorphic signature Class(obj) as a copy/casting constructor
"""
return dict()
_HIERARCHY_SEPARATOR = "::"
def _as_dict(self):
d = {}
def _as_dict(self) -> dict:
"""
Get an object representation in a dict format. This is necessary e.g. for saving the full
file uniquely in the npz format
Returns:
dict: object packed as nested dictionary
"""
content = {}
# type
d["type"] = type(self).__name__
content["type"] = type(self).__name__
# args and kwargs
for base_attr, iterable in [
......@@ -194,24 +209,26 @@ class AbstractObject(object):
for attr, value in iterable:
attr = base_attr + self._HIERARCHY_SEPARATOR + attr
if hasattr(value, "_as_dict"):
part_dict = value._as_dict()
part_dict = value._as_dict() # pylint: disable=protected-access
for part_attr, part_value in part_dict.items():
d[attr + self._HIERARCHY_SEPARATOR + part_attr] = part_value
content[
attr + self._HIERARCHY_SEPARATOR + part_attr
] = part_value
else:
d[attr] = value
return d
content[attr] = value
return content
@classmethod
def _from_dict(cls, d: dict):
def _from_dict(cls, content: dict):
try:
d.pop("type")
content.pop("type")
except KeyError:
# legacy
return cls._from_dict_legacy(**d)
return cls._from_dict_legacy(**content)
here = {}
for string in d: # TODO: no sorted list
value = d[string]
for string in content: # TODO: no sorted list
value = content[string]
attr, _, end = string.partition(cls._HIERARCHY_SEPARATOR)
key, _, end = end.partition(cls._HIERARCHY_SEPARATOR)
......@@ -221,9 +238,7 @@ class AbstractObject(object):
here[attr][key] = {}
here[attr][key][end] = value
"""
Do the recursion
"""
# Do the recursion
for attr in here:
for key in here[attr]:
if "type" in here[attr][key]:
......@@ -235,14 +250,14 @@ class AbstractObject(object):
# Found under nt. ???
obj_type = obj_type.decode("UTF-8")
obj_type = getattr(tfields, obj_type)
attr_value = obj_type._from_dict(here[attr][key])
attr_value = obj_type._from_dict(
here[attr][key]
) # noqa: E501 pylint: disable=protected-access
else: # if len(here[attr][key]) == 1:
attr_value = here[attr][key].pop("")
here[attr][key] = attr_value
"""
Build the generic way
"""
# Build the generic way
args = here.pop("args", tuple())
args = tuple(args[key] for key in sorted(args))
kwargs = here.pop("kwargs", {})
......@@ -251,17 +266,15 @@ class AbstractObject(object):
return obj
@classmethod
def _from_dict_legacy(cls, **d):
def _from_dict_legacy(cls, **content):
"""
legacy method of _from_dict - Opposite of old _as_dict method
which is overridden in this version
"""
list_dict = {}
kwargs = {}
"""
De-Flatten the first layer of lists
"""
for key in sorted(list(d)):
# De-Flatten the first layer of lists
for key in sorted(list(content)):
if "::" in key:
attr, _, end = key.partition("::")
if attr not in list_dict:
......@@ -273,13 +286,11 @@ class AbstractObject(object):
index = int(index)
if index not in list_dict[attr]:
list_dict[attr][index] = {}
list_dict[attr][index][end] = d[key]
list_dict[attr][index][end] = content[key]
else:
kwargs[key] = d[key]
kwargs[key] = content[key]
"""
Build the lists (recursively)
"""
# Build the lists (recursively)
for key in list(list_dict):
sub_dict = list_dict[key]
list_dict[key] = []
......@@ -291,19 +302,19 @@ class AbstractObject(object):
# Found under nt. ???
bulk_type = bulk_type.decode("UTF-8")
bulk_type = getattr(tfields, bulk_type)
list_dict[key].append(bulk_type._from_dict_legacy(**sub_dict[index]))
with cls._bypass_setters("fields", demand_existence=False):
"""
Build the normal way
"""
list_dict[key].append(
bulk_type._from_dict_legacy(**sub_dict[index])
) # noqa: E501 pylint: disable=protected-access
with cls._bypass_setters(
"fields", demand_existence=False
): # noqa: E501 pylint: disable=protected-access,no-member
# Build the normal way
bulk = kwargs.pop("bulk")
bulk_type = kwargs.pop("bulk_type")
obj = cls.__new__(cls, bulk, **kwargs)
"""
Set list attributes
"""
# Set list attributes
for attr, list_value in list_dict.items():
setattr(obj, attr, list_value)
return obj
......@@ -354,8 +365,10 @@ class AbstractNdarray(np.ndarray, AbstractObject):
for attr in self._iter_slots():
setattr(self, attr, getattr(obj, attr, None))
def __array_wrap__(self, out_arr, context=None):
return np.ndarray.__array_wrap__(self, out_arr, context)
def __array_wrap__(self, out_arr, context=None): # pylint: disable=arguments-differ
return np.ndarray.__array_wrap__(
self, out_arr, context
) # noqa: E501 pylint: disable=too-many-function-args
@classmethod
def _iter_slots(cls):
......@@ -384,7 +397,7 @@ class AbstractNdarray(np.ndarray, AbstractObject):
except Exception as err:
raise ValueError(
str(attr) + str(dtype) + str(kwargs[attr]) + str(err)
)
) from err
def __setattr__(self, name, value):
if name in self.__slots__:
......@@ -460,11 +473,9 @@ class AbstractNdarray(np.ndarray, AbstractObject):
# set the __slot__ attributes
valid_slot_attrs = list(self._iter_slots())
"""
attributes that have been added later have not been pickled with the
full information and thus need to be excluded from the __setstate__
need to be in the same order as they have been added to __slots__
"""
# attributes that have been added later have not been pickled with the full information
# and thus need to be excluded from the __setstate__ need to be in the same order as they
# have been added to __slots__
added_slot_attrs = ["name"]
n_np = 5 # number of numpy array states
n_old = len(valid_slot_attrs) - len(state[n_np:])
......@@ -528,7 +539,7 @@ class AbstractNdarray(np.ndarray, AbstractObject):
for slot_index, setter in zip(slot_indices, setters):
cls.__slot_setters__[slot_index] = setter
def copy(self, *args, **kwargs):
def copy(self, **kwargs): # pylint: disable=arguments-differ
"""
The standard ndarray copy does not copy slots. Correct for this.
......@@ -555,11 +566,15 @@ class AbstractNdarray(np.ndarray, AbstractObject):
False
"""
if kwargs:
raise NotImplementedError(
"Copying with arguments {kwargs} not yet supported"
)
# works with __reduce__ / __setstate__
return deepcopy(self)
class Tensors(AbstractNdarray):
class Tensors(AbstractNdarray): # pylint: disable=too-many-public-methods
"""
Set of tensors with the same basis.
......@@ -646,12 +661,12 @@ class Tensors(AbstractNdarray):
__slot_defaults__ = ["cartesian"]
__slot_setters__ = [tfields.bases.get_coord_system_name]
def __new__(cls, tensors, **kwargs):
def __new__(cls, tensors, **kwargs): # pylint: disable=too-many-branches
dtype = kwargs.pop("dtype", None)
order = kwargs.pop("order", None)
dim = kwargs.pop("dim", None)
dim = kwargs.pop("dim", None) # pylint: disable=redefined-outer-name
""" copy constructor extracts the kwargs from tensors"""
# copy constructor extracts the kwargs from tensors
if issubclass(type(tensors), Tensors):
if dim is not None:
dim = tensors.dim
......@@ -669,15 +684,15 @@ class Tensors(AbstractNdarray):
else:
dtype = np.float64
""" demand iterable structure """
# demand iterable structure
try:
len(tensors)
except TypeError:
except TypeError as err:
raise TypeError(
"Iterable structure necessary." " Got {tensors}".format(**locals())
)
) from err
""" process empty inputs """
# process empty inputs
if len(tensors) == 0:
if issubclass(type(tensors), tfields.Tensors):
tensors = np.empty(tensors.shape, dtype=tensors.dtype)
......@@ -694,9 +709,9 @@ class Tensors(AbstractNdarray):
tensors = np.asarray(tensors, dtype=dtype, order=order)
obj = tensors.view(cls)
""" check dimension(s) """
for d in obj.shape[1:]:
if not d == obj.dim:
# check dimension(s)
for obj_dim in obj.shape[1:]:
if not obj_dim == obj.dim:
raise ValueError(
"Dimensions are inconstistent. "
"Manifold dimension is {obj.dim}. "
......@@ -711,10 +726,10 @@ class Tensors(AbstractNdarray):
" {dim} demanded.".format(**locals())
)
""" update kwargs with defaults from slots """
# update kwargs with defaults from slots
cls._update_slot_kwargs(kwargs)
""" set kwargs to slots attributes """
# set kwargs to slots attributes
for attr in kwargs:
if attr not in cls._iter_slots():
raise AttributeError(
......@@ -809,14 +824,14 @@ class Tensors(AbstractNdarray):
"""
""" get most frequent coord_sys or predefined coord_sys """
# get most frequent coord_sys or predefined coord_sys
coord_sys = kwargs.get("coord_sys", None)
return_templates = kwargs.pop("return_templates", False)
if coord_sys is None:
bases = []
for t in objects:
for tensors in objects:
try:
bases.append(t.coord_sys)
bases.append(tensors.coord_sys)
except AttributeError:
pass
if bases:
......@@ -827,22 +842,22 @@ class Tensors(AbstractNdarray):
default = cls.__slot_defaults__[cls.__slots__.index("coord_sys")]
kwargs["coord_sys"] = default
""" transform all raw inputs to cls type with correct coord_sys. Also
automatically make a copy of those instances that are of the correct
type already."""
# transform all raw inputs to cls type with correct coord_sys. Also
# automatically make a copy of those instances that are of the correct
# type already.
objects = [cls.__new__(cls, t, **kwargs) for t in objects]
""" check rank and dimension equality """
if not len(set([t.rank for t in objects])) == 1:
# check rank and dimension equality
if not len(set(t.rank for t in objects)) == 1:
raise TypeError("Tensors must have the same rank for merging.")
if not len(set([t.dim for t in objects])) == 1:
if not len(set(t.dim for t in objects)) == 1:
raise TypeError("Tensors must have the same dimension for merging.")
""" merge all objects """
remainingObjects = objects[1:] or []
# merge all objects
remaining_objects = objects[1:] or []
tensors = objects[0]
for i, obj in enumerate(remainingObjects):
for i, obj in enumerate(remaining_objects):
tensors = np.append(tensors, obj, axis=0)
if len(tensors) == 0 and not kwargs.get("dim", None):
......@@ -851,7 +866,7 @@ class Tensors(AbstractNdarray):
kwargs["dim"] = dim(objects[0])
inst = cls.__new__(cls, tensors, **kwargs)
if not return_templates:
if not return_templates: # pylint: disable=no-else-return
return inst
else:
tensor_lengths = [len(o) for o in objects]
......@@ -1030,14 +1045,16 @@ class Tensors(AbstractNdarray):
>>> assert t_cyl[0, 0] == 3
"""
# scalars empty already there
if self.rank == 0 or self.shape[0] == 0 or self.coord_sys == coord_sys:
self.coord_sys = coord_sys
# scalars empty already there
if (
self.rank == 0 or self.shape[0] == 0 or self.coord_sys == coord_sys
): # noqa: E501 pylint: disable=access-member-before-definition
self.coord_sys = coord_sys # pylint: disable=attribute-defined-outside-init
return
tfields.bases.transform(self, self.coord_sys, coord_sys)
# self[:] = tfields.bases.transform(self, self.coord_sys, coord_sys)
self.coord_sys = coord_sys
self.coord_sys = coord_sys # pylint: disable=attribute-defined-outside-init
@contextmanager
def tmp_transform(self, coord_sys):
......@@ -1057,15 +1074,15 @@ class Tensors(AbstractNdarray):
>>> assert p.coord_sys == tfields.bases.SPHERICAL
"""
baseBefore = self.coord_sys
if baseBefore == coord_sys:
base_before = self.coord_sys
if base_before == coord_sys:
yield
else:
self.transform(coord_sys)
yield
self.transform(baseBefore)
self.transform(base_before)
def mirror(self, coordinate, condition=None):
"""
......@@ -1102,15 +1119,15 @@ class Tensors(AbstractNdarray):
condition = np.array([True for i in range(len(self))])
elif isinstance(condition, sympy.Basic):
condition = self.evalf(condition)
if isinstance(coordinate, list) or isinstance(coordinate, tuple):
for c in coordinate:
self.mirror(c, condition=condition)
if isinstance(coordinate, (list, tuple)):
for coord in coordinate:
self.mirror(coord, condition=condition)
elif isinstance(coordinate, int):
self[:, coordinate][condition] *= -1
else:
raise TypeError()
def to_segment(
def to_segment( # pylint: disable=too-many-arguments
self,
segment,
num_segments,
......@@ -1162,7 +1179,9 @@ class Tensors(AbstractNdarray):
+ segment * periodicity / num_segments
)
def equal(self, other, rtol=None, atol=None, equal_nan=False, return_bool=True):
def equal(
self, other, rtol=None, atol=None, equal_nan=False, return_bool=True
): # noqa: E501 pylint: disable=too-many-arguments
"""
Evaluate, whether the instance has the same content as other.
......@@ -1176,23 +1195,25 @@ class Tensors(AbstractNdarray):
if issubclass(type(other), Tensors) and self.coord_sys != other.coord_sys:
other = other.copy()
other.transform(self.coord_sys)
x, y = np.asarray(self), np.asarray(other)
self_array, other_array = np.asarray(self), np.asarray(other)
if rtol is None and atol is None:
mask = x == y
mask = self_array == other_array
if equal_nan:
both_nan = np.isnan(x) & np.isnan(y)
both_nan = np.isnan(self_array) & np.isnan(other_array)
mask[both_nan] = both_nan[both_nan]
else:
if rtol is None:
rtol = 0.0
if atol is None:
atol = 0.0
mask = np.isclose(x, y, rtol=rtol, atol=atol, equal_nan=equal_nan)
mask = np.isclose(
self_array, other_array, rtol=rtol, atol=atol, equal_nan=equal_nan
)
if return_bool:
return bool(np.all(mask))
return mask
def contains(self, other, **kwargs):
def contains(self, other):
"""
Inspired by a speed argument @
stackoverflow.com/questions/14766194/testing-whether-a-numpy-array-contains-a-given-row
......@@ -1231,20 +1252,20 @@ class Tensors(AbstractNdarray):
array([1, 3])
"""
x, y = np.asarray(self), np.asarray(tensor)
self_array, other_array = np.asarray(self), np.asarray(tensor)
if rtol is None and atol is None:
equal_method = np.equal
else:
equal_method = lambda a, b: np.isclose(a, b, rtol=rtol, atol=atol) # NOQA
"""
inspired by https://stackoverflow.com/questions/19228295/\
find-ordered-vector-in-numpy-array
"""
# inspired by
# https://stackoverflow.com/questions/19228295/find-ordered-vector-in-numpy-array
if self.rank == 0:
indices = np.where(equal_method((x - y), 0))[0]
indices = np.where(equal_method((self_array - other_array), 0))[0]
elif self.rank == 1:
indices = np.where(np.all(equal_method((x - y), 0), axis=1))[0]
indices = np.where(
np.all(equal_method((self_array - other_array), 0), axis=1)
)[0]
else:
raise NotImplementedError()
return indices
......@@ -1318,7 +1339,9 @@ find-ordered-vector-in-numpy-array
"""
with other.tmp_transform(self.coord_sys):
# balanced_tree option gives huge speedup!
kd_tree = sp.spatial.cKDTree(other, 1000, balanced_tree=False)
kd_tree = scipy.spatial.cKDTree(
other, 1000, balanced_tree=False
) # noqa: E501 pylint: disable=no-member
res = kd_tree.query(self, **kwargs)
array = res[1]
......@@ -1390,22 +1413,15 @@ find-ordered-vector-in-numpy-array
points, extending the object it should cut. This becomes relevant for
Mesh3D when adding vertices at the edge of the cut is necessary.
"""
# TODO: in merged return np.empty() for tensors to save space.
# use tensors only when adding. I think this function already suits the
# needs
# Redirect fields
fields = []
if template.fields and issubclass(type(self), TensorFields):
template_field = np.array(template.fields[0])
if len(self) > 0:
"""
if new vertices have been created in the template, it is
in principle unclear what fields we have to refer to.
Thus in creating the template, we gave np.nan.
To make it fast, we replace nan with 0 as a dummy and correct
the field entries afterwards with np.nan.
"""
# if new vertices have been created in the template, it is in principle unclear
# what fields we have to refer to. Thus in creating the template, we gave np.nan.
# To make it fast, we replace nan with 0 as a dummy and correct the field entries
# afterwards with np.nan.
nan_mask = np.isnan(template_field)
template_field[nan_mask] = 0 # dummy reference to index 0.
template_field = template_field.astype(int)
......@@ -1491,7 +1507,7 @@ find-ordered-vector-in-numpy-array
Args:
other(Iterable)
**kwargs:
... is forwarded to sp.spatial.distance.cdist
... is forwarded to scipy.spatial.distance.cdist
Examples:
>>> import tfields
......@@ -1510,7 +1526,7 @@ find-ordered-vector-in-numpy-array
if issubclass(type(other), Tensors) and self.coord_sys != other.coord_sys:
other = other.copy()
other.transform(self.coord_sys)
return sp.spatial.distance.cdist(self, other, **kwargs)
return scipy.spatial.distance.cdist(self, other, **kwargs)