Commit 320ab0c7 authored by theos

Removed 'np'-datamodel from point_space, rg_space, los_response and power_operator.

parent 2d8eee09
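With the 'np' datamodel removed, a space's datamodel must now be one of the d2o distribution strategies, and the per-datamodel branching collapses into direct calls on distributed_data_object. A minimal sketch of the new validation logic; the strategy names and the check_datamodel helper are illustrative assumptions, not code from this commit:

    # Hypothetical sketch of the simplified datamodel check.
    # The strategy list is an assumption about d2o's names ('not', 'equal',
    # 'fcfs'); only the shape of the check mirrors the hunk below.
    POINT_DISTRIBUTION_STRATEGIES = ['not', 'equal', 'fcfs']

    def check_datamodel(datamodel, default='fcfs'):
        # 'np' is no longer a valid choice; unknown values fall back to the
        # default distribution strategy, as in the point_space.__init__ hunk.
        if datamodel not in POINT_DISTRIBUTION_STRATEGIES:
            print("WARNING: datamodel set to default.")
            return default
        return datamodel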
@@ -818,7 +818,7 @@ class point_space(space):
                     "WARNING: incompatible dtype: " + str(dtype)))
         self.dtype = dtype

-        if datamodel not in ['np'] + POINT_DISTRIBUTION_STRATEGIES:
+        if datamodel not in POINT_DISTRIBUTION_STRATEGIES:
             about._errors.cstring("WARNING: datamodel set to default.")
             self.datamodel = \
                 gc['default_distribution_strategy']
@@ -890,24 +890,7 @@ class point_space(space):
                 data[key] = update

     def apply_scalar_function(self, x, function, inplace=False):
-        if self.datamodel == 'np':
-            if not inplace:
-                try:
-                    return function(x)
-                except:
-                    return np.vectorize(function)(x)
-            else:
-                try:
-                    x[:] = function(x)
-                except:
-                    x[:] = np.vectorize(function)(x)
-                return x
-        elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            return x.apply_scalar_function(function, inplace=inplace)
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))
+        return x.apply_scalar_function(function, inplace=inplace)

     def unary_operation(self, x, op='None', **kwargs):
         """
@@ -915,88 +898,36 @@ class point_space(space):
         Valid operations are
         """
-        if self.datamodel == 'np':
-            def _argmin(z, **kwargs):
-                ind = np.argmin(z, **kwargs)
-                if np.isscalar(ind):
-                    ind = np.unravel_index(ind, z.shape, order='C')
-                    if(len(ind) == 1):
-                        return ind[0]
-                return ind
-
-            def _argmax(z, **kwargs):
-                ind = np.argmax(z, **kwargs)
-                if np.isscalar(ind):
-                    ind = np.unravel_index(ind, z.shape, order='C')
-                    if(len(ind) == 1):
-                        return ind[0]
-                return ind
-
-            translation = {'pos': lambda y: getattr(y, '__pos__')(),
-                           'neg': lambda y: getattr(y, '__neg__')(),
-                           'abs': lambda y: getattr(y, '__abs__')(),
-                           'real': lambda y: getattr(y, 'real'),
-                           'imag': lambda y: getattr(y, 'imag'),
-                           'nanmin': np.nanmin,
-                           'amin': np.amin,
-                           'nanmax': np.nanmax,
-                           'amax': np.amax,
-                           'median': np.median,
-                           'mean': np.mean,
-                           'std': np.std,
-                           'var': np.var,
-                           'argmin': _argmin,
-                           'argmin_flat': np.argmin,
-                           'argmax': _argmax,
-                           'argmax_flat': np.argmax,
-                           'conjugate': np.conjugate,
-                           'sum': np.sum,
-                           'prod': np.prod,
-                           'unique': np.unique,
-                           'copy': np.copy,
-                           'copy_empty': np.empty_like,
-                           'isnan': np.isnan,
-                           'isinf': np.isinf,
-                           'isfinite': np.isfinite,
-                           'nan_to_num': np.nan_to_num,
-                           'all': np.all,
-                           'any': np.any,
-                           'None': lambda y: y}
-        elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            translation = {'pos': lambda y: getattr(y, '__pos__')(),
-                           'neg': lambda y: getattr(y, '__neg__')(),
-                           'abs': lambda y: getattr(y, '__abs__')(),
-                           'real': lambda y: getattr(y, 'real'),
-                           'imag': lambda y: getattr(y, 'imag'),
-                           'nanmin': lambda y: getattr(y, 'nanmin')(),
-                           'amin': lambda y: getattr(y, 'amin')(),
-                           'nanmax': lambda y: getattr(y, 'nanmax')(),
-                           'amax': lambda y: getattr(y, 'amax')(),
-                           'median': lambda y: getattr(y, 'median')(),
-                           'mean': lambda y: getattr(y, 'mean')(),
-                           'std': lambda y: getattr(y, 'std')(),
-                           'var': lambda y: getattr(y, 'var')(),
-                           'argmin': lambda y: getattr(y, 'argmin_nonflat')(),
-                           'argmin_flat': lambda y: getattr(y, 'argmin')(),
-                           'argmax': lambda y: getattr(y, 'argmax_nonflat')(),
-                           'argmax_flat': lambda y: getattr(y, 'argmax')(),
-                           'conjugate': lambda y: getattr(y, 'conjugate')(),
-                           'sum': lambda y: getattr(y, 'sum')(),
-                           'prod': lambda y: getattr(y, 'prod')(),
-                           'unique': lambda y: getattr(y, 'unique')(),
-                           'copy': lambda y: getattr(y, 'copy')(),
-                           'copy_empty': lambda y: getattr(y, 'copy_empty')(),
-                           'isnan': lambda y: getattr(y, 'isnan')(),
-                           'isinf': lambda y: getattr(y, 'isinf')(),
-                           'isfinite': lambda y: getattr(y, 'isfinite')(),
-                           'nan_to_num': lambda y: getattr(y, 'nan_to_num')(),
-                           'all': lambda y: getattr(y, 'all')(),
-                           'any': lambda y: getattr(y, 'any')(),
-                           'None': lambda y: y}
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))
+        translation = {'pos': lambda y: getattr(y, '__pos__')(),
+                       'neg': lambda y: getattr(y, '__neg__')(),
+                       'abs': lambda y: getattr(y, '__abs__')(),
+                       'real': lambda y: getattr(y, 'real'),
+                       'imag': lambda y: getattr(y, 'imag'),
+                       'nanmin': lambda y: getattr(y, 'nanmin')(),
+                       'amin': lambda y: getattr(y, 'amin')(),
+                       'nanmax': lambda y: getattr(y, 'nanmax')(),
+                       'amax': lambda y: getattr(y, 'amax')(),
+                       'median': lambda y: getattr(y, 'median')(),
+                       'mean': lambda y: getattr(y, 'mean')(),
+                       'std': lambda y: getattr(y, 'std')(),
+                       'var': lambda y: getattr(y, 'var')(),
+                       'argmin': lambda y: getattr(y, 'argmin_nonflat')(),
+                       'argmin_flat': lambda y: getattr(y, 'argmin')(),
+                       'argmax': lambda y: getattr(y, 'argmax_nonflat')(),
+                       'argmax_flat': lambda y: getattr(y, 'argmax')(),
+                       'conjugate': lambda y: getattr(y, 'conjugate')(),
+                       'sum': lambda y: getattr(y, 'sum')(),
+                       'prod': lambda y: getattr(y, 'prod')(),
+                       'unique': lambda y: getattr(y, 'unique')(),
+                       'copy': lambda y: getattr(y, 'copy')(),
+                       'copy_empty': lambda y: getattr(y, 'copy_empty')(),
+                       'isnan': lambda y: getattr(y, 'isnan')(),
+                       'isinf': lambda y: getattr(y, 'isinf')(),
+                       'isfinite': lambda y: getattr(y, 'isfinite')(),
+                       'nan_to_num': lambda y: getattr(y, 'nan_to_num')(),
+                       'all': lambda y: getattr(y, 'all')(),
+                       'any': lambda y: getattr(y, 'any')(),
+                       'None': lambda y: y}

         return translation[op](x, **kwargs)
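unary_operation now uses a single getattr-based translation table, so every op name resolves to a method call on the data object itself instead of a numpy function. The same dispatch pattern, illustrated with a plain numpy array and only methods that ndarray really provides:

    import numpy as np

    # Illustration of the dispatch table only; the d2o object additionally
    # offers nanmin, argmin_nonflat, copy_empty, etc., as listed above.
    translation = {'sum': lambda y: getattr(y, 'sum')(),
                   'mean': lambda y: getattr(y, 'mean')(),
                   'conjugate': lambda y: getattr(y, 'conjugate')(),
                   'copy': lambda y: getattr(y, 'copy')(),
                   'None': lambda y: y}

    x = np.array([1.0, 2.0, 3.0])
    print(translation['sum'](x))   # 6.0
    print(translation['mean'](x))  # 2.0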
@@ -1121,17 +1052,11 @@ class point_space(space):
                              dtype=dtype,
                              **kwargs)

-        if self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
+        else:
             return self._cast_to_d2o(x=x,
                                      dtype=dtype,
                                      **kwargs)
-        elif self.datamodel == 'np':
-            return self._cast_to_np(x=x,
-                                    dtype=dtype,
-                                    **kwargs)
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))

     def _cast_to_d2o(self, x, dtype=None, **kwargs):
         """
@@ -1204,8 +1129,8 @@ class point_space(space):
             if to_copy:
                 temp = x.copy_empty(dtype=dtype,
                                     distribution_strategy=self.datamodel)
                 temp.set_data(to_key=(slice(None),),
                               data=x,
                               from_key=(slice(None),))
                 temp.hermitian = x.hermitian
                 x = temp
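The hunk above re-distributes an existing distributed_data_object onto the space's own strategy: copy_empty allocates an empty container with the target distribution_strategy, and set_data copies the full index range into it. A hedged sketch of that idiom, assuming the standalone d2o package; the strategy names are assumptions:

    import numpy as np
    from d2o import distributed_data_object  # assumed import path

    x = distributed_data_object(np.arange(8), distribution_strategy='not')
    # Allocate an empty container with the desired target strategy ...
    temp = x.copy_empty(distribution_strategy='equal')  # assumed strategy name
    # ... and copy the data across, full slice to full slice.
    temp.set_data(to_key=(slice(None),), data=x, from_key=(slice(None),))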
@@ -1222,89 +1147,6 @@ class point_space(space):
         # Cast the d2o
         return self.cast(x, dtype=dtype)

-    def _cast_to_np(self, x, dtype=None, **kwargs):
-        """
-            Computes valid field values from a given object, trying
-            to translate the given data into a valid form. Thereby it is as
-            benevolent as possible.
-
-            Parameters
-            ----------
-            x : {float, numpy.ndarray, nifty.field}
-                Object to be transformed into an array of valid field values.
-
-            Returns
-            -------
-            x : numpy.ndarray, distributed_data_object
-                Array containing the field values, which are compatible to the
-                space.
-
-            Other parameters
-            ----------------
-            verbose : bool, *optional*
-                Whether the method should raise a warning if information is
-                lost during casting (default: False).
-        """
-        if dtype is None:
-            dtype = self.dtype
-
-        # Case 1: x is a distributed_data_object
-        if isinstance(x, distributed_data_object):
-            # Extract the data
-            temp = x.get_full_data()
-            # Cast the resulting numpy array again
-            return self._cast_to_np(temp,
-                                    dtype=dtype,
-                                    **kwargs)
-
-        # Case 2: x is a numpy.ndarray
-        elif isinstance(x, np.ndarray):
-            # Check the shape
-            if np.any(np.array(x.shape) != np.array(self.get_shape())):
-                # Check if at least the number of degrees of freedom is equal
-                if x.size == self.get_dim():
-                    # If the number of dof is equal or 1, use np.reshape...
-                    temp = x.reshape(self.get_shape())
-                    # ... and cast again
-                    return self._cast_to_np(temp,
-                                            dtype=dtype,
-                                            **kwargs)
-                elif x.size == 1:
-                    temp = np.empty(shape=self.get_shape(),
-                                    dtype=dtype)
-                    temp[:] = x
-                    return self._cast_to_np(temp,
-                                            dtype=dtype,
-                                            **kwargs)
-                else:
-                    raise ValueError(about._errors.cstring(
-                        "ERROR: Data has incompatible shape!"))
-            # Check the dtype
-            if x.dtype != dtype:
-                if x.dtype > dtype:
-                    about.warnings.cflush(
-                        "WARNING: Datatypes are of conflicting precision " +
-                        " (own: " + str(dtype) + " <> foreign: " +
-                        str(x.dtype) + ") and will be casted! Potential " +
-                        "loss of precision!\n")
-                # Fix the datatype...
-                temp = x.astype(dtype)
-                # ... and cast again
-                return self._cast_to_np(temp,
-                                        dtype=dtype,
-                                        **kwargs)
-            return x
-
-        # Case 3: x is something else
-        # Use general numpy casting
-        else:
-            temp = np.empty(self.get_shape(), dtype=dtype)
-            if x is not None:
-                temp[:] = x
-            return self._cast_to_np(temp, dtype=dtype)

     def enforce_power(self, spec, **kwargs):
         """
         Raises an error since the power spectrum is ill-defined for point
@@ -1496,67 +1338,44 @@ class point_space(space):
         if arg is None:
             return self.cast(0)

-        if self.datamodel == 'np':
-            if arg['random'] == "pm1":
-                x = random.pm1(dtype=self.dtype,
-                               shape=self.get_shape())
-            elif arg['random'] == "gau":
-                x = random.gau(dtype=self.dtype,
-                               shape=self.get_shape(),
-                               mean=arg['mean'],
-                               std=arg['std'])
-            elif arg['random'] == "uni":
-                x = random.uni(dtype=self.dtype,
-                               shape=self.get_shape(),
-                               vmin=arg['vmin'],
-                               vmax=arg['vmax'])
-            else:
-                raise KeyError(about._errors.cstring(
-                    "ERROR: unsupported random key '" +
-                    str(arg['random']) + "'."))
-            return x
-
-        elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            # Prepare the empty distributed_data_object
-            sample = distributed_data_object(
-                                    global_shape=self.get_shape(),
-                                    dtype=self.dtype,
-                                    distribution_strategy=self.datamodel)
-
-            # Case 1: uniform distribution over {-1,+1}/{1,i,-1,-i}
-            if arg['random'] == 'pm1':
-                sample.apply_generator(lambda s: random.pm1(dtype=self.dtype,
-                                                            shape=s))
-
-            # Case 2: normal distribution with zero-mean and a given standard
-            #         deviation or variance
-            elif arg['random'] == 'gau':
-                std = arg['std']
-                if np.isscalar(std) or std is None:
-                    processed_std = std
-                else:
-                    try:
-                        processed_std = sample.distributor.\
-                            extract_local_data(std)
-                    except(AttributeError):
-                        processed_std = std
-
-                sample.apply_generator(lambda s: random.gau(dtype=self.dtype,
-                                                            shape=s,
-                                                            mean=arg['mean'],
-                                                            std=processed_std))
-
-            # Case 3: uniform distribution
-            elif arg['random'] == 'uni':
-                sample.apply_generator(lambda s: random.uni(dtype=self.dtype,
-                                                            shape=s,
-                                                            vmin=arg['vmin'],
-                                                            vmax=arg['vmax']))
-            return sample
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))
+        # Prepare the empty distributed_data_object
+        sample = distributed_data_object(
+                                global_shape=self.get_shape(),
+                                dtype=self.dtype,
+                                distribution_strategy=self.datamodel)
+
+        # Case 1: uniform distribution over {-1,+1}/{1,i,-1,-i}
+        if arg['random'] == 'pm1':
+            sample.apply_generator(lambda s: random.pm1(dtype=self.dtype,
+                                                        shape=s))
+
+        # Case 2: normal distribution with zero-mean and a given standard
+        #         deviation or variance
+        elif arg['random'] == 'gau':
+            std = arg['std']
+            if np.isscalar(std) or std is None:
+                processed_std = std
+            else:
+                try:
+                    processed_std = sample.distributor.\
+                        extract_local_data(std)
+                except(AttributeError):
+                    processed_std = std
+
+            sample.apply_generator(lambda s: random.gau(dtype=self.dtype,
+                                                        shape=s,
+                                                        mean=arg['mean'],
+                                                        std=processed_std))
+
+        # Case 3: uniform distribution
+        elif arg['random'] == 'uni':
+            sample.apply_generator(lambda s: random.uni(dtype=self.dtype,
+                                                        shape=s,
+                                                        vmin=arg['vmin'],
+                                                        vmax=arg['vmax']))
+        return sample

     def calc_weight(self, x, power=1):
         """
@@ -1631,16 +1450,11 @@ class point_space(space):
         x = self.cast(x)
         y = self.cast(y)

-        if self.datamodel == 'np':
-            result = np.vdot(x, y)
-        elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            result = x.vdot(y)
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))
+        result = x.vdot(y)

         if np.isreal(result):
             result = np.asscalar(np.real(result))
         return result

     def calc_transform(self, x, codomain=None, **kwargs):
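calc_dot now relies on d2o's vdot which, like numpy's, conjugates its first argument; a real result is then unpacked into a plain Python scalar (the diff uses np.asscalar for that). The numpy analogue of this post-processing:

    import numpy as np

    x = np.array([1.0, 2.0, 3.0])
    y = np.array([4.0, 5.0, 6.0])
    result = np.vdot(x, y)                 # 32.0; conjugates x for complex input
    if np.isreal(result):
        result = np.real(result).item()    # the diff calls np.asscalar here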
@@ -1699,27 +1513,14 @@ class point_space(space):
         except AttributeError:
             complex_weights_Q = False

-        if self.datamodel == 'np':
-            if complex_weights_Q:
-                real_bincount = np.bincount(x, weights=weights.real,
-                                            minlength=minlength)
-                imag_bincount = np.bincount(x, weights=weights.imag,
-                                            minlength=minlength)
-                return real_bincount + imag_bincount
-            else:
-                return np.bincount(x, weights=weights, minlength=minlength)
-        elif self.datamodel in POINT_DISTRIBUTION_STRATEGIES:
-            if complex_weights_Q:
-                real_bincount = x.bincount(weights=weights.real,
-                                           minlength=minlength)
-                imag_bincount = x.bincount(weights=weights.imag,
-                                           minlength=minlength)
-                return real_bincount + imag_bincount
-            else:
-                return x.bincount(weights=weights, minlength=minlength)
-        else:
-            raise NotImplementedError(about._errors.cstring(
-                "ERROR: function is not implemented for given datamodel."))
+        if complex_weights_Q:
+            real_bincount = x.bincount(weights=weights.real,
+                                       minlength=minlength)
+            imag_bincount = x.bincount(weights=weights.imag,
+                                       minlength=minlength)
+            return real_bincount + imag_bincount
+        else:
+            return x.bincount(weights=weights, minlength=minlength)

     def get_plot(self, x, title="", vmin=None, vmax=None, unit=None,
                  norm=None, other=None, legend=False, save=None, **kwargs):
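np.bincount does not accept complex weights, which is why complex fields are binned twice, once with the real and once with the imaginary part, and the two histograms are then summed exactly as in the method above. The same logic in plain numpy:

    import numpy as np

    x = np.array([0, 1, 1, 2])                             # bin indices
    weights = np.array([1 + 2j, 3 + 0j, 0 + 1j, 2 - 1j])   # complex weights

    real_bincount = np.bincount(x, weights=weights.real, minlength=4)
    imag_bincount = np.bincount(x, weights=weights.imag, minlength=4)
    combined = real_bincount + imag_bincount               # recombined as above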
...
@@ -197,9 +197,7 @@ class los_response(operator):
         return (sigmas_low_coords, sigmas_up_coords)

     def _get_local_zero_point(self):
-        if self.domain.datamodel == 'np':
-            return self.zero_point
-        elif self.domain.datamodel in STRATEGIES['not']:
+        if self.domain.datamodel in STRATEGIES['not']:
             return self.zero_point
         elif self.domain.datamodel in STRATEGIES['slicing']:
             dummy_d2o = distributed_data_object(
@@ -219,9 +217,7 @@ class los_response(operator):
                 str(self.domain.datamodel)))

     def _init_local_shape(self):
-        if self.domain.datamodel == 'np':
-            return self.domain.get_shape()
-        elif self.domain.datamodel in STRATEGIES['not']:
+        if self.domain.datamodel in STRATEGIES['not']:
             return self.domain.get_shape()
         elif self.domain.datamodel in STRATEGIES['slicing']:
             dummy_d2o = distributed_data_object(
@@ -285,9 +281,7 @@ class los_response(operator):
         return result_field

     def _multiply_preprocessing(self, input_field):
-        if self.domain.datamodel == 'np':
-            local_input_data = input_field.val
-        elif self.domain.datamodel in STRATEGIES['not']:
+        if self.domain.datamodel in STRATEGIES['not']:
             local_input_data = input_field.val.data
         elif self.domain.datamodel in STRATEGIES['slicing']:
             extractor = self._extractor_d2o.distributor.extract_local_data
@@ -295,9 +289,7 @@ class los_response(operator):
         return local_input_data

     def _multiply_postprocessing(self, local_result):
-        if self.domain.datamodel == 'np':
-            global_result = local_result
-        elif self.domain.datamodel in STRATEGIES['not']:
+        if self.domain.datamodel in STRATEGIES['not']:
             global_result = local_result
         elif self.domain.datamodel in STRATEGIES['slicing']:
             global_result = np.empty_like(local_result)
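In los_response the dispatch is now only over d2o's strategy groups: under the 'not' strategies every process holds the full array, while the 'slicing' strategies split it along the first axis, so the slicing branch has to go through a distributor to extract the locally relevant data. A hedged sketch of that distinction, assuming the d2o package and the strategy names 'not' and 'equal':

    import numpy as np
    from d2o import distributed_data_object  # assumed import path

    data = np.arange(8)
    replicated = distributed_data_object(data, distribution_strategy='not')
    sliced = distributed_data_object(data, distribution_strategy='equal')

    # Under MPI, the first call returns the full array on every process,
    # the second only this process's slice along the first axis.
    print(replicated.get_local_data().shape)
    print(sliced.get_local_data().shape)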
...
@@ -1904,15 +1904,9 @@ class power_operator(diagonal_operator):
             diag = pindex.apply_scalar_function(lambda x: temp_spec[x],
                                                 dtype=temp_spec.dtype.type)
             diag.hermitian = True
-        except(AttributeError):  # TODO: update all pindices to d2o's
+        except(AttributeError):
             diag = temp_spec[pindex]
-        if self.domain.datamodel == 'np':
-            try:
-                diag = diag.get_full_data()
-            except(AttributeError):
-                pass

         # Weight if necessary
         if not self.domain.discrete and bare:
             self.val = self.domain.calc_weight(diag, power=1)
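The surviving context maps a one-dimensional power spectrum onto pindex (the per-pixel band index) via apply_scalar_function; when pindex is still a plain numpy array, the except-branch does the same job with fancy indexing, which no longer needs the extra 'np' conversion. The numpy core of that mapping, with made-up numbers:

    import numpy as np

    temp_spec = np.array([10.0, 5.0, 2.5])   # one value per spectral band
    pindex = np.array([[0, 1], [1, 2]])      # band index of every pixel

    diag = temp_spec[pindex]                 # per-pixel spectral value
    # diag == [[10. ,  5. ],
    #          [ 5. ,  2.5]]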
...
@@ -627,13 +627,7 @@ class rg_space(point_space):
             def temp_erf(x):
                 return erf(x / np.sqrt(2))

-            if self.datamodel == 'np':
-                sample = temp_erf(sample)
-            elif self.datamodel in RG_DISTRIBUTION_STRATEGIES:
-                sample.apply_scalar_function(function=temp_erf, inplace=True)
-            else:
-                raise NotImplementedError(about._errors.cstring(
-                    "ERROR: function is not implemented for given datamodel."))
+            sample.apply_scalar_function(function=temp_erf, inplace=True)

             # Shift and stretch the uniform distribution into the given limits
             # sample = (sample + 1)/2 * (vmax-vmin) + vmin
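temp_erf turns a standard-normal sample into one that is uniform on (-1, 1): for X ~ N(0, 1), erf(X / sqrt(2)) = 2*Phi(X) - 1, and Phi(X) is uniformly distributed on (0, 1). The commented line in the hunk then shifts and stretches the result into [vmin, vmax]. A small numpy/scipy check of that transformation:

    import numpy as np
    from scipy.special import erf

    gauss = np.random.normal(size=100000)   # standard-normal draws
    uniform = erf(gauss / np.sqrt(2))       # approximately uniform on (-1, 1)

    vmin, vmax = 2.0, 5.0
    stretched = (uniform + 1) / 2 * (vmax - vmin) + vmin
    print(stretched.min(), stretched.max())  # close to 2.0 and 5.0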
@@ -722,20 +716,13 @@ class rg_space(point_space):
                                       std=1)

                 # apply the powerspectrum renormalization
-                if self.datamodel == 'np':
-                    rescaler = np.sqrt(spec[np.searchsorted(kindex, kdict)])
-                    sample *= rescaler
-                elif self.datamodel in RG_DISTRIBUTION_STRATEGIES:
-                    # extract the local data from kdict
-                    local_kdict = kdict.get_local_data()
-                    rescaler = np.sqrt(
-                                spec[np.searchsorted(kindex, local_kdict)])
-                    sample.apply_scalar_function(lambda x: x * rescaler,
-                                                 inplace=True)
-                else:
-                    raise NotImplementedError(about._errors.cstring(
-                        "ERROR: function is not implemented for given " +
-                        "datamodel."))
+                # extract the local data from kdict
+                local_kdict = kdict.get_local_data()
+                rescaler = np.sqrt(
+                            spec[np.searchsorted(kindex, local_kdict)])
+                sample.apply_scalar_function(lambda x: x * rescaler,
+                                             inplace=True)

             # Case 2: self is a position space
             else:
                 # get a suitable codomain
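The renormalization looks up, for each local grid point, the spectral band its k-value belongs to (np.searchsorted against kindex) and multiplies the harmonic-space sample by the square root of the power in that band; with the 'np' branch gone this always operates on kdict's local data. The core of that lookup in plain numpy, with made-up numbers:

    import numpy as np

    kindex = np.array([0.0, 1.0, 2.0, 3.0])           # k-bin values
    spec = np.array([4.0, 1.0, 0.25, 0.0625])         # power per bin
    local_kdict = np.array([[0.0, 1.0], [2.0, 3.0]])  # |k| of the local pixels

    rescaler = np.sqrt(spec[np.searchsorted(kindex, local_kdict)])
    # the local harmonic-space sample is then multiplied by this rescaler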
@@ -829,13 +816,7 @@ class rg_space(point_space):
         x = self.cast(x)
         y = self.cast(y)