Commit 5d23df47 authored by Theo Steininger

Merge branch 'master' into plotting

parents 56d76cec e5f51246
......@@ -99,7 +99,7 @@ Starting with a fresh Ubuntu installation move to a folder like
- Install pyHealpix:
git clone https://gitlab.mpcdf.mpg.de/mtr/pyHealpix.git
git clone https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
cd pyHealpix
autoreconf -i && ./configure && sudo make install
cd ..
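If the build succeeded, a quick import check confirms the extension is visible to Python (the module name pyHealpix is assumed from the repository name):

# Hedged sanity check; module name assumed from the repository name.
import pyHealpix
print(pyHealpix.__file__)   # should point at the freshly installed extension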
......@@ -133,7 +133,7 @@ your local user directory.
For pyHealpix, use:
git clone https://gitlab.mpcdf.mpg.de/mtr/pyHealpix.git
git clone https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
cd pyHealpix
autoreconf -i && ./configure --prefix=$HOME/.local && make install
cd ..
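With --prefix=$HOME/.local the module typically ends up in the per-user site-packages directory, which Python picks up without sudo; a small check of that assumption:

# Hedged check that the user-local site-packages directory is active.
import site
import sys
print(site.getusersitepackages())              # typically ~/.local/lib/pythonX.Y/site-packages
print(site.getusersitepackages() in sys.path)  # expect True
import pyHealpix                               # should now import without sudo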
......@@ -154,7 +154,7 @@ may cause trouble.
- Install pyHealpix:
git clone https://gitlab.mpcdf.mpg.de/mtr/pyHealpix.git
git clone https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
cd pyHealpix
autoreconf -i && ./configure && sudo make install
cd ..
......
#!/bin/bash
git clone -b mpi https://github.com/fredRos/pyFFTW.git
git clone -b mpi https://github.com/ultimanet/pyFFTW.git
cd pyFFTW/
CC=mpicc python setup.py build_ext install
cd ..
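To verify that the MPI-enabled pyFFTW build at least works for ordinary single-process transforms, its numpy-like interface can be compared against numpy's FFT (the interfaces module is standard pyFFTW and assumed to be unchanged in this fork):

# Hedged check: compare pyFFTW's numpy-like interface against numpy.fft.
import numpy as np
import pyfftw.interfaces.numpy_fft as fftw_fft

a = np.random.randn(64) + 1j * np.random.randn(64)
print(np.allclose(fftw_fft.fft(a), np.fft.fft(a)))   # expect True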
......
......@@ -10,10 +10,10 @@ rank = comm.rank
if __name__ == "__main__":
distribution_strategy = 'fftw'
distribution_strategy = 'not'
# Setting up the geometry
s_space = RGSpace([512, 512], dtype=np.float64)
s_space = RGSpace([512, 512])
fft = FFTOperator(s_space)
h_space = fft.target[0]
p_space = PowerSpace(h_space, distribution_strategy=distribution_strategy)
......@@ -54,4 +54,3 @@ if __name__ == "__main__":
# pl.plot([go.Heatmap(z=d_data)], filename='data.html')
# pl.plot([go.Heatmap(z=m_data)], filename='map.html')
# pl.plot([go.Heatmap(z=ss_data)], filename='map_orig.html')
#
......@@ -53,10 +53,10 @@ class WienerFilterEnergy(Energy):
if __name__ == "__main__":
distribution_strategy = 'fftw'
distribution_strategy = 'not'
# Set up spaces and fft transformation
s_space = RGSpace([512, 512], dtype=np.float)
s_space = RGSpace([512, 512])
fft = FFTOperator(s_space)
h_space = fft.target[0]
p_space = PowerSpace(h_space, distribution_strategy=distribution_strategy)
......
......@@ -18,8 +18,6 @@
import abc
import numpy as np
from keepers import Loggable,\
Versionable
......@@ -27,9 +25,9 @@ from keepers import Loggable,\
class DomainObject(Versionable, Loggable, object):
__metaclass__ = abc.ABCMeta
def __init__(self, dtype):
self._dtype = np.dtype(dtype)
self._ignore_for_hash = []
def __init__(self):
# _global_id is used in the Versioning module from keepers
self._ignore_for_hash = ['_global_id']
def __hash__(self):
# Extract the identifying parts from the vars(self) dict.
......@@ -43,17 +41,20 @@ class DomainObject(Versionable, Loggable, object):
def __eq__(self, x):
if isinstance(x, type(self)):
return hash(self) == hash(x)
for key in vars(self).keys():
item1 = vars(self)[key]
if key in self._ignore_for_hash or key == '_ignore_for_hash':
continue
item2 = vars(x)[key]
if item1 != item2:
return False
return True
else:
return False
def __ne__(self, x):
return not self.__eq__(x)
@property
def dtype(self):
return self._dtype
@abc.abstractproperty
def shape(self):
raise NotImplementedError(
......@@ -78,10 +79,9 @@ class DomainObject(Versionable, Loggable, object):
# ---Serialization---
def _to_hdf5(self, hdf5_group):
hdf5_group.attrs['dtype'] = self.dtype.name
return None
@classmethod
def _from_hdf5(cls, hdf5_group, repository):
result = cls(dtype=np.dtype(hdf5_group.attrs['dtype']))
result = cls()
return result
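After this change a DomainObject subclass no longer passes a dtype to the base constructor. A minimal, hypothetical sketch of the new pattern (the import path and the MyDomain class are assumptions for illustration; only the dtype-free base constructor call and the abstract shape property come from the diff):

# Hypothetical subclass sketch; import path is an assumption.
from nifty.domain_object import DomainObject

class MyDomain(DomainObject):
    def __init__(self, length):
        super(MyDomain, self).__init__()   # no dtype argument any more
        self._length = int(length)

    @property
    def shape(self):
        return (self._length,)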
......@@ -45,8 +45,7 @@ class Field(Loggable, Versionable, object):
self.domain_axes = self._get_axes_tuple(self.domain)
self.dtype = self._infer_dtype(dtype=dtype,
val=val,
domain=self.domain)
val=val)
self.distribution_strategy = self._parse_distribution_strategy(
distribution_strategy=distribution_strategy,
......@@ -86,18 +85,19 @@ class Field(Loggable, Versionable, object):
axes_list += [tuple(l)]
return tuple(axes_list)
def _infer_dtype(self, dtype, val, domain):
def _infer_dtype(self, dtype, val):
if dtype is None:
if isinstance(val, Field) or \
isinstance(val, distributed_data_object):
try:
dtype = val.dtype
dtype_tuple = (np.dtype(gc['default_field_dtype']),)
except AttributeError:
try:
if val is None:
raise TypeError
dtype = np.result_type(val)
except(TypeError):
dtype = np.dtype(gc['default_field_dtype'])
else:
dtype_tuple = (np.dtype(dtype),)
if domain is not None:
dtype_tuple += tuple(np.dtype(sp.dtype) for sp in domain)
dtype = reduce(lambda x, y: np.result_type(x, y), dtype_tuple)
dtype = np.dtype(dtype)
return dtype
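The reworked _infer_dtype takes the dtype from val.dtype where available, otherwise lets np.result_type infer it from the value, and falls back to the configured default_field_dtype for val=None (hence the explicitly raised TypeError). A plain numpy illustration of the inference step:

# Illustration of the np.result_type inference used above.
import numpy as np

print(np.result_type(3))          # platform integer dtype
print(np.result_type(2.5))        # float64
print(np.result_type(1 + 2j))     # complex128
print(np.result_type(3, 2.5))     # float64 (promotion of mixed inputs)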
......@@ -210,16 +210,10 @@ class Field(Loggable, Versionable, object):
self.val.get_axes_local_distribution_strategy(
self.domain_axes[space_index])
if real_signal:
power_dtype = np.dtype('complex')
else:
power_dtype = np.dtype('float')
harmonic_domain = self.domain[space_index]
power_domain = PowerSpace(harmonic_domain=harmonic_domain,
distribution_strategy=distribution_strategy,
log=log, nbin=nbin, binbounds=binbounds,
dtype=power_dtype)
log=log, nbin=nbin, binbounds=binbounds)
# extract pindex and rho from power_domain
pindex = power_domain.pindex
......@@ -251,8 +245,14 @@ class Field(Loggable, Versionable, object):
result_domain = list(self.domain)
result_domain[space_index] = power_domain
if real_signal:
result_dtype = np.complex
else:
result_dtype = np.float
result_field = self.copy_empty(
domain=result_domain,
dtype=result_dtype,
distribution_strategy=power_spectrum.distribution_strategy)
result_field.set_val(new_val=power_spectrum, copy=False)
......@@ -303,59 +303,41 @@ class Field(Loggable, Versionable, object):
return result_obj
def power_synthesize(self, spaces=None, real_signal=True,
def power_synthesize(self, spaces=None, real_power=True, real_signal=True,
mean=None, std=None):
# check if all spaces in `self.domain` are either of signal-type or
# power_space instances
for sp in self.domain:
if not sp.harmonic and not isinstance(sp, PowerSpace):
self.logger.info(
"Field has a space in `domain` which is neither "
"harmonic nor a PowerSpace.")
# check if the `spaces` input is valid
spaces = utilities.cast_axis_to_tuple(spaces, len(self.domain))
if spaces is None:
if len(self.domain) == 1:
spaces = (0,)
else:
raise ValueError(
"Field has multiple spaces as domain "
"but `spaces` is None.")
if len(spaces) == 0:
raise ValueError(
"No space for synthesis specified.")
elif len(spaces) > 1:
raise ValueError(
"Conversion of only one space at a time is allowed.")
if spaces is None:
spaces = range(len(self.domain))
power_space_index = spaces[0]
power_domain = self.domain[power_space_index]
if not isinstance(power_domain, PowerSpace):
raise ValueError(
"A PowerSpace is needed for field synthetization.")
for power_space_index in spaces:
power_space = self.domain[power_space_index]
if not isinstance(power_space, PowerSpace):
raise ValueError("A PowerSpace is needed for field "
"synthetization.")
# create the result domain
result_domain = list(self.domain)
harmonic_domain = power_domain.harmonic_domain
result_domain[power_space_index] = harmonic_domain
for power_space_index in spaces:
power_space = self.domain[power_space_index]
harmonic_domain = power_space.harmonic_domain
result_domain[power_space_index] = harmonic_domain
# create random samples: one or two, depending on whether the
# power spectrum is real or complex
if issubclass(power_domain.dtype.type, np.complexfloating):
result_list = [None, None]
else:
if real_power:
result_list = [None]
else:
result_list = [None, None]
result_list = [self.__class__.from_random(
'normal',
mean=mean,
std=std,
domain=result_domain,
dtype=harmonic_domain.dtype,
dtype=np.complex,
distribution_strategy=self.distribution_strategy)
for x in result_list]
......@@ -363,18 +345,51 @@ class Field(Loggable, Versionable, object):
# processing without killing the fields.
# if the signal-space field should be real, hermitianize the field
# components
spec = self.val.get_full_data()
for power_space_index in spaces:
spec = self._spec_to_rescaler(spec, result_list, power_space_index)
local_rescaler = spec
result_val_list = [x.val for x in result_list]
# apply the rescaler to the random fields
result_val_list[0].apply_scalar_function(
lambda x: x * local_rescaler.real,
inplace=True)
if not real_power:
result_val_list[1].apply_scalar_function(
lambda x: x * local_rescaler.imag,
inplace=True)
if real_signal:
result_val_list = [harmonic_domain.hermitian_decomposition(
x.val,
axes=x.domain_axes[power_space_index],
for power_space_index in spaces:
harmonic_domain = result_domain[power_space_index]
result_val_list = [harmonic_domain.hermitian_decomposition(
result_val,
axes=result.domain_axes[power_space_index],
preserve_gaussian_variance=True)[0]
for x in result_list]
for (result, result_val)
in zip(result_list, result_val_list)]
# store the result into the fields
[x.set_val(new_val=y, copy=False) for x, y in
zip(result_list, result_val_list)]
if real_power:
result = result_list[0]
else:
result_val_list = [x.val for x in result_list]
result = result_list[0] + 1j*result_list[1]
return result
def _spec_to_rescaler(self, spec, result_list, power_space_index):
power_space = self.domain[power_space_index]
# weight the random fields with the power spectrum
# therefore get the pindex from the power space
pindex = power_domain.pindex
pindex = power_space.pindex
# take the local data from pindex. This data must be compatible to the
# local data of the field given the slice of the PowerSpace
local_distribution_strategy = \
......@@ -390,34 +405,12 @@ class Field(Loggable, Versionable, object):
# power spectrum into the appropriate places of the pindex array.
# Do this for every 'pindex-slice' in parallel using the 'slice(None)'s
local_pindex = pindex.get_local_data(copy=False)
full_spec = self.val.get_full_data()
local_blow_up = [slice(None)]*len(self.shape)
local_blow_up[self.domain_axes[power_space_index][0]] = local_pindex
# here, the power_spectrum is distributed into the new shape
local_rescaler = full_spec[local_blow_up]
# apply the rescaler to the random fields
result_val_list[0].apply_scalar_function(
lambda x: x * local_rescaler.real,
inplace=True)
if issubclass(power_domain.dtype.type, np.complexfloating):
result_val_list[1].apply_scalar_function(
lambda x: x * local_rescaler.imag,
inplace=True)
# store the result into the fields
[x.set_val(new_val=y, copy=False) for x, y in
zip(result_list, result_val_list)]
if issubclass(power_domain.dtype.type, np.complexfloating):
result = result_list[0] + 1j*result_list[1]
else:
result = result_list[0]
return result
local_rescaler = spec[local_blow_up]
return local_rescaler
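The core of _spec_to_rescaler is the pindex "blow-up": every cell of the harmonic grid picks up the spectrum value of its power bin via fancy indexing. A toy numpy sketch of that step (shapes and values are made up; the real code works on distributed data and slices only along the relevant axis):

# Toy illustration of the pindex blow-up performed in _spec_to_rescaler.
import numpy as np

spec = np.array([4.0, 2.0, 1.0])       # one value per power bin
pindex = np.array([[0, 1],             # bin index of every harmonic cell
                   [1, 2]])
rescaler = spec[pindex]                # -> [[4., 2.], [2., 1.]]
print(rescaler)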
# ---Properties---
......
......@@ -16,20 +16,18 @@
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import numpy as np
from field_type import FieldType
class FieldArray(FieldType):
def __init__(self, shape, dtype=np.float):
def __init__(self, shape):
try:
new_shape = tuple([int(i) for i in shape])
except TypeError:
new_shape = (int(shape), )
self._shape = new_shape
super(FieldArray, self).__init__(dtype=dtype)
super(FieldArray, self).__init__()
@property
def shape(self):
......@@ -43,14 +41,9 @@ class FieldArray(FieldType):
def _to_hdf5(self, hdf5_group):
hdf5_group['shape'] = self.shape
hdf5_group.attrs['dtype'] = self.dtype.name
return None
@classmethod
def _from_hdf5(cls, hdf5_group, loopback_get):
result = cls(
shape=hdf5_group['shape'][:],
dtype=np.dtype(hdf5_group.attrs['dtype'])
)
result = cls(shape=hdf5_group['shape'][:])
return result
......@@ -19,7 +19,6 @@
import numpy as np
from itertools import product
def get_slice_list(shape, axes):
"""
Helper function which generates slice list(s) to traverse over all
......@@ -66,174 +65,6 @@ def get_slice_list(shape, axes):
return
def hermitianize_gaussian(x, axes=None):
# make the point inversions
flipped_x = _hermitianize_inverter(x, axes=axes)
flipped_x = flipped_x.conjugate()
# check if x was already hermitian
if (x == flipped_x).all():
return x
# average x and flipped_x.
# Correct the variance by multiplying sqrt(0.5)
x = (x + flipped_x) * np.sqrt(0.5)
# The fixed points of the point inversion must not be avaraged.
# Hence one must multiply them again with sqrt(0.5)
# -> Get the middle index of the array
mid_index = np.array(x.shape, dtype=np.int) // 2
dimensions = mid_index.size
# Use ndindex to iterate over all combinations of zeros and the
# mid_index in order to correct all fixed points.
if axes is None:
axes = xrange(dimensions)
ndlist = [2 if i in axes else 1 for i in xrange(dimensions)]
ndlist = tuple(ndlist)
for i in np.ndindex(ndlist):
temp_index = tuple(i * mid_index)
x[temp_index] *= np.sqrt(0.5)
try:
x.hermitian = True
except(AttributeError):
pass
return x
def hermitianize(x, axes=None):
# make the point inversions
flipped_x = _hermitianize_inverter(x, axes=axes)
flipped_x = flipped_x.conjugate()
# average x and flipped_x.
# x = (x + flipped_x) / 2.
result_x = x + flipped_x
result_x /= 2.
# try:
# x.hermitian = True
# except(AttributeError):
# pass
return result_x
def _hermitianize_inverter(x, axes):
# calculate the number of dimensions the input array has
dimensions = len(x.shape)
# prepare the slicing object which will be used for mirroring
slice_primitive = [slice(None), ] * dimensions
# copy the input data
y = x.copy()
if axes is None:
axes = xrange(dimensions)
# flip in the desired directions
for i in axes:
slice_picker = slice_primitive[:]
slice_picker[i] = slice(1, None, None)
slice_picker = tuple(slice_picker)
slice_inverter = slice_primitive[:]
slice_inverter[i] = slice(None, 0, -1)
slice_inverter = tuple(slice_inverter)
try:
y.set_data(to_key=slice_picker, data=y,
from_key=slice_inverter)
except(AttributeError):
y[slice_picker] = y[slice_inverter]
return y
def direct_vdot(x, y):
# the input could be fields. Try to extract the data
try:
x = x.get_val()
except(AttributeError):
pass
try:
y = y.get_val()
except(AttributeError):
pass
# try to make a direct vdot
try:
return x.vdot(y)
except(AttributeError):
pass
try:
return y.vdot(x)
except(AttributeError):
pass
# fallback to numpy
return np.vdot(x, y)
def convert_nested_list_to_object_array(x):
# if x is a nested_list full of ndarrays all having the same size,
# np.shape returns the shape of the ndarrays, too, i.e. too many
# dimensions
possible_shape = np.shape(x)
# Check if possible_shape goes too deep.
dimension_counter = 0
current_extract = x
for i in xrange(len(possible_shape)):
if not isinstance(current_extract, list) and \
not isinstance(current_extract, tuple):
break
current_extract = current_extract[0]
dimension_counter += 1
real_shape = possible_shape[:dimension_counter]
# if the numpy array was not encapsulated at all, return x directly
if real_shape == ():
return x
# Prepare the carrier-object
carrier = np.empty(real_shape, dtype=np.object)
for i in xrange(reduce(lambda x, y: x * y, real_shape)):
ii = np.unravel_index(i, real_shape)
try:
carrier[ii] = x[ii]
except(TypeError):
extracted = x
for j in xrange(len(ii)):
extracted = extracted[ii[j]]
carrier[ii] = extracted
return carrier
def field_map(ishape, function, *args):
if ishape == ():
return function(*args)
else:
if args == ():
result = np.empty(ishape, dtype=np.object)
for i in xrange(reduce(lambda x, y: x * y, ishape)):
ii = np.unravel_index(i, ishape)
result[ii] = function()
return result
else:
# define a helper function in order to clip the get-indices
# to be suitable for the foreign arrays in args.
# This allows you to do operations, like adding to fields
# with ishape (3,4,3) and (3,4,1)
def get_clipped(w, ind):
w_shape = np.array(np.shape(w))
get_tuple = tuple(np.clip(ind, 0, w_shape - 1))
return w[get_tuple]
result = np.empty_like(args[0])
for i in xrange(reduce(lambda x, y: x * y, result.shape)):
ii = np.unravel_index(i, result.shape)
result[ii] = function(
*map(
lambda z: get_clipped(z, ii), args
)
)
# result[ii] = function(*map(lambda z: z[ii], args))
return result
def cast_axis_to_tuple(axis, length):
if axis is None:
......@@ -261,41 +92,6 @@ def cast_axis_to_tuple(axis, length):
return axis
def complex_bincount(x, weights=None, minlength=None):
try:
complex_weights_Q = issubclass(weights.dtype.type,
np.complexfloating)
except AttributeError:
complex_weights_Q = False
if complex_weights_Q:
real_bincount = x.bincount(weights=weights.real,
minlength=minlength)
imag_bincount = x.bincount(weights=weights.imag,
minlength=minlength)
return real_bincount + imag_bincount
else:
return x.bincount(weights=weights, minlength=minlength)
def get_default_codomain(domain):
from nifty.spaces import RGSpace, HPSpace, GLSpace, LMSpace
from nifty.operators.fft_operator.transformations import RGRGTransformation, \
HPLMTransformation, GLLMTransformation, LMGLTransformation
if isinstance(domain, RGSpace):
return RGRGTransformation.get_codomain(domain)
elif isinstance(domain, HPSpace):
return HPLMTransformation.get_codomain(domain)