Commit e7db670d authored by Theo Steininger

Merge branch 'master' of gitlab.mpcdf.mpg.de:ift/NIFTy

# Conflicts:
#	nifty/spaces/lm_space/lm_space.py
parents b8218064 997bbf68
Pipeline #14877 passed with stage in 6 minutes and 17 seconds
......@@ -97,10 +97,8 @@ if __name__ == "__main__":
# The information source
j = R.adjoint_times(N.inverse_times(d))
realized_power = log(sh.power_analyze(logarithmic=p_space.config["logarithmic"],
nbin=p_space.config["nbin"]))
data_power = log(fft(d).power_analyze(logarithmic=p_space.config["logarithmic"],
nbin=p_space.config["nbin"]))
realized_power = log(sh.power_analyze(binbounds=p_space.binbounds))
data_power = log(fft(d).power_analyze(binbounds=p_space.binbounds))
d_data = d.val.get_full_data().real
if rank == 0:
pl.plot([go.Heatmap(z=d_data)], filename='data.html')
......
......@@ -270,7 +270,7 @@ class Field(Loggable, Versionable, object):
# ---Powerspectral methods---
def power_analyze(self, spaces=None, logarithmic=False, nbin=None,
def power_analyze(self, spaces=None, logarithmic=None, nbin=None,
binbounds=None, keep_phase_information=False):
""" Computes the square root power spectrum for a subspace of `self`.
......@@ -287,14 +287,15 @@ class Field(Loggable, Versionable, object):
(default : None).
logarithmic : boolean *optional*
True if the output PowerSpace should use logarithmic binning.
{default : False}
{default : None}
nbin : int *optional*
The number of bins the resulting PowerSpace shall have
(default : None).
if nbin==None : maximum number of bins is used
binbounds : array-like *optional*
Inner bounds of the bins (default : None).
if binbounds==None : bins are inferred. Overwrites nbins and log
Overrides nbin and logarithmic.
if binbounds==None : bins are inferred.
keep_phase_information : boolean, *optional*
If False, return a real-valued result containing the power spectrum
of the input Field.
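A hedged usage sketch of the new interface (not part of the diff; sh and p_space are the demo's own variables from the hunk above): the binning scheme now lives in the PowerSpace rather than in a config dict, and explicit binbounds take precedence over logarithmic/nbin.
p_default = sh.power_analyze()                           # binning inferred from the space
p_log = sh.power_analyze(logarithmic=True)               # logarithmic bins
p_match = sh.power_analyze(binbounds=p_space.binbounds)  # reuse an existing PowerSpace's bins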
......@@ -397,14 +398,9 @@ class Field(Loggable, Versionable, object):
logarithmic=logarithmic, nbin=nbin,
binbounds=binbounds)
# extract pindex and rho from power_domain
pindex = power_domain.pindex
rho = power_domain.rho
power_spectrum = cls._calculate_power_spectrum(
field_val=work_field.val,
pindex=pindex,
rho=rho,
pdomain=power_domain,
axes=work_field.domain_axes[space_index])
# create the result field and put power_spectrum into it
......@@ -421,8 +417,11 @@ class Field(Loggable, Versionable, object):
return result_field
@classmethod
def _calculate_power_spectrum(cls, field_val, pindex, rho, axes=None):
def _calculate_power_spectrum(cls, field_val, pdomain, axes=None):
pindex = pdomain.pindex
# MR FIXME: how about iterating over slices, instead of replicating
# pindex? Would save memory and probably isn't slower.
if axes is not None:
pindex = cls._shape_up_pindex(
pindex=pindex,
......@@ -431,6 +430,7 @@ class Field(Loggable, Versionable, object):
axes=axes)
power_spectrum = pindex.bincount(weights=field_val,
axis=axes)
rho = pdomain.rho
if axes is not None:
new_rho_shape = [1, ] * len(power_spectrum.shape)
new_rho_shape[axes[0]] = len(rho)
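A hedged numpy sketch of what this binning step amounts to (standalone toy numbers, not from the diff; the hunk cuts off before the division, but rho is presumably divided out right after the lines shown): the weighted bincount sums the field values per power bin, and dividing by the bin multiplicity rho turns the sums into per-bin means.
import numpy as np
pindex = np.array([0, 1, 1, 2, 2, 2])           # bin index of every mode
field_val = np.array([1., 2., 4., 3., 3., 3.])  # field value per mode
rho = np.bincount(pindex)                       # modes per bin -> [1, 2, 3]
power = np.bincount(pindex, weights=field_val) / rho   # -> [1., 3., 3.]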
......@@ -755,7 +755,7 @@ class Field(Loggable, Versionable, object):
Returns
-------
out : tuple
The output object. The tuple contains the dimansions of the spaces
The output object. The tuple contains the dimensions of the spaces
in domain.
See Also
......
......@@ -103,14 +103,12 @@ class CriticalPowerEnergy(Energy):
posterior_sample = generate_posterior_sample(
self.m, self.D)
projected_sample = posterior_sample.power_analyze(
logarithmic=self.position.domain[0].config["logarithmic"],
nbin=self.position.domain[0].config["nbin"])
binbounds=self.position.domain[0].binbounds)
w += (projected_sample) * self.rho
w /= float(self.samples)
else:
w = self.m.power_analyze(
logarithmic=self.position.domain[0].config["logarithmic"],
nbin=self.position.domain[0].config["nbin"])
binbounds=self.position.domain[0].binbounds)
w *= self.rho
self._w = w
return self._w
......
......@@ -38,7 +38,7 @@ class InvertibleOperatorMixin(object):
(default: ConjugateGradient)
preconditioner : LinearOperator
Preconditioner that is used by ConjugateGraduent if no minimizer was
Preconditioner that is used by ConjugateGradient if no minimizer was
given.
Attributes
......
......@@ -33,8 +33,7 @@ class ResponseOperator(LinearOperator):
domain : tuple of DomainObjects, i.e. Spaces and FieldTypes
The domain on which the Operator's input Field lives.
target : tuple of DomainObjects, i.e. Spaces and FieldTypes
The domain in which the outcome of the operator lives. As the Operator
is endomorphic this is the same as its domain.
The domain in which the outcome of the operator lives.
unitary : boolean
Indicates whether the Operator is unitary or not.
......
......@@ -154,6 +154,12 @@ class LMSpace(Space):
return dists
def get_unique_distances(self):
return np.arange(self.lmax+1, dtype=np.float64)
def get_natural_binbounds(self):
return np.arange(self.lmax, dtype=np.float64) + 0.5
@staticmethod
def _distance_array_helper(index_array, lmax):
u = 2*lmax + 1
......
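A hedged illustration of the new LMSpace helper (toy value, not from the diff): with lmax = 3 the natural binbounds are the half-integers between neighbouring multipoles, so each l = 0..3 falls into its own bin.
import numpy as np
lmax = 3
binbounds = np.arange(lmax, dtype=np.float64) + 0.5   # -> [0.5, 1.5, 2.5]
np.searchsorted(binbounds, np.arange(lmax + 1))       # -> [0, 1, 2, 3]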
......@@ -17,4 +17,3 @@
# and financially supported by the Studienstiftung des deutschen Volkes.
from power_space import PowerSpace
from power_index_factory import PowerIndexFactory
\ No newline at end of file
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2017 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
from power_indices import PowerIndices
class _PowerIndexFactory(object):
def __init__(self):
self.power_indices_storage = {}
def get_power_index(self, domain, distribution_strategy,
logarithmic=False, nbin=None, binbounds=None):
key = (domain, distribution_strategy)
if key not in self.power_indices_storage:
self.power_indices_storage[key] = \
PowerIndices(domain, distribution_strategy,
logarithmic=logarithmic,
nbin=nbin,
binbounds=binbounds)
power_indices = self.power_indices_storage[key]
power_index = power_indices.get_index_dict(logarithmic=logarithmic,
nbin=nbin,
binbounds=binbounds)
return power_index
PowerIndexFactory = _PowerIndexFactory()
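A hedged usage sketch (h_space stands for some harmonic Space and is not defined in this diff): the module-level singleton keeps one PowerIndices instance per (domain, distribution_strategy) pair, so repeated requests with the same binning scheme return the cached index dict instead of recomputing it.
index_dict = PowerIndexFactory.get_power_index(h_space, 'not',
                                               logarithmic=False,
                                               nbin=None, binbounds=None)
pindex, kindex, rho = (index_dict['pindex'], index_dict['kindex'],
                       index_dict['rho'])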
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2017 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.
import numpy as np
from d2o import distributed_data_object,\
STRATEGIES as DISTRIBUTION_STRATEGIES
from d2o.config import dependency_injector as d2o_di
from d2o.config import configuration as d2o_config
class PowerIndices(object):
"""Computes helpful quantities to deal with power spectra.
Given the shape and the density of an underlying rectangular grid, this
class provides the user with the pindex, kindex, rho and pundex. The
indices are binned according to the supplied parameter scheme. If wanted,
computed results are stored for future reuse.
Parameters
----------
domain : NIFTy harmonic space
The space for which the power indices get computed
distribution_strategy : str
The distribution_strategy that will be used for the k_array and pindex
distributed_data_object.
logarithmic : bool *optional*
Flag specifying if the binning of the default indices is
performed on logarithmic scale.
nbin : integer *optional*
Number of used bins for the binning of the default indices.
binbounds : {list, array}
Array-like inner boundaries of the used bins of the default
indices.
"""
def __init__(self, domain, distribution_strategy,
logarithmic=False, nbin=None, binbounds=None):
self.domain = domain
self.distribution_strategy = distribution_strategy
# Compute the global k_array
self.k_array = self.domain.get_distance_array(distribution_strategy)
# Initialize the dictionary which stores all individual index-dicts
self.global_dict = {}
# Set self.default_parameters
self.set_default(config_dict={'logarithmic': logarithmic,
'nbin': nbin,
'binbounds': binbounds})
# Redirect the direct calls approaching a power_index instance to the
# default_indices dict
@property
def default_indices(self):
return self.get_index_dict(**self.default_parameters)
def __getitem__(self, x):
return self.default_indices.get(x)
def __contains__(self, x):
return self.default_indices.__contains__(x)
def __iter__(self):
return self.default_indices.__iter__()
def __getattr__(self, x):
return self.default_indices.__getattribute__(x)
def set_default(self, **kwargs):
"""
Sets the index-set which is specified by the parameters as the
default for the power_index instance.
Parameters
----------
logarithmic : bool
Flag specifying if the binning is performed on logarithmic
scale.
nbin : integer
Number of used bins.
binbounds : {list, array}
Array-like inner boundaries of the used bins.
Returns
-------
None
"""
parsed_kwargs = self._cast_config(**kwargs)
self.default_parameters = parsed_kwargs
def _cast_config(self, **kwargs):
"""
internal helper function which casts the various combinations of
possible parameters into a properly defaulted dictionary
"""
temp_config_dict = kwargs.get('config_dict', None)
if temp_config_dict is not None:
return self._cast_config_helper(**temp_config_dict)
else:
defaults = self.default_parameters
temp_logarithmic = kwargs.get("logarithmic",
defaults['logarithmic'])
temp_nbin = kwargs.get("nbin", defaults['nbin'])
temp_binbounds = kwargs.get("binbounds", defaults['binbounds'])
return self._cast_config_helper(logarithmic=temp_logarithmic,
nbin=temp_nbin,
binbounds=temp_binbounds)
def _cast_config_helper(self, logarithmic, nbin, binbounds):
"""
internal helper function which sets the defaults for the
_cast_config function
"""
try:
temp_logarithmic = bool(logarithmic)
except(TypeError):
temp_logarithmic = False
try:
temp_nbin = int(nbin)
except(TypeError):
temp_nbin = None
try:
temp_binbounds = tuple(np.array(binbounds))
except(TypeError):
temp_binbounds = None
temp_dict = {"logarithmic": temp_logarithmic,
"nbin": temp_nbin,
"binbounds": temp_binbounds}
return temp_dict
def get_index_dict(self, **kwargs):
"""
Returns a dictionary containing the pindex, kindex, rho and pundex
binned according to the supplied parameter scheme and a
configuration dict containing this scheme.
Parameters
----------
store : bool
Flag specifying if the calculated index dictionary should be
stored in the global_dict for future use.
logarithmic : bool
Flag specifying if the binning is performed on logarithmic
scale.
nbin : integer
Number of used bins.
binbounds : {list, array}
Array-like inner boundaries of the used bins.
Returns
-------
index_dict : dict
Contains the keys: 'config', 'pindex', 'kindex', 'rho' and
'pundex'
"""
# Cast the input arguments
temp_config_dict = self._cast_config(**kwargs)
# Compute a hashable identifier from the config which will be used
# as dict key
temp_key = self._freeze_config(temp_config_dict)
# Check if the result should be stored for future use.
storeQ = kwargs.get("store", True)
# Try to find the requested index dict in the global_dict
try:
return self.global_dict[temp_key]
except(KeyError):
# If it is not found, calculate it.
temp_index_dict = self._compute_index_dict(temp_config_dict)
# Store it, if required
if storeQ:
self.global_dict[temp_key] = temp_index_dict
# Important: If the result is stored, return a reference to
# the dictionary entry, not only a plain copy. Otherwise,
# set_default breaks!
return self.global_dict[temp_key]
else:
# Return the plain result.
return temp_index_dict
def _freeze_config(self, config_dict):
"""
a helper function which forms a hashable identifying object from
a config dictionary which can be used as key of a dict
"""
return frozenset(config_dict.items())
def _compute_index_dict(self, config_dict):
"""
Internal helper function which takes a config_dict, asks for the
pindex/kindex/rho/pundex set, and bins them according to the config
"""
# if no binning is requested, compute the indices, build the dict,
# and return it straight.
if not config_dict["logarithmic"] and config_dict["nbin"] is None and \
config_dict["binbounds"] is None:
(temp_pindex, temp_kindex, temp_rho, temp_pundex) =\
self._compute_indices(self.k_array)
temp_k_array = self.k_array
# if binning is required, make a recursive call to get the unbinned
# indices, bin them, compute the pundex and then return everything.
else:
# Get the unbinned indices
temp_unbinned_indices = self.get_index_dict(nbin=None,
binbounds=None,
logarithmic=False,
store=False)
# Bin them
(temp_pindex, temp_kindex, temp_rho, temp_pundex) = \
self._bin_power_indices(
temp_unbinned_indices, **config_dict)
# Make a binned version of k_array
temp_k_array = self._compute_k_array_from_pindex_kindex(
temp_pindex, temp_kindex)
temp_index_dict = {"config": config_dict,
"pindex": temp_pindex,
"kindex": temp_kindex,
"rho": temp_rho,
"pundex": temp_pundex,
"k_array": temp_k_array}
return temp_index_dict
def _compute_k_array_from_pindex_kindex(self, pindex, kindex):
tempindex = pindex.copy(dtype=kindex.dtype)
result = tempindex.apply_scalar_function(
lambda x: kindex[x.astype(np.dtype('int'))])
return result
def _compute_indices(self, k_array):
"""
Internal helper function which computes pindex, kindex, rho and pundex
from a given k_array
"""
##########
# kindex #
##########
global_kindex = k_array.unique()
##########
# pindex #
##########
# compute the local pindex slice on basis of the local k_array data
local_pindex = np.searchsorted(global_kindex, k_array.get_local_data())
# prepare the distributed_data_object
global_pindex = distributed_data_object(
global_shape=k_array.shape,
dtype=local_pindex.dtype,
distribution_strategy=self.distribution_strategy)
# store the local pindex data in the global_pindex d2o
global_pindex.set_local_data(local_pindex)
#######
# rho #
#######
global_rho = global_pindex.bincount().get_full_data()
##########
# pundex #
##########
global_pundex = self._compute_pundex(global_pindex,
global_kindex)
return global_pindex, global_kindex, global_rho, global_pundex
def _compute_pundex(self, global_pindex, global_kindex):
"""
Internal helper function which computes the pundex array from a
pindex and a kindex array. This function is separated from the
_compute_indices function as it is needed in _bin_power_indices,
too.
"""
if self.distribution_strategy in DISTRIBUTION_STRATEGIES['slicing']:
##########
# pundex #
##########
# Prepare the local data
local_pindex = global_pindex.get_local_data()
# Compute the local pundices for the local pindices
(temp_uniqued_pindex, local_temp_pundex) = np.unique(
local_pindex,
return_index=True)
# cure a bug in numpy
# https://github.com/numpy/numpy/issues/9405
local_temp_pundex = np.asarray(local_temp_pundex, dtype=np.int)
# Shift the local pundices by the nodes' local_dim_offset
local_temp_pundex += global_pindex.distributor.local_dim_offset
# Prepare the pundex arrays used for the Allreduce operation
# pundex has the same length as the kindex array
local_pundex = np.zeros(shape=global_kindex.shape, dtype=np.int)
# Set the default value higher than the maximal possible pundex
# value so that MPI.MIN can sort out the default
local_pundex += np.prod(global_pindex.shape) + 1
# Prepare the buffer that receives the global pundex from the Allreduce
global_pundex = np.empty_like(local_pundex)
# Store the individual pundices in the local_pundex array
local_pundex[temp_uniqued_pindex] = local_temp_pundex
# Extract the MPI module from the global_pindex d2o
MPI = d2o_di[d2o_config['mpi_module']]
# Use Allreduce to find the first occurrences/smallest pundices
global_pindex.comm.Allreduce(local_pundex,
global_pundex,
op=MPI.MIN)
return global_pundex
elif self.distribution_strategy in DISTRIBUTION_STRATEGIES['not']:
##########
# pundex #
##########
pundex = np.unique(global_pindex.get_local_data(),
return_index=True)[1]
return pundex
else:
raise NotImplementedError(
"_compute_pundex_d2o not implemented for given "
"distribution_strategy")
def _bin_power_indices(self, index_dict, **kwargs):
"""
Returns the binned power indices associated with the Fourier grid.
Parameters
----------
pindex : distributed_data_object
Index of the Fourier grid points in a distributed_data_object.
kindex : ndarray
Array of all k-vector lengths.
rho : ndarray
Degeneracy factor of the individual k-vectors.
logarithmic : bool
Flag specifying if the binning is performed on logarithmic
scale.
nbin : integer
Number of used bins.
binbounds : {list, array}
Array-like inner boundaries of the used bins.
Returns
-------
pindex : distributed_data_object
kindex, rho, pundex : ndarrays
The (re)binned power indices.
"""
# Cast the given config
temp_config_dict = self._cast_config(**kwargs)
logarithmic = temp_config_dict['logarithmic']
nbin = temp_config_dict['nbin']
binbounds = temp_config_dict['binbounds']
# Extract the necessary indices from the supplied index dict
pindex = index_dict["pindex"]
kindex = index_dict["kindex"]
rho = index_dict["rho"]
# boundaries
if(binbounds is not None):
binbounds = np.sort(binbounds)
# equal binning
else:
if(logarithmic is None):
logarithmic = False
if(logarithmic):
k = np.r_[0, np.log(kindex[1:])]
else:
k = kindex
dk = np.max(k[2:] - k[1:-1]) # minimal dk
if(nbin is None):
nbin = int((k[-1] - 0.5 * (k[2] + k[1])) /
dk - 0.5) # maximal nbin
else:
nbin = min(int(nbin), int(
(k[-1] - 0.5 * (k[2] + k[1])) / dk + 2.5))
dk = (k[-1] - 0.5 * (k[2] + k[1])) / (nbin - 2.5)
binbounds = np.r_[0.5 * (3 * k[1] - k[2]),
0.5 * (k[1] + k[2]) + dk * np.arange(nbin - 2)]
if(logarithmic):
binbounds = np.exp(binbounds)
# reordering
reorder = np.searchsorted(binbounds, kindex)
rho_ = np.zeros(len(binbounds) + 1, dtype=rho.dtype)
kindex_ = np.empty(len(binbounds) + 1, dtype=kindex.dtype)
for ii in range(len(reorder)):
if(rho_[reorder[ii]] == 0):
kindex_[reorder[ii]] = kindex[ii]
rho_[reorder[ii]] += rho[ii]
else:
kindex_[reorder[ii]] = ((kindex_[reorder[ii]] *
rho_[reorder[ii]] +
kindex[ii] * rho[ii]) /
(rho_[reorder[ii]] + rho[ii]))
rho_[reorder[ii]] += rho[ii]
pindex_ = pindex.copy_empty()
pindex_.set_local_data(reorder[pindex.get_local_data()])
pundex_ = self._compute_pundex(pindex_, kindex_)
return pindex_, kindex_, rho_, pundex_
......@@ -18,9 +18,7 @@
import numpy as np
import d2o
from power_index_factory import PowerIndexFactory
from d2o import distributed_data_object
from nifty.spaces.space import Space
......@@ -37,18 +35,24 @@ class PowerSpace(Space):
derived from this PowerSpace, e.g. the pindex.
(default : 'not')
logarithmic : bool *optional*
True if logarithmic binning should be used (default : False).
True if logarithmic binning should be used (default : None).
nbin : {int, None} *optional*
The number of bins that should be used for power spectrum binning
(default : None).
if nbin == None, then nbin is set to the length of kindex.
binbounds : {list, array-like} *optional*
Array-like inner boundaries of the used bins of the default
indices.