Commit d349bf74 authored by csongor's avatar csongor
Browse files

Move Space classes in separate files

parent 0677d4f0
......@@ -60,7 +60,7 @@ from power import PowerSpace,\
## optional submodule `rg`
try:
from rg import rg_space,\
from rg import RgSpace,\
power_backward_conversion_rg,\
power_forward_conversion_rg
from nifty_paradict import rg_space_paradict
......@@ -69,19 +69,19 @@ except(ImportError):
## optional submodule `lm`
try:
from lm import lm_space,\
from lm import LmSpace,\
power_backward_conversion_lm,\
power_forward_conversion_lm
from nifty_paradict import lm_space_paradict
try:
from lm import gl_space
from lm import GlSpace
from nifty_paradict import gl_space_paradict
except(ImportError):
pass
try:
from lm import hp_space
from lm import HpSpace
from nifty_paradict import hp_space_paradict
except(ImportError):
pass
......
......@@ -256,7 +256,7 @@ class problem(object):
##-----------------------------------------------------------------------------
#
if(__name__=="__main__"):
x = rg_space((1280), zerocenter=True)
x = RgSpace((1280), zerocenter=True)
p = problem(x, log = False)
about.warnings.off()
## pl.close("all")
......
......@@ -48,7 +48,8 @@ about.infos.off()
##-----------------------------------------------------------------------------
# (global) Faraday map
m = field(hp_space(128), val=np.load(os.path.join(get_demo_dir(),"demo_faraday_map.npy")))
m = field(HpSpace(128), val=np.load(os.path.join(get_demo_dir(),
"demo_faraday_map.npy")))
##-----------------------------------------------------------------------------
......@@ -97,8 +98,8 @@ def run(projection=False, power=False):
m4.plot(title=r"angular quadrupole of $m$ on a Gauss-Legendre grid", **nicely)
# (d) representation on regular grid
y_space = gl_space(384, nlon=768) # auxiliary gl_space
z_space = rg_space([768, 384], dist=[1/360, 1/180])
y_space = GlSpace(384, nlon=768) # auxiliary gl_space
z_space = RgSpace([768, 384], dist=[1/360, 1/180])
m5 = m1.transform(y_space)
m5.cast_domain(z_space)
m5.set_val(np.roll(m5.val[::-1, ::-1], y_space.paradict['nlon']//2, axis=1)) # rearrange value array
......
......@@ -9,7 +9,7 @@ if __name__ == "__main__":
shape = (256, 256)
x_space = rg_space(shape)
x_space = RgSpace(shape)
k_space = x_space.get_codomain()
power = lambda k: 42/((1+k*shape[0])**3)
......
......@@ -49,8 +49,8 @@ if __name__ == "__main__":
#shape = [1024, 1024]
#x_space = rg_space(shape)
#y_space = point_space(1280*1280)
x_space = hp_space(32)
#x_space = gl_space(800)
x_space = HpSpace(32)
#x_space = GlSpace(800)
k_space = x_space.get_codomain() # get conjugate space
......
......@@ -55,7 +55,8 @@ from nifty.operators.nifty_minimization import steepest_descent_new
if __name__ == "__main__":
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
x_space = RgSpace([256, 256]) # define
# signal space
k_space = x_space.get_codomain() # get conjugate space
......
......@@ -39,7 +39,8 @@ if __name__ == "__main__":
# some signal space; e.g., a one-dimensional regular grid
x_space = rg_space([128,]) # define signal space
x_space = RgSpace([128,]) #
# define signal space
k_space = x_space.get_codomain() # get conjugate space
......
......@@ -38,7 +38,8 @@ except(ImportError):
"INFO: neither libsharp_wrapper_gl nor healpy available.")
pass ## import nothing
else:
from nifty_lm import lm_space, hp_space ## import lm & hp
from lm_space import LmSpace ## import lm & hp
from hp_space import HpSpace
## TODO: change about
else:
try:
......@@ -48,9 +49,12 @@ else:
about._errors.cprint(
"ERROR: installed healpy version is older than 1.8.1!"))
except(ImportError):
from nifty_lm import lm_space, gl_space ## import lm & gl
from gl_space import GlSpace ## import lm & gl
from lm_space import LmSpace
else:
from nifty_lm import lm_space, gl_space, hp_space ##import all
from gl_space import GlSpace ##import all
from lm_space import LmSpace
from hp_space import HpSpace
from nifty.lm.nifty_power_conversion_lm import power_backward_conversion_lm,\
power_forward_conversion_lm
......
from __future__ import division
import numpy as np
import pylab as pl
from matplotlib.colors import LogNorm as ln
from matplotlib.ticker import LogFormatter as lf
from d2o import STRATEGIES as DISTRIBUTION_STRATEGIES
from nifty.space import Space
from nifty.config import about,\
nifty_configuration as gc,\
dependency_injector as gdi
from nifty.nifty_paradict import gl_space_paradict
from nifty.nifty_random import random
gl = gdi.get('libsharp_wrapper_gl')
GL_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['global']
class GlSpace(Space):
"""
.. __
.. / /
.. ____ __ / /
.. / _ / / /
.. / /_/ / / /_
.. \___ / \___/ space class
.. /______/
NIFTY subclass for Gauss-Legendre pixelizations [#]_ of the two-sphere.
Parameters
----------
nlat : int
Number of latitudinal bins, or rings.
nlon : int, *optional*
Number of longitudinal bins (default: ``2*nlat - 1``).
dtype : numpy.dtype, *optional*
Data type of the field values (default: numpy.float64).
See Also
--------
hp_space : A class for the HEALPix discretization of the sphere [#]_.
lm_space : A class for spherical harmonic components.
Notes
-----
Only real-valued fields on the two-sphere are supported, i.e.
`dtype` has to be either numpy.float64 or numpy.float32.
References
----------
.. [#] M. Reinecke and D. Sverre Seljebotn, 2013, "Libsharp - spherical
harmonic transforms revisited";
`arXiv:1303.4945 <http://www.arxiv.org/abs/1303.4945>`_
.. [#] K.M. Gorski et al., 2005, "HEALPix: A Framework for
High-Resolution Discretization and Fast Analysis of Data
Distributed on the Sphere", *ApJ* 622..759G.
Attributes
----------
para : numpy.ndarray
One-dimensional array containing the two numbers `nlat` and `nlon`.
dtype : numpy.dtype
Data type of the field values.
discrete : bool
Whether or not the underlying space is discrete, always ``False``
for spherical spaces.
vol : numpy.ndarray
An array containing the pixel sizes.
"""
def __init__(self, nlat, nlon=None, dtype=np.dtype('float64')):
    """
    Sets the attributes for a GlSpace class instance.

    Parameters
    ----------
    nlat : int
        Number of latitudinal bins, or rings.
    nlon : int, *optional*
        Number of longitudinal bins (default: ``2*nlat - 1``).
    dtype : numpy.dtype, *optional*
        Data type of the field values; only float32 and float64 are
        supported (default: numpy.float64).

    Returns
    -------
    None

    Raises
    ------
    ImportError
        If the libsharp_wrapper_gl module is not available.
    ValueError
        If input `nlat` is invalid.
    """
    # check imports: libsharp is mandatory for a Gauss-Legendre grid
    if not gc['use_libsharp']:
        raise ImportError(about._errors.cstring(
            "ERROR: libsharp_wrapper_gl not loaded."))
    self._cache_dict = {'check_codomain': {}}
    # paradict validates nlat/nlon and stores them
    self.paradict = gl_space_paradict(nlat=nlat, nlon=nlon)
    # check data type: anything but float32/float64 falls back to default
    dtype = np.dtype(dtype)
    if dtype not in [np.dtype('float32'), np.dtype('float64')]:
        about.warnings.cprint("WARNING: data type set to default.")
        dtype = np.dtype('float')
    self.dtype = dtype
    self.discrete = False
    self.harmonic = False
    # per-ring pixel volumes from libsharp, flattened to a tuple
    self.distances = tuple(gl.vol(self.paradict['nlat'],
                                  nlon=self.paradict['nlon']
                                  ).astype(np.float))
@property
def para(self):
    """Compact parameter array ``[nlat, nlon]`` (legacy interface)."""
    return np.array([self.paradict['nlat'], self.paradict['nlon']],
                    dtype=int)
@para.setter
def para(self, x):
    """Update ``nlat`` and ``nlon`` from the first two entries of *x*."""
    nlat, nlon = x[0], x[1]
    self.paradict['nlat'] = nlat
    self.paradict['nlon'] = nlon
def copy(self):
    """Return a new GlSpace with the same nlat, nlon and dtype."""
    params = self.paradict
    return GlSpace(nlat=params['nlat'],
                   nlon=params['nlon'],
                   dtype=self.dtype)
@property
def shape(self):
    """Flat shape of the pixelization: ``(nlat * nlon,)``."""
    size = self.paradict['nlat'] * self.paradict['nlon']
    return (int(size),)
@property
def meta_volume(self):
    """
    Total meta volume of the space.

    The meta volumes are the volumes associated with each component of
    a field. For a pixelization of the two-sphere their sum is the full
    solid angle.

    Returns
    -------
    float
        The total surface area of the unit two-sphere, ``4 * pi``.

    Notes
    -----
    For Gauss-Legendre pixelizations, the individual meta volumes are
    the pixel sizes (see ``meta_volume_split``).
    """
    return float(4 * np.pi)
@property
def meta_volume_split(self):
    """Per-component meta volumes, i.e. the individual pixel sizes."""
    ones = self.cast(1, dtype=np.float)
    return self.calc_weight(ones, power=1)
# TODO: Extend to binning/log
def enforce_power(self, spec, size=None, kindex=None):
    """Cast *spec* to a valid power spectrum over this space's kindex."""
    if kindex is None:
        # default band indices run from 0 to nlat - 1
        kindex = np.arange(self.paradict['nlat'],
                           dtype=np.array(self.distances).dtype)
    return self._enforce_power_helper(spec=spec,
                                      size=size,
                                      kindex=kindex)
def _check_codomain(self, codomain):
    """
    Check whether a given codomain is compatible with this space.

    Parameters
    ----------
    codomain : nifty.space
        Space to be checked for compatibility.

    Returns
    -------
    check : bool
        Whether or not the given codomain is compatible to the space.

    Notes
    -----
    The only compatible codomains are instances of :py:class:`LmSpace`
    satisfying ``nlon == 2*nlat - 1`` and ``lmax == mmax == nlat - 1``.
    """
    if codomain is None:
        return False
    if not isinstance(codomain, Space):
        raise TypeError(about._errors.cstring("ERROR: invalid input."))
    if not isinstance(codomain, LmSpace):
        return False
    nlat = self.paradict['nlat']
    nlon = self.paradict['nlon']
    lmax = codomain.paradict['lmax']
    mmax = codomain.paradict['mmax']
    # required relations: nlon == 2*nlat-1, lmax == nlat-1, lmax == mmax
    return bool(nlon == 2 * nlat - 1 and
                lmax == nlat - 1 and
                lmax == mmax)
def get_codomain(self, **kwargs):
    """
    Generate a compatible codomain to which transformations are
    reasonable, i.e. an instance of the :py:class:`LmSpace` class.

    Returns
    -------
    codomain : LmSpace
        A compatible codomain with ``lmax = mmax = nlat - 1`` and a
        complex dtype matching the precision of this space.
    """
    lmax = mmax = self.paradict['nlat'] - 1
    # single-precision real fields map to single-precision harmonics
    complex_dtype = (np.complex64 if self.dtype == np.dtype('float32')
                     else np.complex128)
    return LmSpace(lmax=lmax, mmax=mmax, dtype=complex_dtype)
def get_random_values(self, **kwargs):
    """
    Generates random field values according to the specifications given
    by the parameters.

    Returns
    -------
    x : numpy.ndarray
        Valid field values.

    Other parameters
    ----------------
    random : string, *optional*
        Specifies the probability distribution from which the random
        numbers are to be drawn.
        Supported distributions are:
        - "pm1" (uniform distribution over {+1,-1} or {+1,+i,-1,-i}
        - "gau" (normal distribution with zero-mean and a given
            standard deviation or variance)
        - "syn" (synthesizes from a given power spectrum)
        - "uni" (uniform distribution over [vmin,vmax[)
        (default: None).
    dev : float, *optional*
        Standard deviation (default: 1).
    var : float, *optional*
        Variance, overriding `dev` if both are specified
        (default: 1).
    spec : {scalar, list, numpy.array, nifty.field, function},
        *optional*
        Power spectrum (default: 1).
    codomain : nifty.lm_space, *optional*
        A compatible codomain for power indexing (default: None).
    vmin : float, *optional*
        Lower limit for a uniform distribution (default: 0).
    vmax : float, *optional*
        Upper limit for a uniform distribution (default: 1).
    """
    # normalize/validate the keyword arguments into a single dict
    arg = random.parse_arguments(self, **kwargs)
    # NOTE(review): the branches below were disabled when this code was
    # moved; everything except "syn" is delegated to the superclass.
    # if(arg is None):
    #     x = np.zeros(self.shape, dtype=self.dtype)
    #
    # elif(arg['random'] == "pm1"):
    #     x = random.pm1(dtype=self.dtype, shape=self.shape)
    #
    # elif(arg['random'] == "gau"):
    #     x = random.gau(dtype=self.dtype,
    #                    shape=self.shape,
    #                    mean=arg['mean'],
    #                    std=arg['std'])
    #
    # "syn" synthesizes a map from a power spectrum via libsharp
    if(arg['random'] == "syn"):
        nlat = self.paradict['nlat']
        nlon = self.paradict['nlon']
        lmax = nlat - 1
        # pick the libsharp routine matching the field precision
        if self.dtype == np.dtype('float32'):
            sample = gl.synfast_f(arg['spec'],
                                  nlat=nlat, nlon=nlon,
                                  lmax=lmax, mmax=lmax, alm=False)
        else:
            sample = gl.synfast(arg['spec'],
                                nlat=nlat, nlon=nlon,
                                lmax=lmax, mmax=lmax, alm=False)
        # weight if discrete
        if self.discrete:
            sample = self.calc_weight(sample, power=0.5)
    else:
        # all other distributions are handled by the generic Space class
        sample = super(GlSpace, self).get_random_values(**arg)
    # elif(arg['random'] == "uni"):
    #     x = random.uni(dtype=self.dtype,
    #                    shape=self.shape,
    #                    vmin=arg['vmin'],
    #                    vmax=arg['vmax'])
    #
    # else:
    #     raise KeyError(about._errors.cstring(
    #         "ERROR: unsupported random key '" + str(arg['random']) + "'."))
    # cast into this space's distributed data representation
    sample = self.cast(sample)
    return sample
def calc_weight(self, x, power=1):
    """
    Weight a given array with the pixel volumes to a given power.

    Parameters
    ----------
    x : numpy.ndarray
        Array to be weighted.
    power : float, *optional*
        Power of the pixel volumes to be used (default: 1).

    Returns
    -------
    y : numpy.ndarray
        Weighted array.
    """
    x = self.cast(x)
    # libsharp operates on a consolidated numpy copy of the data
    np_x = x.get_full_data()
    nlat = self.paradict['nlat']
    nlon = self.paradict['nlon']
    # choose the libsharp routine matching the field precision
    if self.dtype == np.dtype('float32'):
        weigher = gl.weight_f
    else:
        weigher = gl.weight
    np_result = weigher(np_x,
                        np.array(self.distances),
                        p=np.float32(power),
                        nlat=nlat, nlon=nlon,
                        overwrite=False)
    return np_result
def get_weight(self, power=1):
    """Return the pixel weight factor(s) for *power* via a unit probe."""
    # TODO: check compatibility with the rest of nifty
    # TODO: can this be done more efficiently?
    unit = self.dtype.type(1)
    return self.calc_weight(unit, power=power) / unit
def calc_transform(self, x, codomain=None, **kwargs):
    """
    Computes the spherical harmonic transform of a given array of field
    values (map -> alm via libsharp).

    Parameters
    ----------
    x : numpy.ndarray
        Array to be transformed.
    codomain : nifty.space, *optional*
        codomain space to which the transformation shall map
        (default: self).

    Returns
    -------
    Tx : numpy.ndarray
        Transformed array, cast into the codomain.

    Raises
    ------
    ValueError
        If the codomain fails `check_codomain` or is not an LmSpace.

    Notes
    -----
    Only instances of the :py:class:`lm_space` or :py:class:`gl_space`
    classes are allowed as `codomain`.
    """
    x = self.cast(x)
    if codomain is None:
        codomain = self.get_codomain()
    # Check if the given codomain is suitable for the transformation
    if not self.check_codomain(codomain):
        raise ValueError(about._errors.cstring(
            "ERROR: unsupported codomain."))
    if isinstance(codomain, LmSpace):
        # weight if discrete: undo the volume weighting before the
        # harmonic transform
        if self.discrete:
            x = self.calc_weight(x, power=-0.5)
        # transform
        nlat = self.paradict['nlat']
        nlon = self.paradict['nlon']
        lmax = codomain.paradict['lmax']
        mmax = codomain.paradict['mmax']
        # if self.datamodel != 'not':
        #     about.warnings.cprint(
        #         "WARNING: Field data is consolidated to all nodes for "
        #         "external map2alm method!")
        # libsharp's map2alm needs a consolidated numpy array
        np_x = x.get_full_data()
        if self.dtype == np.dtype('float32'):
            Tx = gl.map2alm_f(np_x,
                              nlat=nlat, nlon=nlon,
                              lmax=lmax, mmax=mmax)
        else:
            Tx = gl.map2alm(np_x,
                            nlat=nlat, nlon=nlon,
                            lmax=lmax, mmax=mmax)
    else:
        raise ValueError(about._errors.cstring(
            "ERROR: unsupported transformation."))
    return codomain.cast(Tx)
def calc_smooth(self, x, sigma=0, **kwargs):
    """
    Smooth an array of field values by convolution with a Gaussian
    kernel.

    Parameters
    ----------
    x : numpy.ndarray
        Array of field values to be smoothed.
    sigma : float, *optional*
        Standard deviation of the Gaussian kernel, specified in units
        of length in position space; for testing: a sigma of -1 will be
        reset to a reasonable value (default: 0).

    Returns
    -------
    Gx : numpy.ndarray
        Smoothed array.

    Raises
    ------
    ValueError
        If `sigma` is negative (other than the sentinel -1).
    """
    x = self.cast(x)
    # validate sigma; 0 means "no smoothing", -1 is a testing sentinel
    if sigma == 0:
        return self.unary_operation(x, op='copy')
    if sigma == -1:
        about.infos.cprint("INFO: invalid sigma reset.")
        sigma = np.sqrt(2) * np.pi / self.paradict['nlat']
    elif sigma < 0:
        raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
    # smooth via libsharp on a consolidated numpy copy of the data
    nlat = self.paradict['nlat']
    np_x = x.get_full_data()
    smoothed = gl.smoothmap(np_x,
                            nlat=nlat, nlon=self.paradict['nlon'],
                            lmax=nlat - 1, mmax=nlat - 1,
                            fwhm=0.0, sigma=sigma)
    return self.cast(smoothed)
def calc_power(self, x, **kwargs):
"""
Computes the power of an array of field values.
Parameters
----------
x : numpy.ndarray
Array containing the field values of which the power is to be
calculated.
Returns
-------
spec : numpy.ndarray
Power contained in the input array.
"""
x = self.cast(x)