Commit 22cad1dd authored by csongor

Update: remove the np datamodel from the lm spaces; demos and tests passing

parent b9a4e92d
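
The hunks below apply one recurring change: results that libsharp (gl) and healpy (hp) return as plain numpy arrays are wrapped in distributed_data_object before they leave the space methods, and numpy-only idioms such as .astype(dtype) are replaced by the d2o equivalent .copy(dtype=...). A minimal illustrative sketch of that pattern (not part of the commit; hp_routine is a hypothetical stand-in for any healpy/libsharp call):

    import numpy as np
    from nifty.nifty_mpi_data import distributed_data_object

    def calc_power_sketch(x, hp_routine, lmax):
        # unwrap a possible d2o into a plain numpy array for the C backend
        cl = hp_routine(np.array(x), lmax=lmax)
        # wrap the numpy result so callers always receive a d2o
        return distributed_data_object(cl)

    # dtype conversion on a d2o goes through copy(dtype=...), not astype:
    #     old: Tx.astype(codomain.dtype)
    #     new: Tx.copy(dtype=codomain.dtype)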
@@ -51,15 +51,16 @@ from nifty.nifty_paradict import lm_space_paradict,\
 from nifty.nifty_power_indices import lm_power_indices
 from nifty.nifty_mpi_data import distributed_data_object
+from nifty.nifty_mpi_data import STRATEGIES as DISTRIBUTION_STRATEGIES
 from nifty.nifty_random import random

 gl = gdi.get('libsharp_wrapper_gl')
 hp = gdi.get('healpy')

-LM_DISTRIBUTION_STRATEGIES = ['not']
-GL_DISTRIBUTION_STRATEGIES = ['not']
-HP_DISTRIBUTION_STRATEGIES = ['not']
+LM_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['all']
+GL_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['all']
+HP_DISTRIBUTION_STRATEGIES = DISTRIBUTION_STRATEGIES['all']


 class lm_space(point_space):
@@ -171,9 +172,7 @@ class lm_space(point_space):
         # set datamodel
         if datamodel not in ['not']:
             about.warnings.cprint("WARNING: datamodel set to default.")
-            self.datamodel = 'not'
-        else:
-            self.datamodel = datamodel
+        self.datamodel = datamodel

         self.discrete = True
         self.harmonic = True
@@ -491,7 +490,7 @@ class lm_space(point_space):
                 x = hp.synalm(arg['spec'], lmax=lmax, mmax=mmax)
             else:
                 x = gl.synalm(arg['spec'], lmax=lmax, mmax=mmax)
-            return x
+            return distributed_data_object(x)

         elif arg['random'] == "uni":
             x = random.uni(dtype=self.dtype,
@@ -528,9 +527,9 @@ class lm_space(point_space):
             lmax = self.paradict['lmax']
             mmax = self.paradict['mmax']

             if self.dtype == np.dtype('complex64'):
-                return gl.dotlm_f(x, y, lmax=lmax, mmax=mmax)
+                return distributed_data_object(gl.dotlm_f(x, y, lmax=lmax, mmax=mmax))
             else:
-                return gl.dotlm(x, y, lmax=lmax, mmax=mmax)
+                return distributed_data_object(gl.dotlm(x, y, lmax=lmax, mmax=mmax))
         else:
             return self._dotlm(x, y)
@@ -576,11 +575,11 @@ class lm_space(point_space):
             # transform
             if self.dtype == np.dtype('complex64'):
-                Tx = gl.alm2map_f(x, nlat=nlat, nlon=nlon,
-                                  lmax=lmax, mmax=mmax, cl=False)
+                Tx = distributed_data_object(gl.alm2map_f(np.array(x), nlat=nlat, nlon=nlon,
+                                             lmax=lmax, mmax=mmax, cl=False))
             else:
-                Tx = gl.alm2map(x, nlat=nlat, nlon=nlon,
-                                lmax=lmax, mmax=mmax, cl=False)
+                Tx = distributed_data_object(gl.alm2map(np.array(x), nlat=nlat, nlon=nlon,
+                                             lmax=lmax, mmax=mmax, cl=False))
             # re-weight if discrete
             if codomain.discrete:
                 Tx = codomain.calc_weight(Tx, power=0.5)
@@ -591,9 +590,9 @@ class lm_space(point_space):
             mmax = self.paradict['mmax']

             # transform
-            Tx = hp.alm2map(x.astype(np.complex128), nside, lmax=lmax,
-                            mmax=mmax, pixwin=False, fwhm=0.0, sigma=None,
-                            pol=True, inplace=False)
+            Tx = distributed_data_object(hp.alm2map(np.array(x).astype(np.complex128), nside, lmax=lmax,
+                                         mmax=mmax, pixwin=False, fwhm=0.0, sigma=None,
+                                         pol=True, inplace=False))
             # re-weight if discrete
             if(codomain.discrete):
                 Tx = codomain.calc_weight(Tx, power=0.5)
@@ -602,7 +601,7 @@ class lm_space(point_space):
             raise ValueError(about._errors.cstring(
                 "ERROR: unsupported transformation."))
-        return Tx.astype(codomain.dtype)
+        return Tx.copy(dtype=codomain.dtype)

     def calc_smooth(self, x, sigma=0, **kwargs):
         """
@@ -633,13 +632,13 @@ class lm_space(point_space):
         elif sigma < 0:
             raise ValueError(about._errors.cstring("ERROR: invalid sigma."))

         if gc['use_healpy']:
-            return hp.smoothalm(x, fwhm=0.0, sigma=sigma,
-                                pol=True, mmax=self.paradict['mmax'],
-                                verbose=False, inplace=False)
+            return distributed_data_object(hp.smoothalm(x, fwhm=0.0, sigma=sigma,
+                                           pol=True, mmax=self.paradict['mmax'],
+                                           verbose=False, inplace=False))
         else:
-            return gl.smoothalm(x, lmax=self.paradict['lmax'],
-                                mmax=self.paradict['mmax'],
-                                fwhm=0.0, sigma=sigma, overwrite=False)
+            return distributed_data_object(gl.smoothalm(x, lmax=self.paradict['lmax'],
+                                           mmax=self.paradict['mmax'],
+                                           fwhm=0.0, sigma=sigma, overwrite=False))

     def calc_power(self, x, **kwargs):
         """
@@ -663,17 +662,17 @@ class lm_space(point_space):
         # power spectrum
         if self.dtype == np.dtype('complex64'):
             if gc['use_libsharp']:
-                return gl.anaalm_f(x, lmax=lmax, mmax=mmax)
+                return distributed_data_object(gl.anaalm_f(x, lmax=lmax, mmax=mmax))
             else:
-                return hp.alm2cl(x.astype(np.complex128), alms2=None,
-                                 lmax=lmax, mmax=mmax, lmax_out=lmax,
-                                 nspec=None).astype(np.float32)
+                return distributed_data_object(hp.alm2cl(x.copy(dtype=np.complex128), alms2=None,
+                                               lmax=lmax, mmax=mmax, lmax_out=lmax,
+                                               nspec=None).copy(dtype=np.float32))
         else:
             if gc['use_healpy']:
-                return hp.alm2cl(x, alms2=None, lmax=lmax, mmax=mmax,
-                                 lmax_out=lmax, nspec=None)
+                return distributed_data_object(hp.alm2cl(np.array(x), alms2=None, lmax=lmax, mmax=mmax,
+                                               lmax_out=lmax, nspec=None))
             else:
-                return gl.anaalm(x, lmax=lmax, mmax=mmax)
+                return distributed_data_object(gl.anaalm(x, lmax=lmax, mmax=mmax))

     def get_plot(self, x, title="", vmin=None, vmax=None, power=True,
                  norm=None, cmap=None, cbar=True, other=None, legend=False,
@@ -964,15 +963,13 @@ class gl_space(point_space):
         # set datamodel
         if datamodel not in ['not']:
             about.warnings.cprint("WARNING: datamodel set to default.")
-            self.datamodel = 'not'
-        else:
-            self.datamodel = datamodel
+        self.datamodel = datamodel

         self.discrete = False
         self.harmonic = False
-        self.distances = tuple(gl.vol(self.paradict['nlat'],
-                                      nlon=self.paradict['nlon']
-                                      ).astype(np.float))
+        self.distances = tuple(distributed_data_object(gl.vol(self.paradict['nlat'],
+                               nlon=self.paradict['nlon']
+                               ).astype(np.float)))

         self.comm = self._parse_comm(comm)

     @property
@@ -1044,18 +1041,6 @@ class gl_space(point_space):
         mol = self.cast(1, dtype=np.float)
         return self.calc_weight(mol, power=1)

-    #TODO: Check with Theo!
-    def _cast_to_d2o(self, x, dtype=None, hermitianize=True, **kwargs):
-        casted_x = super(gl_space, self)._cast_to_d2o(x=x,
-                                                      dtype=dtype,
-                                                      **kwargs)
-        complexity_mask = distributed_data_object.iscomplex(casted_x[:self.paradict['nlat']+1])
-        if distributed_data_object.any(complexity_mask):
-            about.warnings.cprint("WARNING: Taking the absolute values for " +
-                                  "all complex entries where lmax==0")
-            casted_x[complexity_mask] = distributed_data_object.__abs__(casted_x[complexity_mask])
-        return casted_x
-
     # TODO: Extend to binning/log
     def enforce_power(self, spec, size=None, kindex=None):
         if kindex is None:
@@ -1192,13 +1177,13 @@ class gl_space(point_space):
             nlon = self.paradict['nlon']
             lmax = nlat - 1
             if self.dtype == np.dtype('float32'):
-                x = gl.synfast_f(arg['spec'],
-                                 nlat=nlat, nlon=nlon,
-                                 lmax=lmax, mmax=lmax, alm=False)
+                x = distributed_data_object(gl.synfast_f(arg['spec'],
+                                            nlat=nlat, nlon=nlon,
+                                            lmax=lmax, mmax=lmax, alm=False))
             else:
-                x = gl.synfast(arg['spec'],
-                               nlat=nlat, nlon=nlon,
-                               lmax=lmax, mmax=lmax, alm=False)
+                x = distributed_data_object(gl.synfast(arg['spec'],
+                                            nlat=nlat, nlon=nlon,
+                                            lmax=lmax, mmax=lmax, alm=False))
             # weight if discrete
             if self.discrete:
                 x = self.calc_weight(x, power=0.5)
@@ -1295,18 +1280,18 @@ class gl_space(point_space):
             mmax = codomain.paradict['mmax']

             if self.dtype == np.dtype('float32'):
-                Tx = gl.map2alm_f(x,
-                                  nlat=nlat, nlon=nlon,
-                                  lmax=lmax, mmax=mmax)
+                Tx = distributed_data_object(gl.map2alm_f(np.array(x),
+                                             nlat=nlat, nlon=nlon,
+                                             lmax=lmax, mmax=mmax))
             else:
-                Tx = gl.map2alm(x,
-                                nlat=nlat, nlon=nlon,
-                                lmax=lmax, mmax=mmax)
+                Tx = distributed_data_object(gl.map2alm(np.array(x),
+                                             nlat=nlat, nlon=nlon,
+                                             lmax=lmax, mmax=mmax))
         else:
             raise ValueError(about._errors.cstring(
                 "ERROR: unsupported transformation."))
-        return Tx.astype(codomain.dtype)
+        return Tx.copy(dtype=codomain.dtype)

     def calc_smooth(self, x, sigma=0, **kwargs):
         """
@@ -1338,10 +1323,10 @@ class gl_space(point_space):
             raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
         # smooth
         nlat = self.paradict['nlat']
-        return gl.smoothmap(x,
-                            nlat=nlat, nlon=self.paradict['nlon'],
-                            lmax=nlat - 1, mmax=nlat - 1,
-                            fwhm=0.0, sigma=sigma)
+        return distributed_data_object(gl.smoothmap(x,
+                                       nlat=nlat, nlon=self.paradict['nlon'],
+                                       lmax=nlat - 1, mmax=nlat - 1,
+                                       fwhm=0.0, sigma=sigma))

     def calc_power(self, x, **kwargs):
         """
@@ -1368,15 +1353,15 @@ class gl_space(point_space):
         lmax = nlat - 1
         mmax = nlat - 1

         if self.dtype == np.dtype('float32'):
-            return gl.anafast_f(x,
-                                nlat=nlat, nlon=nlon,
-                                lmax=lmax, mmax=mmax,
-                                alm=False)
+            return distributed_data_object(gl.anafast_f(np.array(x),
+                                           nlat=nlat, nlon=nlon,
+                                           lmax=lmax, mmax=mmax,
+                                           alm=False))
         else:
-            return gl.anafast(x,
-                              nlat=nlat, nlon=nlon,
-                              lmax=lmax, mmax=mmax,
-                              alm=False)
+            return distributed_data_object(gl.anafast(np.array(x),
+                                           nlat=nlat, nlon=nlon,
+                                           lmax=lmax, mmax=mmax,
+                                           alm=False))

     def get_plot(self, x, title="", vmin=None, vmax=None, power=False,
                  unit="", norm=None, cmap=None, cbar=True, other=None,
@@ -1497,7 +1482,7 @@ class gl_space(point_space):
                 n_ = ln(vmin=vmin, vmax=vmax)
             else:
                 n_ = None
-            sub = ax0.pcolormesh(lon, lat, np.roll(x.reshape((self.para[0], self.para[1]), order='C'), self.para[
-                1] // 2, axis=1)[::-1, ::-1], cmap=cmap, norm=n_, vmin=vmin, vmax=vmax)
+            sub = ax0.pcolormesh(lon, lat, np.roll(np.array(x).reshape((self.para[0], self.para[1]), order='C'), self.para[
+                1] // 2, axis=1)[::-1, ::-1], cmap=cmap, norm=n_, vmin=vmin, vmax=vmax)
             ax0.set_xlim(-180, 180)
             ax0.set_ylim(-90, 90)
@@ -1612,9 +1597,7 @@ class hp_space(point_space):
         # set datamodel
         if datamodel not in ['not']:
             about.warnings.cprint("WARNING: datamodel set to default.")
-            self.datamodel = 'not'
-        else:
-            self.datamodel = datamodel
+        self.datamodel = datamodel

         self.discrete = False
         self.harmonic = False
@@ -1809,8 +1792,8 @@ class hp_space(point_space):
         elif arg['random'] == "syn":
             nside = self.paradict['nside']
             lmax = 3*nside-1
-            x = hp.synfast(arg['spec'], nside, lmax=lmax, mmax=lmax, alm=False,
-                           pol=True, pixwin=False, fwhm=0.0, sigma=None)
+            x = distributed_data_object(hp.synfast(arg['spec'], nside, lmax=lmax, mmax=lmax, alm=False,
+                                        pol=True, pixwin=False, fwhm=0.0, sigma=None))
             # weight if discrete
             if self.discrete:
                 x = self.calc_weight(x, power=0.5)
@@ -1866,17 +1849,17 @@ class hp_space(point_space):
             if self.discrete:
                 x = self.calc_weight(x, power=-0.5)
             # transform
-            Tx = hp.map2alm(x.astype(np.float64, copy=False),
-                            lmax=codomain.paradict['lmax'],
-                            mmax=codomain.paradict['mmax'],
-                            iter=niter, pol=True, use_weights=False,
-                            datapath=None)
+            Tx = distributed_data_object(hp.map2alm(x.copy(dtype=np.float64),
+                                         lmax=codomain.paradict['lmax'],
+                                         mmax=codomain.paradict['mmax'],
+                                         iter=niter, pol=True, use_weights=False,
+                                         datapath=None))
         else:
             raise ValueError(about._errors.cstring(
                 "ERROR: unsupported transformation."))
-        return Tx.astype(codomain.dtype)
+        return Tx.copy(dtype=codomain.dtype)

     def calc_smooth(self, x, sigma=0, niter=0, **kwargs):
         """
@@ -1918,9 +1901,9 @@ class hp_space(point_space):
         # smooth
         lmax = 3*nside-1
         mmax = lmax
-        return hp.smoothing(x, fwhm=0.0, sigma=sigma, pol=True,
-                            iter=niter, lmax=lmax, mmax=mmax,
-                            use_weights=False, datapath=None)
+        return distributed_data_object(hp.smoothing(x, fwhm=0.0, sigma=sigma, pol=True,
+                                       iter=niter, lmax=lmax, mmax=mmax,
+                                       use_weights=False, datapath=None))

     def calc_power(self, x, niter=0, **kwargs):
         """
@@ -1952,9 +1935,9 @@ class hp_space(point_space):
         lmax = 3*nside-1
         mmax = lmax
         # power spectrum
-        return hp.anafast(x, map2=None, nspec=None, lmax=lmax, mmax=mmax,
-                          iter=niter, alm=False, pol=True, use_weights=False,
-                          datapath=None)
+        return distributed_data_object(hp.anafast(x, map2=None, nspec=None, lmax=lmax, mmax=mmax,
+                                       iter=niter, alm=False, pol=True, use_weights=False,
+                                       datapath=None))

     def get_plot(self, x, title="", vmin=None, vmax=None, power=False, unit="",
                  norm=None, cmap=None, cbar=True, other=None, legend=False,
...
@@ -249,7 +249,7 @@ class power_indices(object):
             return result

     def _compute_indices(self, nkdict):
-        if self.datamodel == 'np':
+        if self.datamodel in ['np', 'not']:
             return self._compute_indices_np(nkdict)
         elif self.datamodel in self.allowed_distribution_strategies:
             return self._compute_indices_d2o(nkdict)
...
@@ -1161,6 +1161,9 @@ class diagonal_operator(operator):
         # Set the diag-val
         self.val = self.domain.cast(new_diag)

+        # Set the bare-val #TODO Check with Theo
+        self.bare = bare
+
         # Weight if necessary
         if not self.domain.discrete and bare:
             self.val = self.domain.calc_weight(self.val, power=1)
@@ -1606,7 +1609,7 @@ class diagonal_operator(operator):
 class identity_operator(diagonal_operator):

     def __init__(self, domain, codomain=None, bare=False):
-        super(diagonal_operator, self).__init__(domain=domain,
+        super(identity_operator, self).__init__(domain=domain,
                                                 codomain=codomain,
                                                 diag=1,
                                                 bare=bare)
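
The identity_operator change fixes a classic super() pitfall: super(diagonal_operator, self) starts the method lookup after diagonal_operator in the MRO, so diagonal_operator.__init__ was skipped and the diag/bare keywords went straight to the next base class. Naming the subclass itself restores normal dispatch. A minimal sketch with hypothetical classes:

    class Base(object):
        def __init__(self):
            print('Base.__init__')

    class Middle(Base):
        def __init__(self):
            print('Middle.__init__')
            super(Middle, self).__init__()

    class Leaf(Middle):
        def __init__(self):
            # super(Middle, self).__init__() would start the lookup after
            # Middle in the MRO and skip Middle.__init__ -- the bug fixed above
            super(Leaf, self).__init__()

    Leaf()  # prints 'Middle.__init__', then 'Base.__init__'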
...
@@ -179,8 +179,8 @@ class Test_Operator_Base_Class(unittest.TestCase):
                           testcase_func_name=custom_name_func)
     def test_successfull_init_and_methods(self, name):
         op = generate_operator(name)
-        assert(callable(op.__set_val__))
-        assert(callable(op.__get_val__))
+        assert(callable(op.set_val))
+        assert(callable(op.get_val))
         assert(callable(op._multiply))
         assert(callable(op._adjoint_multiply))
         assert(callable(op._inverse_multiply))
@@ -199,7 +199,7 @@ class Test_Operator_Base_Class(unittest.TestCase):
         assert(callable(op.diag))
         assert(callable(op.det))
         assert(callable(op.inverse_det))
-        assert(callable(op.log_et))
+        assert(callable(op.log_det))
         assert(callable(op.tr_log))
         assert(callable(op.hat))
         assert(callable(op.inverse_hat))
...
@@ -1023,18 +1023,22 @@ class Test_Lm_Space(unittest.TestCase):
                                    DATAMODELS['lm_space']),
                           testcase_func_name=custom_name_func)
     def test_successfull_init(self, lmax, mmax, dtype, datamodel):
-        l = lm_space(lmax, mmax=mmax, dtype=dtype, datamodel=datamodel)
-        assert(isinstance(l.harmonic, bool))
-        assert_equal(l.paradict['lmax'], lmax)
-        if mmax is None or mmax > lmax:
-            assert_equal(l.paradict['mmax'], lmax)
-        else:
-            assert_equal(l.paradict['mmax'], mmax)
-        assert_equal(l.dtype, dtype)
-        assert_equal(l.datamodel, datamodel)
-        assert_equal(l.discrete, True)
-        assert_equal(l.harmonic, True)
-        assert_equal(l.distances, (np.float(1),))
+        if datamodel in ['not']:
+            l = lm_space(lmax, mmax=mmax, dtype=dtype, datamodel=datamodel)
+            assert(isinstance(l.harmonic, bool))
+            assert_equal(l.paradict['lmax'], lmax)
+            if mmax is None or mmax > lmax:
+                assert_equal(l.paradict['mmax'], lmax)
+            else:
+                assert_equal(l.paradict['mmax'], mmax)
+            assert_equal(l.dtype, dtype)
+            assert_equal(l.datamodel, datamodel)
+            assert_equal(l.discrete, True)
+            assert_equal(l.harmonic, True)
+            assert_equal(l.distances, (np.float(1),))
+        else:
+            with assert_raises(NotImplementedError): lm_space(lmax, mmax=mmax, dtype=dtype, datamodel=datamodel)

 ###############################################################################
@@ -1077,18 +1081,21 @@ class Test_Lm_Space(unittest.TestCase):
     def test_enforce_power(self, datamodel):
         lmax = 17
         mmax = 12
-        l = lm_space(lmax, mmax=mmax, datamodel=datamodel)
-
-        assert_equal(l.enforce_power(2),
-                     np.ones(18)*2)
-        assert_almost_equal(
-            l.enforce_power(lambda x: 42 / (1 + x)**5),
-            np.array([4.20000000e+01, 1.31250000e+00, 1.72839506e-01,
-                      4.10156250e-02, 1.34400000e-02, 5.40123457e-03,
-                      2.49895877e-03, 1.28173828e-03, 7.11273688e-04,
-                      4.20000000e-04, 2.60786956e-04, 1.68788580e-04,
-                      1.13118211e-04, 7.80924615e-05, 5.53086420e-05,
-                      4.00543213e-05, 2.95804437e-05, 2.22273027e-05]))
+        if datamodel in ['not']:
+            l = lm_space(lmax, mmax=mmax, datamodel=datamodel)
+
+            assert_equal(l.enforce_power(2),
+                         np.ones(18)*2)
+            assert_almost_equal(
+                l.enforce_power(lambda x: 42 / (1 + x)**5),
+                np.array([4.20000000e+01, 1.31250000e+00, 1.72839506e-01,
+                          4.10156250e-02, 1.34400000e-02, 5.40123457e-03,
+                          2.49895877e-03, 1.28173828e-03, 7.11273688e-04,
+                          4.20000000e-04, 2.60786956e-04, 1.68788580e-04,
+                          1.13118211e-04, 7.80924615e-05, 5.53086420e-05,
+                          4.00543213e-05, 2.95804437e-05, 2.22273027e-05]))
+        else:
+            with assert_raises(NotImplementedError): lm_space(lmax, mmax=mmax, datamodel=datamodel)

 ##############################################################################
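
Both rewritten lm_space tests lean on assert_raises working as a context manager; nose.tools.assert_raises is unittest's assertRaises, which has supported this form since Python 2.7. A self-contained usage sketch with a hypothetical function:

    from nose.tools import assert_raises

    def parse_positive(s):
        # hypothetical helper, only for illustrating the pattern
        n = int(s)
        if n <= 0:
            raise ValueError("expected a positive integer")
        return n

    # the context-manager form used in the tests above
    with assert_raises(ValueError):
        parse_positive('-3')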