Commit 2fcc20f5 authored by Philipp Haim's avatar Philipp Haim

add_fluctuations compatible with DomainTuple

parent 4fa63fe3
Pipeline #63500 failed with stages
in 4 minutes and 54 seconds
...@@ -17,6 +17,7 @@ ...@@ -17,6 +17,7 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik. # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np import numpy as np
from functools import reduce
from numpy.testing import assert_allclose from numpy.testing import assert_allclose
from ..domain_tuple import DomainTuple from ..domain_tuple import DomainTuple
...@@ -35,23 +36,38 @@ from ..operators.diagonal_operator import DiagonalOperator ...@@ -35,23 +36,38 @@ from ..operators.diagonal_operator import DiagonalOperator
from ..operators.operator import Operator from ..operators.operator import Operator
from ..operators.simple_linear_operators import VdotOperator, ducktape from ..operators.simple_linear_operators import VdotOperator, ducktape
from ..operators.value_inserter import ValueInserter from ..operators.value_inserter import ValueInserter
from ..sugar import from_global_data, from_random, full, makeDomain from ..sugar import from_global_data, from_random, full, makeDomain, get_default_codomain
#FIXME for all space operators, check if it is valid first (e.g. parse_space()) def _reshaper(domain, x, space):
shape = reduce(lambda x,y: x+y,
def _lognormal_moment_matching(mean, sig, key): (domain[i].shape for i in range(len(domain)) if i != space),())
mean, sig, key = float(mean), float(sig), str(key) x = np.array(x)
assert mean > 0 if x.shape == shape:
assert sig > 0 return np.asfarray(x)
elif x.shape in [(), (1,)]:
return np.full(shape, x, dtype=np.float)
else:
raise TypeError("Shape of parameters cannot be interpreted")
def _lognormal_moment_matching(mean, sig, key,
                               domain=DomainTuple.scalar_domain(), space=0):
    """Return an operator whose output is log-normally distributed with
    the given mean and standard deviation on *domain*.

    Parameters
    ----------
    mean, sig : scalar or array-like
        Desired mean and standard deviation of the log-normal variable;
        both must be strictly positive.
    key : str
        Name of the latent-variable field the returned operator ducktapes.
    domain : DomainTuple, optional
        Domain of the parameter; defaults to the scalar domain.
    space : int, optional
        Index of the subdomain excluded when reshaping the parameters.
    """
    domain = makeDomain(domain)
    mean, sig = (_reshaper(domain, param, space) for param in (mean, sig))
    key = str(key)
    assert np.all(mean > 0)
    assert np.all(sig > 0)
    # Moment matching: choose the parameters of the underlying Gaussian so
    # that exp() of it has the requested mean and stddev.
    logsig = np.sqrt(np.log((sig/mean)**2 + 1))
    logmean = np.log(mean) - logsig**2/2
    # Forward `space` so _normal reshapes against the same subdomain;
    # omitting it made the shape check in _normal fail for space != 0.
    return _normal(logmean, logsig, key, domain, space).exp()
def _normal(mean, sig, key,
            domain=DomainTuple.scalar_domain(), space=0):
    """Return a Gaussian operator with the given mean and standard
    deviation on *domain*, reading its excitations from the field *key*.
    """
    dom = makeDomain(domain)
    mean_arr = _reshaper(dom, mean, space)
    sig_arr = _reshaper(dom, sig, space)
    assert np.all(sig_arr > 0)
    excitation = ducktape(dom, None, key)
    shift = Adder(from_global_data(dom, mean_arr))
    return shift @ (sig_arr*excitation)
class _SlopeRemover(EndomorphicOperator): class _SlopeRemover(EndomorphicOperator):
...@@ -92,7 +108,7 @@ def _log_k_lengths(pspace): ...@@ -92,7 +108,7 @@ def _log_k_lengths(pspace):
return np.log(pspace.k_lengths[1:]) return np.log(pspace.k_lengths[1:])
class _TwoLogIntegrations(LinearOperator): class _TwoLogIntegrations(LinearOperator):
def __init__(self, target, space = 0): def __init__(self, target, space = None):
self._target = makeDomain(target) self._target = makeDomain(target)
assert isinstance(self.target[space], PowerSpace) assert isinstance(self.target[space], PowerSpace)
dom = list(self._target) dom = list(self._target)
...@@ -134,15 +150,17 @@ class _TwoLogIntegrations(LinearOperator): ...@@ -134,15 +150,17 @@ class _TwoLogIntegrations(LinearOperator):
class _Normalization(Operator): class _Normalization(Operator):
    def __init__(self, domain, space=0):
        """Operator normalizing a power spectrum over its nonzero modes.

        Parameters
        ----------
        domain : DomainTuple
            Domain and target of the operator; the subdomain at index
            `space` must be a PowerSpace.
        space : int, optional
            Index of the power-space subdomain. Defaults to 0.
        """
        self._domain = self._target = makeDomain(domain)
        # Replace the power space at `space` by its harmonic partner to
        # build the domain the PowerDistributor maps to.
        hspace = list(self._domain)
        hspace[space] = hspace[space].harmonic_partner
        hspace = makeDomain(hspace)
        pd = PowerDistributor(hspace, power_space=self._domain[space], space = space)
        # TODO Does not work on sphere yet
        # Adjoint of distributing ones counts the harmonic modes that fall
        # into each power bin.
        mode_multiplicity = pd.adjoint(full(pd.target, 1.)).to_global_data_rw()
        # Exclude the zero mode from the normalization sum.
        # NOTE(review): `[0]` zeroes the first slice along axis 0; looks
        # correct only while the power space is the first subdomain —
        # confirm for space != 0.
        mode_multiplicity[0] = 0
        self._mode_multiplicity = from_global_data(self._domain, mode_multiplicity)
        self._specsum = _SpecialSum(self._domain, space)
        # FIXME Move to tests
        consistency_check(self._specsum)
...@@ -152,17 +170,19 @@ class _Normalization(Operator): ...@@ -152,17 +170,19 @@ class _Normalization(Operator):
spec = (2*x).exp() spec = (2*x).exp()
# FIXME This normalizes also the zeromode which is supposed to be left # FIXME This normalizes also the zeromode which is supposed to be left
# untouched by this operator # untouched by this operator
return self._specsum(self._cst*spec)**(-0.5)*amp return self._specsum(self._mode_multiplicity*spec)**(-0.5)*amp
class _SpecialSum(EndomorphicOperator):
    """Sum over the subdomain at index `space` and broadcast the result
    back onto the full domain, via a ContractionOperator and its adjoint.
    """
    def __init__(self, domain, space=0):
        self._domain = makeDomain(domain)
        self._capability = self.TIMES | self.ADJOINT_TIMES
        self._contractor = ContractionOperator(domain, space)
        # Index tuple selecting entry 0 along the first axis of `space`.
        # NOTE(review): `_zero_mode` is never read within this class —
        # dead unless accessed from outside this view; confirm before
        # removing.
        self._zero_mode = (slice(None),)*domain.axes[space][0] + (0,)

    def apply(self, x, mode):
        # Contract-then-adjoint is self-adjoint, so TIMES and
        # ADJOINT_TIMES coincide and `mode` only needs validation.
        self._check_input(x, mode)
        return self._contractor.adjoint(self._contractor(x))
class CorrelatedFieldMaker: class CorrelatedFieldMaker:
...@@ -197,14 +217,8 @@ class CorrelatedFieldMaker: ...@@ -197,14 +217,8 @@ class CorrelatedFieldMaker:
sqrt_t = DiagonalOperator(sqrt_t, twolog.domain, spaces = space) sqrt_t = DiagonalOperator(sqrt_t, twolog.domain, spaces = space)
sigmasq = sqrt_t @ expander @ flexibility sigmasq = sqrt_t @ expander @ flexibility
#FIXME apply dist in asperity target domain, as it is smaller
#dist = np.zeros(asperity.target.shape)
#dist[first] = 1.
#dist = from_global_data(asperity.target, dist)
#dist = DiagonalOperator(dist, asperity.target, spaces = space)
#scale = sigmasq*(Adder(shift) @ expander @ dist @ asperity).sqrt()
dist = np.zeros(twolog.domain.shape) dist = np.zeros(twolog.domain.shape)
dist += 1. dist[first] += 1.
dist = from_global_data(twolog.domain, dist) dist = from_global_data(twolog.domain, dist)
dist = DiagonalOperator(dist, twolog.domain, spaces = space) dist = DiagonalOperator(dist, twolog.domain, spaces = space)
...@@ -215,7 +229,6 @@ class CorrelatedFieldMaker: ...@@ -215,7 +229,6 @@ class CorrelatedFieldMaker:
smooth = twolog @ (scale*ducktape(scale.target, None, key)) smooth = twolog @ (scale*ducktape(scale.target, None, key))
smoothslope = _make_slope_Operator(smooth,loglogavgslope) smoothslope = _make_slope_Operator(smooth,loglogavgslope)
#smoothslope = smooth
# move to tests # move to tests
assert_allclose( assert_allclose(
...@@ -251,28 +264,19 @@ class CorrelatedFieldMaker: ...@@ -251,28 +264,19 @@ class CorrelatedFieldMaker:
def add_fluctuations(self, target, fluctuations_mean, fluctuations_stddev, def add_fluctuations(self, target, fluctuations_mean, fluctuations_stddev,
flexibility_mean, flexibility_stddev, asperity_mean, flexibility_mean, flexibility_stddev, asperity_mean,
asperity_stddev, loglogavgslope_mean, asperity_stddev, loglogavgslope_mean,
loglogavgslope_stddev, prefix): loglogavgslope_stddev, prefix, space = 0):
fluctuations_mean = float(fluctuations_mean)
fluctuations_stddev = float(fluctuations_stddev)
flexibility_mean = float(flexibility_mean)
flexibility_stddev = float(flexibility_stddev)
asperity_mean = float(asperity_mean)
asperity_stddev = float(asperity_stddev)
loglogavgslope_mean = float(loglogavgslope_mean)
loglogavgslope_stddev = float(loglogavgslope_stddev)
prefix = str(prefix) prefix = str(prefix)
fluct = _lognormal_moment_matching(fluctuations_mean, fluct = _lognormal_moment_matching(fluctuations_mean, fluctuations_stddev,
fluctuations_stddev, prefix + 'fluctuations', space)
prefix + 'fluctuations')
flex = _lognormal_moment_matching(flexibility_mean, flexibility_stddev, flex = _lognormal_moment_matching(flexibility_mean, flexibility_stddev,
prefix + 'flexibility') prefix + 'flexibility', space)
asp = _lognormal_moment_matching(asperity_mean, asperity_stddev, asp = _lognormal_moment_matching(asperity_mean, asperity_stddev,
prefix + 'asperity') prefix + 'asperity', space)
avgsl = _normal(loglogavgslope_mean, loglogavgslope_stddev, avgsl = _normal(loglogavgslope_mean, loglogavgslope_stddev,
prefix + 'loglogavgslope') prefix + 'loglogavgslope', space)
self.add_fluctuations_from_ops(target, fluct, flex, asp, avgsl, self.add_fluctuations_from_ops(target, fluct, flex, asp, avgsl,
prefix + 'spectrum') prefix + 'spectrum', space)
    def finalize_from_op(self, zeromode):
        """Finalize the model from a pre-built zero-mode operator.

        Not implemented yet; always raises NotImplementedError.
        """
        raise NotImplementedError
...@@ -285,12 +289,9 @@ class CorrelatedFieldMaker: ...@@ -285,12 +289,9 @@ class CorrelatedFieldMaker:
""" """
offset vs zeromode: volume factor offset vs zeromode: volume factor
""" """
offset_amplitude_stddev = float(offset_amplitude_stddev)
offset_amplitude_mean = float(offset_amplitude_mean)
assert offset_amplitude_stddev > 0
assert offset_amplitude_mean > 0
if offset is not None: if offset is not None:
offset = float(offset) offset = float(offset)
#TODO correct hspace
hspace = makeDomain( hspace = makeDomain(
[dd.target[0].harmonic_partner for dd in self._amplitudes]) [dd.target[0].harmonic_partner for dd in self._amplitudes])
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment