Commit 13f0318c authored by Martin Reinecke

Merge branch 'NIFTy_5' into default_codomain_sugar

parents fbe2abd2 1f84d24e
......@@ -29,26 +29,23 @@ def get_random_LOS(n_los):
if __name__ == '__main__':
# FIXME description of the tutorial
np.random.seed(42)
np.seterr(all='raise')
position_space = ift.RGSpace([128, 128])
# Setting up an amplitude model
A = ift.AmplitudeModel(position_space, 16, 1, 10, -4., 1, 0., 1.)
dummy = ift.from_random('normal', A.domain)
A = ift.AmplitudeModel(position_space, 64, 3, 0.4, -4., 1, 1., 1.)
# Building the model for a correlated signal
harmonic_space = position_space.get_default_codomain()
ht = ift.HarmonicTransformOperator(harmonic_space, position_space)
power_space = A.target[0]
power_distributor = ift.PowerDistributor(harmonic_space, power_space)
dummy = ift.Field.from_random('normal', harmonic_space)
domain = ift.MultiDomain.union((A.domain,
ift.MultiDomain.make({
'xi': harmonic_space
})))
correlated_field = ht(power_distributor(A)*ift.FieldAdapter(domain, "xi"))
vol = harmonic_space.scalar_dvol
vol = ift.ScalingOperator(vol ** (-0.5),harmonic_space)
correlated_field = ht(vol(power_distributor(A))*ift.FieldAdapter(harmonic_space, "xi"))
# alternatively to the block above one can do:
# correlated_field = ift.CorrelatedField(position_space, A)
#correlated_field = ift.CorrelatedField(position_space, A)
# apply some nonlinearity
signal = ift.positive_tanh(correlated_field)
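For reference, the commented-out shortcut above is a drop-in replacement for the explicit chain; a minimal sketch using only names already defined in this demo:

correlated_field = ift.CorrelatedField(position_space, A)  # wraps ht, vol and power_distributor
signal = ift.positive_tanh(correlated_field)               # same nonlinearity as above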
......@@ -64,7 +61,7 @@ if __name__ == '__main__':
N = ift.ScalingOperator(noise, data_space)
# generate mock data
MOCK_POSITION = ift.from_random('normal', domain)
MOCK_POSITION = ift.from_random('normal', signal_response.domain)
data = signal_response(MOCK_POSITION) + N.draw_sample()
# set up model likelihood
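The likelihood block itself is cut off by this hunk; a hedged sketch of what it could look like with the response and noise defined above, using the GaussianEnergy call pattern that appears in the 1D demo further down in this commit:

likelihood = ift.GaussianEnergy(data, N)(signal_response)  # Gaussian likelihood with mean=data, covariance=N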
......@@ -79,13 +76,13 @@ if __name__ == '__main__':
# build model Hamiltonian
H = ift.Hamiltonian(likelihood, ic_sampling)
INITIAL_POSITION = ift.from_random('normal', domain)
INITIAL_POSITION = ift.from_random('normal', H.domain)
position = INITIAL_POSITION
plot = ift.Plot()
plot.add(signal(MOCK_POSITION), title='Ground Truth')
plot.add(R.adjoint_times(data), title='Data')
plot.add([A(MOCK_POSITION.extract(A.domain))], title='Power Spectrum')
plot.add([A.force(MOCK_POSITION)], title='Power Spectrum')
plot.output(ny=1, nx=3, xsize=24, ysize=6, name="setup.png")
# number of samples used to estimate the KL
......@@ -97,14 +94,10 @@ if __name__ == '__main__':
plot = ift.Plot()
plot.add(signal(KL.position), title="reconstruction")
plot.add(
[
A(KL.position.extract(A.domain)),
A(MOCK_POSITION.extract(A.domain))
],
title="power")
plot.add([A.force(KL.position), A.force(MOCK_POSITION)], title="power")
plot.output(ny=1, ysize=6, xsize=16, name="loop.png")
KL = ift.KL_Energy(position, H, N_samples)
plot = ift.Plot()
sc = ift.StatCalculator()
for sample in KL.samples:
......@@ -112,9 +105,8 @@ if __name__ == '__main__':
plot.add(sc.mean, title="Posterior Mean")
plot.add(ift.sqrt(sc.var), title="Posterior Standard Deviation")
powers = [A((s + KL.position).extract(A.domain)) for s in KL.samples]
powers = [A.force(s + KL.position) for s in KL.samples]
plot.add(
[A(KL.position.extract(A.domain)),
A(MOCK_POSITION.extract(A.domain))] + powers,
[A.force(KL.position), A.force(MOCK_POSITION)] + powers,
title="Sampled Posterior Power Spectrum")
plot.output(ny=1, nx=3, xsize=24, ysize=6, name="results.png")
......@@ -84,7 +84,7 @@ d_space = R.target
d = ift.from_global_data(d_space, y)
N = ift.DiagonalOperator(ift.from_global_data(d_space, var))
IC = ift.GradientNormController(tol_abs_gradnorm=1e-8)
IC = ift.DeltaEnergyController(tol_rel_deltaE=1e-12, iteration_limit=200)
likelihood = ift.GaussianEnergy(d, N)(R)
Ham = ift.Hamiltonian(likelihood, IC)
H = ift.EnergyAdapter(params, Ham, want_metric=True)
......
......@@ -24,6 +24,7 @@ from .operators.diagonal_operator import DiagonalOperator
from .operators.distributors import DOFDistributor, PowerDistributor
from .operators.domain_tuple_field_inserter import DomainTupleFieldInserter
from .operators.contraction_operator import ContractionOperator
from .operators.linear_interpolation import LinearInterpolator
from .operators.endomorphic_operator import EndomorphicOperator
from .operators.exp_transform import ExpTransform
from .operators.harmonic_operators import (
......@@ -34,6 +35,7 @@ from .operators.inversion_enabler import InversionEnabler
from .operators.laplace_operator import LaplaceOperator
from .operators.linear_operator import LinearOperator
from .operators.mask_operator import MaskOperator
from .operators.offset_operator import OffsetOperator
from .operators.qht_operator import QHTOperator
from .operators.regridding_operator import RegriddingOperator
from .operators.sampling_enabler import SamplingEnabler
......@@ -79,7 +81,8 @@ from .library.los_response import LOSResponse
from .library.wiener_filter_curvature import WienerFilterCurvature
from .library.correlated_fields import CorrelatedField, MfCorrelatedField
from .library.adjust_variances import make_adjust_variances
from .library.adjust_variances import (make_adjust_variances,
do_adjust_variances)
from . import extra
......
......@@ -28,7 +28,21 @@ from .structured_domain import StructuredDomain
class LogRGSpace(StructuredDomain):
"""NIFTy subclass for logarithmic Cartesian grids.
Parameters
----------
shape : int or tuple of int
Number of grid points or numbers of grid points along each axis.
bindistances : float or tuple of float
Distance between two grid points along each axis. These are
measured on a logarithmic scale and are therefore constant.
t_0 : float or tuple of float
FIXME
harmonic : bool, optional
Whether the space represents a grid in position or harmonic space.
(default: False).
"""
_needed_for_hash = ['_shape', '_bindistances', '_t_0', '_harmonic']
def __init__(self, shape, bindistances, t_0, harmonic=False):
......@@ -41,8 +55,8 @@ class LogRGSpace(StructuredDomain):
self._bindistances = tuple(bindistances)
self._t_0 = tuple(t_0)
self._dim = int(reduce(lambda x, y: x * y, self._shape))
self._dvol = float(reduce(lambda x, y: x * y, self._bindistances))
self._dim = int(reduce(lambda x, y: x*y, self._shape))
self._dvol = float(reduce(lambda x, y: x*y, self._bindistances))
@property
def harmonic(self):
......@@ -69,31 +83,30 @@ class LogRGSpace(StructuredDomain):
return np.array(self._t_0)
def __repr__(self):
return ("LogRGSpace(shape={}, harmonic={})"
.format(self.shape, self.harmonic))
return ("LogRGSpace(shape={}, harmonic={})".format(
self.shape, self.harmonic))
def get_default_codomain(self):
codomain_bindistances = 1. / (self.bindistances * self.shape)
return LogRGSpace(self.shape, codomain_bindistances,
self._t_0, True)
codomain_bindistances = 1./(self.bindistances*self.shape)
return LogRGSpace(self.shape, codomain_bindistances, self._t_0, True)
def get_k_length_array(self):
ib = dobj.ibegin_from_shape(self._shape)
res = np.arange(self.local_shape[0], dtype=np.float64) + ib[0]
res = np.minimum(res, self.shape[0]-res)*self.bindistances[0]
if len(self.shape) == 1:
return Field.from_local_data(self, res)
res *= res
for i in range(1, len(self.shape)):
tmp = np.arange(self.local_shape[i], dtype=np.float64) + ib[i]
tmp = np.minimum(tmp, self.shape[i]-tmp)*self.bindistances[i]
tmp *= tmp
res = np.add.outer(res, tmp)
return Field.from_local_data(self, np.sqrt(res))
def get_expk_length_array(self):
# FIXME This is a hack! Only for plotting. Seems not to be the final version.
out = exp(self.get_k_length_array()).to_global_data_rw()
out[1:] = out[:-1]
out[0] = 0
return Field.from_global_data(self, out)
if not self.harmonic:
raise NotImplementedError
ks = self.get_k_array()
return Field.from_global_data(self, np.linalg.norm(ks, axis=0))
def get_k_array(self):
ndim = len(self.shape)
k_array = np.zeros((ndim,) + self.shape)
dist = self.bindistances
for i in range(ndim):
ks = np.zeros(self.shape[i])
ks[1:] = np.minimum(self.shape[i] - 1 - np.arange(self.shape[i]-1), np.arange(self.shape[i]-1)) * dist[i]
if self.harmonic:
ks[0] = np.nan
else:
ks[0] = -np.inf
ks[1:] += self.t_0[i]
k_array[i] += ks.reshape((1,)*i + (self.shape[i],) + (1,)*(ndim-i-1))
return k_array
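A small usage sketch of the documented constructor together with the new get_k_array (values purely illustrative):

space = LogRGSpace(shape=(8,), bindistances=(0.2,), t_0=(0.,))
ks = space.get_k_array()               # shape (1, 8); ks[0, 0] is -inf on a position-space grid
hspace = space.get_default_codomain()  # harmonic partner; there the zero mode is marked with nan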
......@@ -20,7 +20,7 @@ if _use_fftw:
_fft_extra_args = dict(planner_effort='FFTW_ESTIMATE', threads=nthreads)
else:
from numpy.fft import fftn, rfftn, ifftn
_fft_extra_args={}
_fft_extra_args = {}
def hartley(a, axes=None):
......
......@@ -19,11 +19,20 @@
from __future__ import absolute_import, division, print_function
from ..compat import *
from ..minimization.energy_adapter import EnergyAdapter
from ..multi_domain import MultiDomain
from ..multi_field import MultiField
from ..operators.distributors import PowerDistributor
from ..operators.energy_operators import Hamiltonian, InverseGammaLikelihood
from ..operators.scaling_operator import ScalingOperator
from ..operators.simple_linear_operators import FieldAdapter
def make_adjust_variances(a, xi, position, samples=[], scaling=None,
def make_adjust_variances(a,
xi,
position,
samples=[],
scaling=None,
ic_samp=None):
""" Creates a Hamiltonian for constant likelihood optimizations.
......@@ -57,13 +66,48 @@ def make_adjust_variances(a, xi, position, samples=[], scaling=None,
if n > 0:
d_eval = 0.
for i in range(n):
d_eval = d_eval + d(position + samples[i])
d_eval = d_eval + d.force(position + samples[i])
d_eval = d_eval/n
else:
d_eval = d(position)
d_eval = d.force(position)
x = (a.conjugate()*a).real
if scaling is not None:
x = ScalingOperator(scaling, x.target)(x)
return Hamiltonian(InverseGammaLikelihood(d_eval)(x), ic_samp=ic_samp)
def do_adjust_variances(position,
amplitude_model,
minimizer,
xi_key='xi',
samples=[]):
h_space = position[xi_key].domain[0]
pd = PowerDistributor(h_space, amplitude_model.target[0])
a = pd(amplitude_model)
xi = FieldAdapter(h_space, xi_key)
ham = make_adjust_variances(a, xi, position, samples=samples)
# Minimize
e = EnergyAdapter(
position.extract(a.domain), ham, constants=[], want_metric=True)
e, _ = minimizer(e)
# Update position
s_h_old = (a*xi).force(position)
position = position.to_dict()
position['xi'] = s_h_old/a(e.position)
position = MultiField.from_dict(position)
position = MultiField.union([position, e.position])
s_h_new = (a*xi).force(position)
import numpy as np
# TODO Move this into the tests
np.testing.assert_allclose(s_h_new.to_global_data(),
s_h_old.to_global_data())
return position
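A minimal usage sketch of the new helper; `A`, `minimizer` and `position` are hypothetical stand-ins for an amplitude model, a minimizer and a current latent position:

# `A` as returned by ift.AmplitudeModel, `minimizer` e.g. a NewtonCG instance
position = do_adjust_variances(position, A, minimizer, xi_key='xi')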
......@@ -23,13 +23,11 @@ import numpy as np
from ..compat import *
from ..domains.power_space import PowerSpace
from ..field import Field
from ..multi_domain import MultiDomain
from ..operators.operator import Operator
from ..sugar import makeOp, sqrt
def _ceps_kernel(dof_space, k, a, k0):
return a**2/(1+(k/(k0*dof_space.bindistances[0]))**2)**2
return a**2/(1 + (k/k0)**2)**2
def create_cepstrum_amplitude_field(domain, cepstrum):
......@@ -45,22 +43,12 @@ def create_cepstrum_amplitude_field(domain, cepstrum):
"""
dim = len(domain.shape)
dist = domain.bindistances
shape = domain.shape
# Prepare q_array
q_array = np.zeros((dim,) + shape)
if dim == 1:
ks = domain.get_k_length_array().to_global_data()
q_array = np.array([ks])
else:
for i in range(dim):
ks = np.minimum(shape[i] - np.arange(shape[i]) +
1, np.arange(shape[i])) * dist[i]
q_array[i] += ks.reshape((1,)*i + (shape[i],) + (1,)*(dim-i-1))
q_array = domain.get_k_array()
# Fill cepstrum field (all non-zero modes)
no_zero_modes = (slice(1, None),) * dim
no_zero_modes = (slice(1, None),)*dim
ks = q_array[(slice(None),) + no_zero_modes]
cepstrum_field = np.zeros(shape)
cepstrum_field[no_zero_modes] = cepstrum(ks)
......@@ -78,7 +66,55 @@ def create_cepstrum_amplitude_field(domain, cepstrum):
return Field.from_global_data(domain, cepstrum_field)
class AmplitudeModel(Operator):
def CepstrumOperator(logk_space, ceps_a, ceps_k, zero_mode=True):
'''
Parameters
----------
ceps_a, ceps_k : Smoothness parameters of the cepstrum kernel,
e.g. ceps_kernel(k) = (a/(1 + (k/k0)**2))**2
with a = ceps_a and k0 = ceps_k.
'''
from ..operators.qht_operator import QHTOperator
from ..operators.symmetrizing_operator import SymmetrizingOperator
qht = QHTOperator(target=logk_space)
dof_space = qht.domain[0]
sym = SymmetrizingOperator(logk_space)
kern = lambda k: _ceps_kernel(dof_space, k, ceps_a, ceps_k)
cepstrum = create_cepstrum_amplitude_field(dof_space, kern)
res = sym(qht(makeOp(sqrt(cepstrum))))
if not zero_mode:
shp = res.target.shape
mask = np.ones(shp)
mask[(0,)*len(shp)] = 0.
mask = makeOp(Field.from_global_data(res.target, mask))
res = mask(res)
return res
def SlopeModel(logk_space, sm, sv, im, iv):
'''
Parameters
----------
sm, sv : slope_mean (the expected exponent of the power law, e.g. -4)
and slope_variance (default 1).
im, iv : y-intercept mean and y-intercept standard deviation of the
power-law slope.
'''
from ..operators.slope_operator import SlopeOperator
from ..operators.offset_operator import OffsetOperator
phi_mean = np.array([sm, im + sm*logk_space.t_0[0]])
phi_sig = np.array([sv, iv])
slope = SlopeOperator(logk_space)
phi_mean = Field.from_global_data(slope.domain, phi_mean)
phi_sig = Field.from_global_data(slope.domain, phi_sig)
return slope(OffsetOperator(phi_mean)(makeOp(phi_sig)))
def AmplitudeModel(s_space, Npixdof, ceps_a, ceps_k, sm, sv, im, iv,
keys=['tau', 'phi'], zero_mode=True):
'''
Computes a smooth power spectrum.
Output lives in PowerSpace.
......@@ -97,56 +133,20 @@ class AmplitudeModel(Operator):
im, iv : y-intercept mean and y-intercept variance of the power-law slope.
'''
def __init__(self, s_space, Npixdof, ceps_a, ceps_k, sm, sv, im, iv,
keys=['tau', 'phi']):
from ..operators.exp_transform import ExpTransform
from ..operators.qht_operator import QHTOperator
from ..operators.slope_operator import SlopeOperator
from ..operators.symmetrizing_operator import SymmetrizingOperator
h_space = s_space.get_default_codomain()
self._exp_transform = ExpTransform(PowerSpace(h_space), Npixdof)
logk_space = self._exp_transform.domain[0]
qht = QHTOperator(target=logk_space)
dof_space = qht.domain[0]
sym = SymmetrizingOperator(logk_space)
phi_mean = np.array([sm, im])
phi_sig = np.array([sv, iv])
self._slope = SlopeOperator(logk_space, phi_sig)
self._norm_phi_mean = Field.from_global_data(self._slope.domain,
phi_mean/phi_sig)
self._domain = MultiDomain.make({keys[0]: dof_space,
keys[1]: self._slope.domain})
self._target = self._exp_transform.target
kern = lambda k: _ceps_kernel(dof_space, k, ceps_a, ceps_k)
cepstrum = create_cepstrum_amplitude_field(dof_space, kern)
self._smooth_op = sym(qht(makeOp(sqrt(cepstrum))))
self._keys = tuple(keys)
self._qht = qht
self._ceps = makeOp(sqrt(cepstrum))
def apply(self, x):
self._check_input(x)
smooth_spec = self._smooth_op(x[self._keys[0]])
phi = x[self._keys[1]] + self._norm_phi_mean
linear_spec = self._slope(phi)
loglog_spec = smooth_spec + linear_spec
return self._exp_transform((0.5*loglog_spec).exp())
@property
def qht(self):
return self._qht
@property
def ceps(self):
return self._ceps
@property
def norm_phi_mean(self):
return self._norm_phi_mean
from ..operators.exp_transform import ExpTransform
from ..operators.simple_linear_operators import FieldAdapter
from ..operators.scaling_operator import ScalingOperator
h_space = s_space.get_default_codomain()
et = ExpTransform(PowerSpace(h_space), Npixdof)
logk_space = et.domain[0]
smooth = CepstrumOperator(logk_space, ceps_a, ceps_k, zero_mode)
linear = SlopeModel(logk_space, sm, sv, im, iv)
fa_smooth = FieldAdapter(smooth.domain, keys[0])
fa_linear = FieldAdapter(linear.domain, keys[1])
fac = ScalingOperator(0.5, smooth.target)
return et((fac(smooth(fa_smooth) + linear(fa_linear))).exp())
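For orientation, the demo at the top of this commit drives the rewritten factory exactly like this (lines repeated from the first hunk):

A = ift.AmplitudeModel(position_space, 64, 3, 0.4, -4., 1, 1., 1.)
power_space = A.target[0]          # the output lives in a PowerSpace
spectrum = A.force(MOCK_POSITION)  # .force restricts a larger MultiField to A.domain first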
......@@ -25,6 +25,7 @@ from ..operators.contraction_operator import ContractionOperator
from ..operators.distributors import PowerDistributor
from ..operators.harmonic_operators import HarmonicTransformOperator
from ..operators.simple_linear_operators import FieldAdapter
from ..operators.scaling_operator import ScalingOperator
def CorrelatedField(s_space, amplitude_model, name='xi'):
......@@ -45,7 +46,9 @@ def CorrelatedField(s_space, amplitude_model, name='xi'):
p_space = amplitude_model.target[0]
power_distributor = PowerDistributor(h_space, p_space)
A = power_distributor(amplitude_model)
return ht(A*FieldAdapter(MultiDomain.make({name: h_space}), name))
vol = h_space.scalar_dvol
vol = ScalingOperator(vol**(-0.5), h_space)
return ht(vol(A)*FieldAdapter(h_space, name))
def MfCorrelatedField(s_space_spatial, s_space_energy, amplitude_model_spatial,
......@@ -74,4 +77,4 @@ def MfCorrelatedField(s_space_spatial, s_space_energy, amplitude_model_spatial,
a_energy = dom_distr_energy(amplitude_model_energy)
a = a_spatial*a_energy
A = pd(a)
return ht(A*FieldAdapter(MultiDomain.make({name: h_space}), name))
return ht(A*FieldAdapter(h_space, name))
......@@ -30,7 +30,7 @@ from ..sugar import makeOp
class InverseGammaModel(Operator):
def __init__(self, domain, alpha, q):
def __init__(self, domain, alpha, q, delta=0.001):
"""Model which transforms a Gaussian into an inverse gamma distribution.
The pdf of the inverse gamma distribution is defined as follows:
......@@ -42,6 +42,9 @@ class InverseGammaModel(Operator):
The mean of the pdf is at q / (alpha - 1) if alpha > 1.
The mode is q / (alpha + 1).
This transformation is implemented as a linear interpolation which
maps a Gaussian onto an inverse gamma distribution.
Parameters
----------
domain : Domain, tuple of Domain or DomainTuple
......@@ -51,30 +54,38 @@ class InverseGammaModel(Operator):
The alpha-parameter of the inverse-gamma distribution.
q : float
The q-parameter of the inverse-gamma distribution.
delta : float
Distance between sampling points for the linear interpolation.
"""
self._domain = self._target = DomainTuple.make(domain)
self._alpha = alpha
self._q = q
self._alpha, self._q, self._delta = float(alpha), float(q), float(delta)
self._xmin, self._xmax = -8.2, 8.2
# Precompute
xs = np.arange(self._xmin, self._xmax+2*delta, delta)
self._table = np.log(invgamma.ppf(norm.cdf(xs), self._alpha,
scale=self._q))
self._deriv = (self._table[1:]-self._table[:-1]) / delta
def apply(self, x):
self._check_input(x)
lin = isinstance(x, Linearization)
val = x.val.local_data if lin else x.local_data
# MR FIXME?!
points = np.clip(val, None, 8.2)
points = invgamma.ppf(norm.cdf(points), self._alpha, scale=self._q)
points = Field.from_local_data(self._domain, points)
val = (np.clip(val, self._xmin, self._xmax) - self._xmin) / self._delta
# Operator
fi = np.floor(val).astype(int)
w = val - fi
res = np.exp((1 - w)*self._table[fi] + w*self._table[fi + 1])
points = Field.from_local_data(self._domain, res)
if not lin:
return points
inner = norm.pdf(val)
outer_inv = invgamma.pdf(invgamma.ppf(norm.cdf(val),
self._alpha,
scale=self._q),
self._alpha, scale=self._q)
# FIXME
outer_inv = np.clip(outer_inv, 1e-20, None)
outer = 1/outer_inv
jac = makeOp(Field.from_local_data(self._domain, inner*outer))
# Derivative of linear interpolation
der = self._deriv[fi]*res
jac = makeOp(Field.from_local_data(self._domain, der))
jac = jac(x.jac)
return x.new(points, jac)
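The change above replaces exact scipy calls at apply time by a precomputed table plus piecewise-linear interpolation; a simplified standalone sketch of the same technique (not the class itself, parameter values hypothetical):

import numpy as np
from scipy.stats import invgamma, norm

alpha, q, delta = 1.0, 1e-4, 0.001
xmin, xmax = -8.2, 8.2
xs = np.arange(xmin, xmax + 2*delta, delta)
table = np.log(invgamma.ppf(norm.cdf(xs), alpha, scale=q))  # built once at construction
deriv = (table[1:] - table[:-1])/delta                      # slopes, reused for the Jacobian

def transform(x):
    # piecewise-linear interpolation of log(invgamma.ppf(norm.cdf(x)))
    t = (np.clip(x, xmin, xmax) - xmin)/delta
    fi = np.floor(t).astype(int)
    w = t - fi
    return np.exp((1 - w)*table[fi] + w*table[fi + 1])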
......
from __future__ import absolute_import, division, print_function
import numpy as np
......@@ -52,7 +53,7 @@ class Linearization(object):
def __getitem__(self, name):
from .operators.simple_linear_operators import FieldAdapter
return self.new(self._val[name], FieldAdapter(self.domain, name))
return self.new(self._val[name], FieldAdapter(self.domain[name], name))
def __neg__(self):
return self.new(-self._val, -self._jac,
......@@ -101,6 +102,12 @@ class Linearization(object):
def __rtruediv__(self, other):
return self.inverse().__mul__(other)
def __pow__(self, power):
if not np.isscalar(power):
return NotImplemented
return self.new(self._val**power,
makeOp(self._val**(power-1)).scale(power)(self._jac))
def inverse(self):
return self.new(1./self._val, makeOp(-1./(self._val**2))(self._jac))
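The new __pow__ applies the chain rule d(x**p) = p*x**(p-1)*dx pointwise; a quick sanity-check sketch, assuming Linearization.make_var and from_random behave as used elsewhere in this commit (illustrative snippet, not part of the diff):

dom = ift.RGSpace(4)
x = ift.from_random('normal', dom)
dx = ift.from_random('normal', dom)
lin = ift.Linearization.make_var(x)**3
# the Jacobian acting on dx should equal 3*x**2*dx
print((lin.jac(dx) - 3*x**2*dx).norm())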