Commit f2669556 authored by Philipp Haim's avatar Philipp Haim

Merge branch 'normalized_amplitudes_pp' into normalized_amplitudes_DomT

parents 4fe143dd 05dde3a1
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
# Author: Philipp Arras
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
import nifty5 as ift
import matplotlib.pyplot as plt
def _default_pspace(dom):
    """Return the PowerSpace built on the default codomain of `dom`."""
    codomain = dom.get_default_codomain()
    return ift.PowerSpace(codomain)
if __name__ == '__main__':
    # Demo: draw prior samples from a CorrelatedFieldMaker model and
    # visualize the sampled power spectra together with the mean slope.
    np.random.seed(42)
    fa = ift.CorrelatedFieldMaker()
    n_samps = 20
    # Prior means of the power-law slopes for the two sub-domains.
    slope_means = [-2, -3]
    fa.add_fluctuations(ift.RGSpace(128, 0.1), 10, 2, 1, 1e-6,
                        2, 1e-6, slope_means[0], 0.2, 'spatial')
    # fa.add_fluctuations(_default_pspace(ift.RGSpace((128, 64))), 10, 2, 1,
    #                     1e-6, 2, 1e-6, slope_means[0], 0.2, 'spatial')
    fa.add_fluctuations(ift.RGSpace(32), 10, 5, 1, 1e-6, 2,
                        1e-6, slope_means[1], 1, 'freq')
    correlated_field = fa.finalize(10, 0.1, '')
    amplitudes = fa.amplitudes
    plt.style.use('seaborn-notebook')
    tgt = correlated_field.target
    # Layout depends on the dimensionality of the target: 1D fields get a
    # 1x2 grid, higher-dimensional ones a 3x3 grid.
    if len(tgt.shape) == 1:
        fig, axes = plt.subplots(nrows=1, ncols=2)
        fig.set_size_inches(20, 10)
    else:
        fig, axes = plt.subplots(nrows=3, ncols=3)
        fig.set_size_inches(20, 16)
    # Generator over the axes; consumed partly here and partly below.
    axs = (ax for ax in axes.ravel())
    for ii, aa in enumerate(amplitudes):
        ax = next(axs)
        # Power spectrum is the squared amplitude operator.
        pspec = aa**2
        ax.set_xscale('log')
        ax.set_yscale('log')
        for _ in range(n_samps):
            fld = pspec(ift.from_random('normal', pspec.domain))
            klengths = fld.domain[0].k_lengths
            ycoord = fld.to_global_data_rw()
            # Replace the zero mode by the first mode so the log-log plot
            # does not start at k=0.
            ycoord[0] = ycoord[1]
            ax.plot(klengths, ycoord, alpha=1)
        ymin, ymax = ax.get_ylim()
        color = plt.rcParams['axes.prop_cycle'].by_key()['color'][0]
        lbl = 'Mean slope (k^{})'.format(2*slope_means[ii])
        # Overlay a family of reference power laws with the expected mean
        # slope, spanning the visible y-range.
        for fac in np.linspace(np.log(ymin), np.log(ymax**2/ymin)):
            xs = np.linspace(np.amin(klengths[1:]), np.amax(klengths[1:]))
            ys = xs**(2*slope_means[ii])*np.exp(fac)
            # Extend the reference line horizontally to k=0.
            xs = np.insert(xs, 0, 0)
            ys = np.insert(ys, 0, ys[0])
            ax.plot(xs, ys, zorder=1, color=color, linewidth=0.3, label=lbl)
            # Only label the first reference line to avoid legend clutter.
            lbl = None
        ax.set_ylim([ymin, ymax])
        ax.set_xlim([None, np.amax(klengths)])
        ax.legend()
    if len(tgt.shape) == 2:
        # Fill the remaining axes with field samples sharing one color scale.
        foo = []
        for ax in axs:
            pos = ift.from_random('normal', correlated_field.domain)
            fld = correlated_field(pos).to_global_data()
            foo.append((ax, fld))
        # Common color range over all samples.
        mi, ma = np.inf, -np.inf
        for _, fld in foo:
            mi = min([mi, np.amin(fld)])
            ma = max([ma, np.amax(fld)])
        # Physical extents: either two 1D spaces combined or one 2D space.
        nxdx, nydy = tgt.shape
        if len(tgt) == 2:
            nxdx *= tgt[0].distances[0]
            nydy *= tgt[1].distances[0]
        else:
            nxdx *= tgt[0].distances[0]
            nydy *= tgt[0].distances[1]
        for ax, fld in foo:
            im = ax.imshow(fld.T,
                           extent=[0, nxdx, 0, nydy],
                           aspect='auto',
                           origin='lower',
                           vmin=mi,
                           vmax=ma)
        fig.colorbar(im, ax=axes.ravel().tolist())
    elif len(tgt.shape) == 1:
        ax = next(axs)
        # NOTE(review): `flds` is never used — candidate for removal.
        flds = []
        for _ in range(n_samps):
            pos = ift.from_random('normal', correlated_field.domain)
            ax.plot(correlated_field(pos).to_global_data())
    plt.savefig('correlated_fields.png')
    plt.close()
......@@ -56,10 +56,10 @@ if __name__ == '__main__':
filename = "getting_started_3_mode_{}_".format(mode) + "{}.png"
position_space = ift.RGSpace([128, 128])
power_space = ift.PowerSpace(position_space.get_default_codomain())
cfmaker = ift.CorrelatedFieldMaker()
cfmaker.add_fluctuations(power_space, 1, 1e-2, 1, .5, .1, .5, -3, 0.5, '')
cfmaker.add_fluctuations(position_space,
1, 1e-2, 1, .5, .1, .5, -3, 0.5, '')
correlated_field = cfmaker.finalize(1e-3, 1e-6, '')
A = cfmaker.amplitudes[0]
......@@ -88,8 +88,8 @@ if __name__ == '__main__':
minimizer = ift.NewtonCG(ic_newton)
# Set up likelihood and information Hamiltonian
likelihood = ift.GaussianEnergy(mean=data,
inverse_covariance=N.inverse)(signal_response)
likelihood = (ift.GaussianEnergy(mean=data, inverse_covariance=N.inverse) @
signal_response)
H = ift.StandardHamiltonian(likelihood, ic_sampling)
initial_mean = ift.MultiField.full(H.domain, 0.)
......
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
############################################################
# Non-linear tomography
#
# The signal is a sigmoid-normal distributed field.
# The data is the field integrated along lines of sight that are
# randomly (set mode=0) or radially (mode=1) distributed
#
# Demo takes a while to compute
#############################################################
import sys
import numpy as np
import nifty5 as ift
class SingleDomain(ift.LinearOperator):
    """Relabel a field from `domain` to `target` without touching the data.

    The raw array is copied verbatim onto the new domain, so both domains
    must describe the same shape (not checked here; `from_global_data`
    would fail on a mismatch).
    """
    def __init__(self, domain, target):
        self._domain = ift.makeDomain(domain)
        self._target = ift.makeDomain(target)
        # Pure relabelling: forward and adjoint application are both trivial.
        self._capability = self.TIMES | self.ADJOINT_TIMES

    def apply(self, x, mode):
        self._check_input(x, mode)
        # `self._tgt(mode)` — presumably provided by ift.LinearOperator —
        # selects target (TIMES) or domain (ADJOINT_TIMES).
        return ift.from_global_data(self._tgt(mode), x.to_global_data())
def random_los(n_los):
    """Draw `n_los` random lines of sight in the unit square.

    Returns a pair `(starts, ends)`, each a length-2 list holding the
    x- and y-coordinate arrays (one entry per line of sight).
    """
    start_pts = np.random.uniform(0, 1, (n_los, 2))
    end_pts = np.random.uniform(0, 1, (n_los, 2))
    return list(start_pts.T), list(end_pts.T)
def radial_los(n_los):
    """Draw `n_los` lines of sight ending at the center of the unit square.

    Starts are uniform random; every end point is (0.5, 0.5). A dummy
    uniform draw is still performed for the end points — presumably to
    keep the random number stream aligned with `random_los`.
    """
    start_pts = np.random.uniform(0, 1, (n_los, 2))
    dummy = np.random.uniform(0, 1, (n_los, 2))
    end_pts = 0.5 + 0*dummy
    return list(start_pts.T), list(end_pts.T)
if __name__ == '__main__':
    # Multi-frequency tomography demo: reconstruct a sigmoid-normal field
    # from line-of-sight integrals using MGVI.
    np.random.seed(45)
    # Choose between random line-of-sight response (mode=0) and radial lines
    # of sight (mode=1)
    if len(sys.argv) == 2:
        mode = int(sys.argv[1])
    else:
        mode = 0
    filename = "getting_started_mf_mode_{}_".format(mode) + "{}.png"
    npix1, npix2 = 128, 128
    position_space = ift.RGSpace([npix1, npix2])
    # The correlated field is modelled as a product over two 1D spaces.
    sp1 = ift.RGSpace(npix1)
    sp2 = ift.RGSpace(npix2)
    cfmaker = ift.CorrelatedFieldMaker()
    # Split total fluctuation amplitude between the two sub-domains such
    # that amp1**2 + amp2**2 == 1.
    amp1 = 0.5
    cfmaker.add_fluctuations(sp1, amp1, 1e-2, 1, .1, .01, .5, -2, 1., 'amp1')
    cfmaker.add_fluctuations(sp2, np.sqrt(1. - amp1**2), 1e-2, 1, .1, .01, .5,
                             -1.5, .5, 'amp2')
    correlated_field = cfmaker.finalize(1e-3, 1e-6, '')
    A1 = cfmaker.amplitudes[0]
    A2 = cfmaker.amplitudes[1]
    # Relabel the product target onto the 2D position space.
    DC = SingleDomain(correlated_field.target, position_space)
    # Apply a nonlinearity
    signal = DC @ ift.sigmoid(correlated_field)
    # Build the line-of-sight response and define signal response
    LOS_starts, LOS_ends = random_los(100) if mode == 0 else radial_los(100)
    R = ift.LOSResponse(position_space, starts=LOS_starts, ends=LOS_ends)
    signal_response = R(signal)
    # Specify noise
    data_space = R.target
    noise = .001
    N = ift.ScalingOperator(noise, data_space)
    # Generate mock signal and data
    mock_position = ift.from_random('normal', signal_response.domain)
    data = signal_response(mock_position) + N.draw_sample()
    # Minimization parameters
    ic_sampling = ift.AbsDeltaEnergyController(name='Sampling',
                                               deltaE=0.01,
                                               iteration_limit=100)
    ic_newton = ift.AbsDeltaEnergyController(name='Newton',
                                             deltaE=0.01,
                                             iteration_limit=35)
    minimizer = ift.NewtonCG(ic_newton)
    # Set up likelihood and information Hamiltonian
    likelihood = ift.GaussianEnergy(
        mean=data, inverse_covariance=N.inverse)(signal_response)
    H = ift.StandardHamiltonian(likelihood, ic_sampling)
    initial_mean = ift.MultiField.full(H.domain, 0.)
    mean = initial_mean
    # Plot ground truth, data and the prior power spectra.
    plot = ift.Plot()
    plot.add(signal(mock_position), title='Ground Truth')
    plot.add(R.adjoint_times(data), title='Data')
    plot.add([A1.force(mock_position)], title='Power Spectrum 1')
    plot.add([A2.force(mock_position)], title='Power Spectrum 2')
    plot.output(ny=2, nx=2, xsize=10, ysize=10, name=filename.format("setup"))
    # number of samples used to estimate the KL
    N_samples = 20
    # Repeatedly draw new samples and minimize the KL (ten iterations).
    for i in range(10):
        # Draw new samples and minimize KL
        KL = ift.MetricGaussianKL(mean, H, N_samples)
        KL, convergence = minimizer(KL)
        mean = KL.position
        # Plot current reconstruction
        plot = ift.Plot()
        plot.add(signal(mock_position), title="ground truth")
        plot.add(signal(KL.position), title="reconstruction")
        plot.add([A1.force(KL.position),
                  A1.force(mock_position)],
                 title="power1")
        plot.add([A2.force(KL.position),
                  A2.force(mock_position)],
                 title="power2")
        plot.output(nx=2,
                    ny=2,
                    ysize=10,
                    xsize=10,
                    name=filename.format("loop_{:02d}".format(i)))
    # Draw posterior samples
    # NOTE(review): `Nsamples` is assigned but never used (`N_samples` is
    # used below) — candidate for removal.
    Nsamples = 20
    KL = ift.MetricGaussianKL(mean, H, N_samples)
    sc = ift.StatCalculator()
    scA1 = ift.StatCalculator()
    scA2 = ift.StatCalculator()
    powers1 = []
    powers2 = []
    for sample in KL.samples:
        sc.add(signal(sample + KL.position))
        p1 = A1.force(sample + KL.position)
        p2 = A2.force(sample + KL.position)
        scA1.add(p1)
        powers1.append(p1)
        scA2.add(p2)
        powers2.append(p2)
    # Plotting
    filename_res = filename.format("results")
    plot = ift.Plot()
    plot.add(sc.mean, title="Posterior Mean")
    plot.add(ift.sqrt(sc.var), title="Posterior Standard Deviation")
    # NOTE(review): this recomputes the same spectra collected in the loop
    # above, overwriting powers1/powers2 — redundant work.
    powers1 = [A1.force(s + KL.position) for s in KL.samples]
    powers2 = [A2.force(s + KL.position) for s in KL.samples]
    plot.add(powers1 + [scA1.mean, A1.force(mock_position)],
             title="Sampled Posterior Power Spectrum 1",
             linewidth=[1.]*len(powers1) + [3., 3.])
    plot.add(powers2 + [scA2.mean, A2.force(mock_position)],
             title="Sampled Posterior Power Spectrum 2",
             linewidth=[1.]*len(powers2) + [3., 3.])
    plot.output(ny=2, nx=2, xsize=15, ysize=15, name=filename_res)
    print("Saved results as '{}'.".format(filename_res))
import nifty5 as ift
import numpy as np
def testAmplitudesConsistency(seed, sspace):
    """Check that sample statistics of a CorrelatedFieldMaker model match
    the configured prior fluctuation parameters within 50% tolerance.

    Parameters
    ----------
    seed : int
        Seed for numpy's global RNG.
    sspace : ift domain
        Spatial sub-domain of the two-domain correlated field model.
    """
    def stats(op,samples):
        # Sample mean and std of `op`, evaluated on each sample restricted
        # to op's own domain.
        sc = ift.StatCalculator()
        for s in samples:
            sc.add(op(s.extract(op.domain)))
        return sc.mean.to_global_data(), sc.var.sqrt().to_global_data()
    np.random.seed(seed)
    # Prior parameters to be recovered from the samples.
    # NOTE(review): 'intergated' is a typo for 'integrated' — worth a
    # follow-up rename.
    offset_std = 30
    intergated_fluct_std0 = .003
    intergated_fluct_std1 = 0.1
    nsam = 1000
    fsspace = ift.RGSpace((12,), (0.4,))
    fa = ift.CorrelatedFieldMaker()
    fa.add_fluctuations(sspace, intergated_fluct_std0, 1E-8, 1.1, 2., 2.1, .5,
                        -2, 1., 'spatial')
    fa.add_fluctuations(fsspace, intergated_fluct_std1, 1E-8, 3.1, 1., .5, .1,
                        -4, 1., 'freq')
    op = fa.finalize(offset_std, 1E-8, '')
    samples = [ift.from_random('normal',op.domain) for _ in range(nsam)]
    # Expected (prior-predicted) statistics from the model operators.
    # NOTE(review): these assignments overwrite the scalar prior parameters
    # above with sampled estimates.
    tot_flm, _ = stats(fa.total_fluctuation,samples)
    offset_std,_ = stats(fa.amplitude_total_offset,samples)
    intergated_fluct_std0,_ = stats(fa.average_fluctuation(0),samples)
    intergated_fluct_std1,_ = stats(fa.average_fluctuation(1),samples)
    slice_fluct_std0,_ = stats(fa.slice_fluctuation(0),samples)
    slice_fluct_std1,_ = stats(fa.slice_fluctuation(1),samples)
    # Realized statistics measured directly on the drawn field samples.
    sams = [op(s) for s in samples]
    fluct_total = fa.total_fluctuation_realized(sams)
    fluct_space = fa.average_fluctuation_realized(sams,0)
    fluct_freq = fa.average_fluctuation_realized(sams,1)
    zm_std_mean = fa.offset_amplitude_realized(sams)
    sl_fluct_space = fa.slice_fluctuation_realized(sams,0)
    sl_fluct_freq = fa.slice_fluctuation_realized(sams,1)
    # Expected vs realized, loose 50% tolerance (Monte-Carlo noise).
    np.testing.assert_allclose(offset_std, zm_std_mean, rtol=0.5)
    np.testing.assert_allclose(intergated_fluct_std0, fluct_space, rtol=0.5)
    np.testing.assert_allclose(intergated_fluct_std1, fluct_freq, rtol=0.5)
    np.testing.assert_allclose(tot_flm, fluct_total, rtol=0.5)
    np.testing.assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
    np.testing.assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)
    print("Expected offset Std: " + str(offset_std))
    print("Estimated offset Std: " + str(zm_std_mean))
    print("Expected integrated fluct. space Std: " +
          str(intergated_fluct_std0))
    print("Estimated integrated fluct. space Std: " + str(fluct_space))
    print("Expected integrated fluct. frequency Std: " +
          str(intergated_fluct_std1))
    print("Estimated integrated fluct. frequency Std: " + str(fluct_freq))
    print("Expected slice fluct. space Std: " +
          str(slice_fluct_std0))
    print("Estimated slice fluct. space Std: " + str(sl_fluct_space))
    print("Expected slice fluct. frequency Std: " +
          str(slice_fluct_std1))
    print("Estimated slice fluct. frequency Std: " + str(sl_fluct_freq))
    print("Expected total fluct. Std: " + str(tot_flm))
    print("Estimated total fluct. Std: " + str(fluct_total))
    # Second model: force a given slice fluctuation via
    # moment_slice_to_average and verify it is realized.
    fa = ift.CorrelatedFieldMaker()
    fa.add_fluctuations(fsspace, intergated_fluct_std1, 1., 3.1, 1., .5, .1,
                        -4, 1., 'freq')
    m = 3.
    x = fa.moment_slice_to_average(m)
    fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5,
                        -2, 1., 'spatial', 0)
    # NOTE(review): `offset_std` here is the sampled estimate (array), not
    # the original scalar 30 — confirm this is intended.
    op = fa.finalize(offset_std, .1, '')
    # NOTE(review): reuses `samples` drawn for the first model's domain —
    # assumes both models share the same domain; verify.
    em, estd = stats(fa.slice_fluctuation(0),samples)
    print("Forced slice fluct. space Std: "+str(m))
    print("Expected slice fluct. Std: " + str(em))
    np.testing.assert_allclose(m, em, rtol=0.5)
    assert op.target[0] == sspace
    assert op.target[1] == fsspace
# Move to tests
# FIXME This test fails but it is not relevant for the final result
# assert_allclose(ampl(from_random('normal', ampl.domain)).val[0], vol) or 1??
# End move to tests
# move to tests
# assert_allclose(
#     smooth(from_random('normal', smooth.domain)).val[0:2], 0)
# end move to tests
# Run the consistency check for two seeds on three space geometries
# (regular grid, HEALPix sphere, Gauss-Legendre sphere).
for seed in [1, 42]:
    for sp in [
            ift.RGSpace((32, 64), (1.1, 0.3)),
            ift.HPSpace(32),
            ift.GLSpace(32)
    ]:
        testAmplitudesConsistency(seed, sp)
......@@ -3,11 +3,9 @@ import numpy as np
np.random.seed(42)
sspace = ift.RGSpace((128,))
hspace = sspace.get_default_codomain()
target0 = ift.PowerSpace(hspace)
fa = ift.CorrelatedFieldMaker()
fa.add_fluctuations(target0, 10, 2, 1, 1e-6, 2, 1e-6, -2, 1e-6, 'spatial')
fa.add_fluctuations(sspace, 10, 2, 1, 1e-6, 2, 1e-6, -2, 1e-6, 'spatial')
op = fa.finalize(10, 0.1, '')
A = fa.amplitudes[0]
......
......@@ -211,3 +211,18 @@ Consequently, the inverse covariance operator will automatically have lower indi
ensuring that the whole log-likelihood expression does not scale with resolution.
**This upper-lower index convention is not coded into NIFTy**, in order to not reduce user freedom.
One should however have this in mind when constructing log-likelihoods in order to ensure resolution independence.
Harmonic Transform Convention
.............................
In NIFTy the convention for the harmonic transformations is set by requiring the zero mode of the transformed field to be the integral over the original field.
This choice is convenient for the Fourier transformation and used throughout the literature.
Note that for the spherical harmonics this convention is only rarely used and might result in unexpected factors in the transformed field.
To be specific, for the spherical harmonics transformation this means that a monopole of unit amplitude in position-space which is transformed to the spherical harmonics domain and back to the original position space via the adjoint transformation will have a non-unit amplitude afterwards.
The factor between the input field and the twice transformed field is :math:`1/4\pi`.
In comparison to the convention used in HEALPix, this corresponds to dividing the output of the HEALPix transformed field by :math:`\sqrt{4\pi}` in each transformation.
Depending on the use-case, additional volume factors must be accounted for.
This is especially true if one wants to define the inverse transformation.
Note that the convention for the harmonic transformations used in NIFTy results in non-unitary operators.
......@@ -11,7 +11,6 @@ from .domains.gl_space import GLSpace
from .domains.hp_space import HPSpace
from .domains.power_space import PowerSpace
from .domains.dof_space import DOFSpace
from .domains.log_rg_space import LogRGSpace
from .domain_tuple import DomainTuple
from .multi_domain import MultiDomain
......@@ -20,13 +19,13 @@ from .multi_field import MultiField
from .operators.operator import Operator
from .operators.adder import Adder
from .operators.log1p import Log1p
from .operators.diagonal_operator import DiagonalOperator
from .operators.distributors import DOFDistributor, PowerDistributor
from .operators.domain_tuple_field_inserter import DomainTupleFieldInserter
from .operators.contraction_operator import ContractionOperator
from .operators.linear_interpolation import LinearInterpolator
from .operators.endomorphic_operator import EndomorphicOperator
from .operators.exp_transform import ExpTransform
from .operators.harmonic_operators import (
FFTOperator, HartleyOperator, SHTOperator, HarmonicTransformOperator,
HarmonicSmoothingOperator)
......@@ -34,13 +33,10 @@ from .operators.field_zero_padder import FieldZeroPadder
from .operators.inversion_enabler import InversionEnabler
from .operators.linear_operator import LinearOperator
from .operators.mask_operator import MaskOperator
from .operators.qht_operator import QHTOperator
from .operators.regridding_operator import RegriddingOperator
from .operators.sampling_enabler import SamplingEnabler
from .operators.sandwich_operator import SandwichOperator
from .operators.scaling_operator import ScalingOperator
from .operators.slope_operator import SlopeOperator
from .operators.symmetrizing_operator import SymmetrizingOperator
from .operators.block_diagonal_operator import BlockDiagonalOperator
from .operators.outer_product_operator import OuterProduct
from .operators.simple_linear_operators import (
......@@ -51,7 +47,7 @@ from .operators.value_inserter import ValueInserter
from .operators.energy_operators import (
EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood,
BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator,
Squared2NormOperator)
Squared2NormOperator, StudentTEnergy)
from .operators.convolution_operators import FuncConvolutionOperator
from .probing import probe_with_posterior_samples, probe_diagonal, \
......
......@@ -32,7 +32,7 @@ __all__ = ["ntask", "rank", "master", "local_shape", "data_object", "full",
"redistribute", "default_distaxis", "is_numpy", "absmax", "norm",
"lock", "locked", "uniform_full", "transpose", "to_global_data_rw",
"ensure_not_distributed", "ensure_default_distributed",
"tanh", "conjugate", "sin", "cos", "tan",
"tanh", "conjugate", "sin", "cos", "tan", "log10",
"sinh", "cosh", "sinc", "absolute", "sign", "clip"]
_comm = MPI.COMM_WORLD
......@@ -297,7 +297,7 @@ def _math_helper(x, function, out):
_current_module = sys.modules[__name__]
for f in ["sqrt", "exp", "log", "tanh", "conjugate", "sin", "cos", "tan",
"sinh", "cosh", "sinc", "absolute", "sign"]:
"sinh", "cosh", "sinc", "absolute", "sign", "log10"]:
def func(f):
def func2(x, out=None):
return _math_helper(x, f, out)
......
......@@ -18,9 +18,11 @@
# Data object module that uses simple numpy ndarrays.
import numpy as np
from numpy import absolute, clip, cos, cosh, empty, empty_like, exp, full, log
from numpy import ndarray as data_object
from numpy import ones, sign, sin, sinc, sinh, sqrt, tan, tanh, vdot, zeros
from numpy import empty, empty_like, ones, zeros, full
from numpy import absolute, sign, clip, vdot
from numpy import sin, cos, sinh, cosh, tan, tanh
from numpy import exp, log, log10, sqrt, sinc
from .random import Random
......@@ -34,7 +36,7 @@ __all__ = ["ntask", "rank", "master", "local_shape", "data_object", "full",
"lock", "locked", "uniform_full", "to_global_data_rw",
"ensure_not_distributed", "ensure_default_distributed",
"clip", "sin", "cos", "tan", "sinh",
"cosh", "absolute", "sign", "sinc"]
"cosh", "absolute", "sign", "sinc", "log10"]
ntask = 1
rank = 0
......
......@@ -19,6 +19,8 @@ from functools import reduce
from . import utilities
from .domains.domain import Domain
import numpy as np
class DomainTuple(object):
"""Ordered sequence of Domain objects.
......@@ -125,6 +127,58 @@ class DomainTuple(object):
"""
return self._size
def scalar_weight(self, spaces=None):
"""Returns the uniform volume element for a sub-domain of `self`.
Parameters
----------
spaces : int, tuple of int or None
Indices of the sub-domains to be considered.
If `None`, the entire domain is used.
Returns
-------
float or None
If the requested sub-domain has a uniform volume element, it is
returned. Otherwise, `None` is returned.
"""
if np.isscalar(spaces):
return self._dom[spaces].scalar_dvol
if spaces is None:
spaces = range(len(self._dom))
res = 1.
for i in spaces:
tmp = self._dom[i].scalar_dvol
if tmp is None:
return None
<