Commit db75cb31 authored by Martin Reinecke's avatar Martin Reinecke

Merge branch 'NIFTy_6' into introduce_mpi_namespace

parents 29a722fd f9ca9bad
Pipeline #67634 passed with stages in 15 minutes and 14 seconds
......@@ -89,6 +89,14 @@ run_getting_started_3:
    paths:
      - '*.png'

run_getting_started_mf:
  stage: demo_runs
  script:
    - python3 demos/getting_started_mf.py
  artifacts:
    paths:
      - '*.png'

run_bernoulli:
  stage: demo_runs
  script:
......
......@@ -14,7 +14,6 @@ RUN apt-get update && apt-get install -y \
# more optional NIFTy dependencies
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft.git \
&& pip3 install jupyter \
&& rm -rf /var/lib/apt/lists/*
......
......@@ -47,7 +47,6 @@ Installation
- [Python 3](https://www.python.org/) (3.5.x or later)
- [SciPy](https://www.scipy.org/)
- [pypocketfft](https://gitlab.mpcdf.mpg.de/mtr/pypocketfft)
Optional dependencies:
- [pyHealpix](https://gitlab.mpcdf.mpg.de/ift/pyHealpix) (for harmonic
......@@ -56,6 +55,7 @@ Optional dependencies:
interferometry responses)
- [mpi4py](https://mpi4py.scipy.org) (for MPI-parallel execution)
- [matplotlib](https://matplotlib.org/) (for field plotting)
- [pypocketfft](https://gitlab.mpcdf.mpg.de/mtr/pypocketfft) (for faster FFTs)
### Sources
......@@ -73,7 +73,6 @@ NIFTy6 and its mandatory dependencies can be installed via:
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_6
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via:
......@@ -91,6 +90,14 @@ MPI support is added via:
sudo apt-get install python3-mpi4py
Pypocketfft is added via:
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
If this library is present, NIFTy will detect it automatically and prefer
it over SciPy's FFT. The underlying code is actually the same, but
pypocketfft is compiled with optimizations for the host CPU and can provide
significantly faster transforms.
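A quick way to check which backend will be picked up is to mirror the import
test that NIFTy's FFT helper performs internally (this snippet is only an
illustration, not part of NIFTy's API):

    # Sketch: NIFTy selects pypocketfft if this import succeeds, scipy.fft otherwise.
    try:
        import pypocketfft  # noqa: F401
        print("pypocketfft found - fast FFTs enabled")
    except ImportError:
        print("pypocketfft not found - falling back to scipy.fft")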
### Running the tests
To run the tests, additional packages are required:
......
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
# Author: Philipp Arras
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
import nifty6 as ift
import matplotlib.pyplot as plt

def _default_pspace(dom):
    return ift.PowerSpace(dom.get_default_codomain())

if __name__ == '__main__':
    np.random.seed(42)

    fa = ift.CorrelatedFieldMaker.make(10, 0.1, '')
    n_samps = 20
    slope_means = [-2, -3]
    fa.add_fluctuations(ift.RGSpace(128, 0.1), 10, 2, 1, 1e-6,
                        2, 1e-6, slope_means[0], 0.2, 'spatial')
    # fa.add_fluctuations(_default_pspace(ift.RGSpace((128, 64))), 10, 2, 1,
    #                     1e-6, 2, 1e-6, slope_means[0], 0.2, 'spatial')
    fa.add_fluctuations(ift.RGSpace(32), 3, 5, 1, 1e-6, 2,
                        1e-6, slope_means[1], 1, 'freq')
    correlated_field = fa.finalize()
    amplitudes = fa.normalized_amplitudes

    plt.style.use('seaborn-notebook')
    tgt = correlated_field.target
    if len(tgt.shape) == 1:
        fig, axes = plt.subplots(nrows=1, ncols=2)
        fig.set_size_inches(20, 10)
    else:
        fig, axes = plt.subplots(nrows=3, ncols=3)
        fig.set_size_inches(20, 16)
    axs = (ax for ax in axes.ravel())

    for ii, aa in enumerate(amplitudes):
        ax = next(axs)
        pspec = aa**2
        ax.set_xscale('log')
        ax.set_yscale('log')
        for _ in range(n_samps):
            fld = pspec(ift.from_random('normal', pspec.domain))
            klengths = fld.domain[0].k_lengths
            ycoord = fld.val_rw()
            ycoord[0] = ycoord[1]
            ax.plot(klengths, ycoord, alpha=1)

        ymin, ymax = ax.get_ylim()
        color = plt.rcParams['axes.prop_cycle'].by_key()['color'][0]
        lbl = 'Mean slope (k^{})'.format(2*slope_means[ii])
        for fac in np.linspace(np.log(ymin), np.log(ymax**2/ymin)):
            xs = np.linspace(np.amin(klengths[1:]), np.amax(klengths[1:]))
            ys = xs**(2*slope_means[ii])*np.exp(fac)
            xs = np.insert(xs, 0, 0)
            ys = np.insert(ys, 0, ys[0])
            ax.plot(xs, ys, zorder=1, color=color, linewidth=0.3, label=lbl)
            lbl = None

        ax.set_ylim([ymin, ymax])
        ax.set_xlim([None, np.amax(klengths)])
        ax.legend()

    if len(tgt.shape) == 2:
        foo = []
        for ax in axs:
            pos = ift.from_random('normal', correlated_field.domain)
            fld = correlated_field(pos).val
            foo.append((ax, fld))

        mi, ma = np.inf, -np.inf
        for _, fld in foo:
            mi = min([mi, np.amin(fld)])
            ma = max([ma, np.amax(fld)])

        nxdx, nydy = tgt.shape
        if len(tgt) == 2:
            nxdx *= tgt[0].distances[0]
            nydy *= tgt[1].distances[0]
        else:
            nxdx *= tgt[0].distances[0]
            nydy *= tgt[0].distances[1]

        for ax, fld in foo:
            im = ax.imshow(fld.T,
                           extent=[0, nxdx, 0, nydy],
                           aspect='auto',
                           origin='lower',
                           vmin=mi,
                           vmax=ma)
        fig.colorbar(im, ax=axes.ravel().tolist())
    elif len(tgt.shape) == 1:
        ax = next(axs)
        flds = []
        for _ in range(n_samps):
            pos = ift.from_random('normal', correlated_field.domain)
            ax.plot(correlated_field(pos).val)

    plt.savefig('correlated_fields.png')
    plt.close()
import nifty6 as ift
import numpy as np
def testAmplitudesConsistency(seed, sspace):
    def stats(op, samples):
        sc = ift.StatCalculator()
        for s in samples:
            sc.add(op(s.extract(op.domain)))
        return sc.mean.val, sc.var.sqrt().val

    np.random.seed(seed)
    offset_std = .1
    integrated_fluct_std0 = .003
    integrated_fluct_std1 = 0.1
    nsam = 1000

    fsspace = ift.RGSpace((12,), (0.4,))

    fa = ift.CorrelatedFieldMaker.make(offset_std, 1E-8, '')
    fa.add_fluctuations(sspace, integrated_fluct_std0, 1E-8, 1.1, 2., 2.1, .5,
                        -2, 1., 'spatial')
    fa.add_fluctuations(fsspace, integrated_fluct_std1, 1E-8, 3.1, 1., .5, .1,
                        -4, 1., 'freq')
    op = fa.finalize()

    samples = [ift.from_random('normal', op.domain) for _ in range(nsam)]
    tot_flm, _ = stats(fa.total_fluctuation, samples)
    offset_std, _ = stats(fa.amplitude_total_offset, samples)
    integrated_fluct_std0, _ = stats(fa.average_fluctuation(0), samples)
    integrated_fluct_std1, _ = stats(fa.average_fluctuation(1), samples)
    slice_fluct_std0, _ = stats(fa.slice_fluctuation(0), samples)
    slice_fluct_std1, _ = stats(fa.slice_fluctuation(1), samples)

    sams = [op(s) for s in samples]
    fluct_total = fa.total_fluctuation_realized(sams)
    fluct_space = fa.average_fluctuation_realized(sams, 0)
    fluct_freq = fa.average_fluctuation_realized(sams, 1)
    zm_std_mean = fa.offset_amplitude_realized(sams)
    sl_fluct_space = fa.slice_fluctuation_realized(sams, 0)
    sl_fluct_freq = fa.slice_fluctuation_realized(sams, 1)

    print("Expected offset Std: " + str(offset_std))
    print("Estimated offset Std: " + str(zm_std_mean))

    print("Expected integrated fluct. space Std: " +
          str(integrated_fluct_std0))
    print("Estimated integrated fluct. space Std: " + str(fluct_space))

    print("Expected integrated fluct. frequency Std: " +
          str(integrated_fluct_std1))
    print("Estimated integrated fluct. frequency Std: " + str(fluct_freq))

    print("Expected slice fluct. space Std: " + str(slice_fluct_std0))
    print("Estimated slice fluct. space Std: " + str(sl_fluct_space))

    print("Expected slice fluct. frequency Std: " + str(slice_fluct_std1))
    print("Estimated slice fluct. frequency Std: " + str(sl_fluct_freq))

    print("Expected total fluct. Std: " + str(tot_flm))
    print("Estimated total fluct. Std: " + str(fluct_total))

    np.testing.assert_allclose(offset_std, zm_std_mean, rtol=0.5)
    np.testing.assert_allclose(integrated_fluct_std0, fluct_space, rtol=0.5)
    np.testing.assert_allclose(integrated_fluct_std1, fluct_freq, rtol=0.5)
    np.testing.assert_allclose(tot_flm, fluct_total, rtol=0.5)
    np.testing.assert_allclose(slice_fluct_std0, sl_fluct_space, rtol=0.5)
    np.testing.assert_allclose(slice_fluct_std1, sl_fluct_freq, rtol=0.5)

    fa = ift.CorrelatedFieldMaker.make(offset_std, .1, '')
    fa.add_fluctuations(fsspace, integrated_fluct_std1, 1., 3.1, 1., .5, .1,
                        -4, 1., 'freq')
    m = 3.
    x = fa.moment_slice_to_average(m)
    fa.add_fluctuations(sspace, x, 1.5, 1.1, 2., 2.1, .5,
                        -2, 1., 'spatial', 0)
    op = fa.finalize()
    em, estd = stats(fa.slice_fluctuation(0), samples)

    print("Forced slice fluct. space Std: " + str(m))
    print("Expected slice fluct. Std: " + str(em))
    np.testing.assert_allclose(m, em, rtol=0.5)

    assert op.target[0] == sspace
    assert op.target[1] == fsspace


# Move to tests
# FIXME This test fails but it is not relevant for the final result
# assert_allclose(ampl(from_random('normal', ampl.domain)).val[0], vol) or 1??
# End move to tests
# move to tests
# assert_allclose(
#     smooth(from_random('normal', smooth.domain)).val[0:2], 0)
# end move to tests

for seed in [1, 42]:
    for sp in [
            ift.RGSpace((32, 64), (1.1, 0.3)),
            ift.HPSpace(32),
            ift.GLSpace(32)
    ]:
        testAmplitudesConsistency(seed, sp)
import nifty6 as ift
import numpy as np

np.random.seed(42)

sspace = ift.RGSpace((128,))

fa = ift.CorrelatedFieldMaker.make(10, 0.1, '')
fa.add_fluctuations(sspace, 10, 2, 1, 1e-6, 2, 1e-6, -2, 1e-6, 'spatial')
op = fa.finalize()
A = fa.amplitude

cstpos = ift.from_random('normal', op.domain)
p1, p2 = [ift.Plot() for _ in range(2)]
lst1 = []
skys1, skys2 = [], []
for _ in range(8):
    pos = ift.from_random('normal', op.domain)
    foo = ift.MultiField.union([cstpos, pos.extract(A.domain)])
    skys2.append(op(foo))

    sky = op(pos)
    skys1.append(sky)
    lst1.append(A.force(pos))

for pp, ll in [(p1, skys1), (p2, skys2)]:
    mi, ma = None, None
    if False:
        mi, ma = np.inf, -np.inf
        for ss in ll:
            mi = min([mi, np.amin(ss.val)])
            ma = max([ma, np.amax(ss.val)])
    for ss in ll:
        pp.add(ss, zmin=mi, zmax=ma)

p1.add(lst1)
p2.add(lst1)
p1.output(name='full.png')
p2.output(name='xi_fixed.png')
......@@ -9,7 +9,6 @@ NIFTy6 and its mandatory dependencies can be installed via::
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_6
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via::
......@@ -27,6 +26,14 @@ MPI support is added via::
sudo apt-get install python3-mpi4py
Pypocketfft is added via::

    pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
If this library is present, NIFTy will detect it automatically and prefer
it over SciPy's FFT. The underlying code is actually the same, but
pypocketfft is compiled with optimizations for the host CPU and can provide
significantly faster transforms.
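On larger problems, multi-threaded transforms can help further. The FFT helper
module in this release provides a ``set_nthreads`` function; a minimal usage
sketch, assuming the module stays importable as ``nifty6.fft``::

    from nifty6 import fft

    # Use four threads for subsequent transforms (passed to pypocketfft's
    # nthreads or scipy.fft's workers argument, depending on the backend).
    fft.set_nthreads(4)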
NIFTy documentation is provided by Sphinx. To build the documentation::
sudo apt-get install python3-sphinx-rtd-theme dvipng
......
......@@ -15,7 +15,6 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import pypocketfft
_nthreads = 1
......@@ -29,14 +28,35 @@ def set_nthreads(nthr):
    _nthreads = int(nthr)


def fftn(a, axes=None):
    return pypocketfft.c2c(a, axes=axes, nthreads=_nthreads)


def ifftn(a, axes=None):
    return pypocketfft.c2c(a, axes=axes, inorm=2, forward=False,
                           nthreads=_nthreads)


def hartley(a, axes=None):
    return pypocketfft.genuine_hartley(a, axes=axes, nthreads=_nthreads)


# Use pypocketfft when it is installed; otherwise fall back to scipy.fft.
try:
    import pypocketfft

    def fftn(a, axes=None):
        return pypocketfft.c2c(a, axes=axes, nthreads=max(_nthreads, 0))

    def ifftn(a, axes=None):
        return pypocketfft.c2c(a, axes=axes, inorm=2, forward=False,
                               nthreads=max(_nthreads, 0))

    def hartley(a, axes=None):
        return pypocketfft.genuine_hartley(a, axes=axes,
                                           nthreads=max(_nthreads, 0))
except ImportError:
    import scipy.fft

    def fftn(a, axes=None):
        return scipy.fft.fftn(a, axes=axes, workers=_nthreads)

    def ifftn(a, axes=None):
        return scipy.fft.ifftn(a, axes=axes, workers=_nthreads)

    def hartley(a, axes=None):
        tmp = scipy.fft.fftn(a, axes=axes, workers=_nthreads)
        return tmp.real+tmp.imag
......@@ -100,11 +100,21 @@ def _log_vol(power_space):
    return logk_lengths[1:] - logk_lengths[:-1]


def _structured_spaces(domain):
    if isinstance(domain[0], UnstructuredDomain):
        return np.arange(1, len(domain))
    return np.arange(len(domain))


def _total_fluctuation_realized(samples):
    spaces = _structured_spaces(samples[0].domain)
    co = ContractionOperator(samples[0].domain, spaces)
    size = co.domain.size/co.target.size
    res = 0.
    for s in samples:
        res = res + (s - s.mean())**2
    return np.sqrt((res/len(samples)).mean())
        res = res + (s - co.adjoint(co(s)/size))**2
    res = res.mean(spaces)/len(samples)
    return np.sqrt(res if np.isscalar(res) else res.val)


class _LognormalMomentMatching(Operator):
......@@ -206,8 +216,7 @@ class _Normalization(Operator):
        mode_multiplicity = pd.adjoint(full(pd.target, 1.)).val_rw()
        zero_mode = (slice(None),)*self._domain.axes[space][0] + (0,)
        mode_multiplicity[zero_mode] = 0
        self._mode_multiplicity = makeField(self._domain,
                                            mode_multiplicity)
        self._mode_multiplicity = makeField(self._domain, mode_multiplicity)
        self._specsum = _SpecialSum(self._domain, space)

    def apply(self, x):
......@@ -299,14 +308,15 @@ class _Amplitude(Operator):
        shift = DiagonalOperator(makeField(dom[space], foo), dom, space)
        vslope = DiagonalOperator(
            makeField(target[space],
                      _relative_log_k_lengths(target[space])),
            makeField(target[space], _relative_log_k_lengths(target[space])),
            target, space)
        foo, bar = [np.zeros(target[space].shape) for _ in range(2)]
        bar[1:] = foo[0] = totvol
        vol0, vol1 = [DiagonalOperator(makeField(target[space], aa),
                                       target, space) for aa in (foo, bar)]
        vol0, vol1 = [
            DiagonalOperator(makeField(target[space], aa), target, space)
            for aa in (foo, bar)
        ]

        # Prepare fields for Adder
        shift, vol0 = [op(full(op.domain, 1)) for op in (shift, vol0)]
......@@ -413,13 +423,11 @@ class CorrelatedFieldMaker:
                                        self._prefix + prefix + 'flexibility',
                                        N)
        asp = _LognormalMomentMatching(asperity_mean, asperity_stddev,
                                       self._prefix + prefix + 'asperity',
                                       N)
                                       self._prefix + prefix + 'asperity', N)
        avgsl = _normal(loglogavgslope_mean, loglogavgslope_stddev,
                        self._prefix + prefix + 'loglogavgslope', N)
        amp = _Amplitude(PowerSpace(harmonic_partner),
                         fluct, flex, asp, avgsl, self._azm,
                         position_space[-1].total_volume,
        amp = _Amplitude(PowerSpace(harmonic_partner), fluct, flex, asp, avgsl,
                         self._azm, position_space[-1].total_volume,
                         self._prefix + prefix + 'spectrum', dofdex)

        if index is not None:
......@@ -457,7 +465,7 @@ class CorrelatedFieldMaker:
        for ii in range(n_amplitudes):
            co = ContractionOperator(hspace, spaces[:ii] + spaces[ii + 1:])
            pp = self._a[ii].target[amp_space]
            pd = PowerDistributor(pp.harmonic_partner, pp, amp_space)
            pd = PowerDistributor(co.target, pp, amp_space)
            a.append(co.adjoint @ pd @ self._a[ii])
        corr = reduce(mul, a)
        return ht(azm*corr*ducktape(hspace, None, self._prefix + 'xi'))
......@@ -578,10 +586,12 @@ class CorrelatedFieldMaker:
    @staticmethod
    def offset_amplitude_realized(samples):
        spaces = _structured_spaces(samples[0].domain)
        res = 0.
        for s in samples:
            res = res + s.mean()**2
        return np.sqrt(res/len(samples))
            res = res + s.mean(spaces)**2
        res = res/len(samples)
        return np.sqrt(res if np.isscalar(res) else res.val)

    @staticmethod
    def total_fluctuation_realized(samples):
......@@ -591,36 +601,41 @@ class CorrelatedFieldMaker:
    def slice_fluctuation_realized(samples, space):
        """Computes slice fluctuations from a collection of field realizations
        (defined in signal space)."""
        ldom = len(samples[0].domain)
        if space >= ldom:
        spaces = _structured_spaces(samples[0].domain)
        if space >= len(spaces):
            raise ValueError("invalid space specified; got {!r}".format(space))
        if ldom == 1:
        if len(spaces) == 1:
            return _total_fluctuation_realized(samples)
        space = space + spaces[0]
        res1, res2 = 0., 0.
        for s in samples:
            res1 = res1 + s**2
            res2 = res2 + s.mean(space)**2
        res1 = res1/len(samples)
        res2 = res2/len(samples)
        res = res1.mean() - res2.mean()
        return np.sqrt(res)
        res = res1.mean(spaces) - res2.mean(spaces[:-1])
        return np.sqrt(res if np.isscalar(res) else res.val)

    @staticmethod
    def average_fluctuation_realized(samples, space):
        """Computes average fluctuations from a collection of field realizations
        (defined in signal space)."""
        ldom = len(samples[0].domain)
        if space >= ldom:
        spaces = _structured_spaces(samples[0].domain)
        if space >= len(spaces):
            raise ValueError("invalid space specified; got {!r}".format(space))
        if ldom == 1:
        if len(spaces) == 1:
            return _total_fluctuation_realized(samples)
        spaces = ()
        for i in range(ldom):
            if i != space:
                spaces += (i,)
        space = space + spaces[0]
        sub_spaces = set(spaces)
        sub_spaces.remove(space)
        # Domain containing domain[space] and domain[0] iff total_N > 0
        sub_dom = makeDomain([samples[0].domain[ind]
                              for ind in (set([0,])-set(spaces))|set([space,])])
        co = ContractionOperator(sub_dom, len(sub_dom)-1)
        size = co.domain.size/co.target.size
        res = 0.
        for s in samples:
            r = s.mean(spaces)
            res = res + (r - r.mean())**2
        res = res/len(samples)
        return np.sqrt(res.mean())
            r = s.mean(sub_spaces)
            res = res + (r - co.adjoint(co(r)/size))**2
        res = res.mean(spaces[0])/len(samples)
        return np.sqrt(res if np.isscalar(res) else res.val)
......@@ -14,6 +14,7 @@
# Copyright(C) 2013-2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
from .. import utilities
from ..linearization import Linearization
......@@ -78,7 +79,7 @@ class MetricGaussianKL(Energy):
    def __init__(self, mean, hamiltonian, n_samples, constants=[],
                 point_estimates=[], mirror_samples=False,
                 napprox=0, _samples=None):
                 napprox=0, _samples=None, lh_sampling_dtype=np.float64):
        super(MetricGaussianKL, self).__init__(mean)

        if not isinstance(hamiltonian, StandardHamiltonian):
......@@ -99,7 +100,8 @@ class MetricGaussianKL(Energy):
                mean, point_estimates, True)).metric
            if napprox > 1:
                met._approximation = makeOp(approximation2endo(met, napprox))
            _samples = tuple(met.draw_sample(from_inverse=True)
            _samples = tuple(met.draw_sample(from_inverse=True,
                                             dtype=lh_sampling_dtype)
                             for _ in range(n_samples))
            if mirror_samples:
                _samples += tuple(-s for s in _samples)
......@@ -120,11 +122,13 @@ class MetricGaussianKL(Energy):
        self._grad = g * (1./len(self._samples))
        self._metric = None
        self._napprox = napprox
        self._sampdt = lh_sampling_dtype

    def at(self, position):
        return MetricGaussianKL(position, self._hamiltonian, 0,
                                self._constants, self._point_estimates,
                                napprox=self._napprox, _samples=self._samples)
                                napprox=self._napprox, _samples=self._samples,
                                lh_sampling_dtype=self._sampdt)