Commit 20577a56 authored by Martin Reinecke

Merge branch 'switch_to_ducc' of gitlab.mpcdf.mpg.de:ift/nifty into switch_to_ducc

parents 98b214c2 fce04fb8
Pipeline #77095 passed with stages
in 25 minutes and 38 seconds
......@@ -9,6 +9,21 @@ now uses the DUCC package (<https://gitlab.mpcdf.mpg.de/mtr/ducc>),
which is their successor.
Naming of operator tests
------------------------
The implementation tests for nonlinear operators are now available as
`ift.extra.check_operator()` and those for linear operators as
`ift.extra.check_linear_operator()`.
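A minimal usage sketch (the operator and test location here are illustrative placeholders, not part of the interface change):

```
import nifty7 as ift

dom = ift.RGSpace(16)
lin_op = ift.ScalingOperator(dom, 2.)    # a simple linear operator
ift.extra.check_linear_operator(lin_op)

nonlin_op = lin_op.exp()                 # a simple nonlinear operator
ift.extra.check_operator(nonlin_op, ift.from_random(nonlin_op.domain))
```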
MetricGaussianKL interface
--------------------------
Users should no longer instantiate `MetricGaussianKL` via its constructor.
Use `MetricGaussianKL.make()` instead.
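In user code this is a one-line change, as in the demos updated by this commit:

```
# before: KL = ift.MetricGaussianKL(mean, H, N_samples)
KL = ift.MetricGaussianKL.make(mean, H, N_samples)
```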
Changes since NIFTy 5
=====================
......@@ -70,6 +85,19 @@ print(met)
print(met.draw_sample())
```
New approach for sampling complex numbers
-----------------------------------------
When calling `draw_sample_with_dtype` with a complex dtype,
the variance is now applied to the real and imaginary parts separately,
in order to be consistent with the Hamiltonian. Note that, as a consequence,
```
np.std(ift.from_random(domain, 'normal', dtype=np.complex128).val)
```
yields sqrt(2) rather than 1.
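A quick numerical sanity check of this behaviour (the domain is an arbitrary placeholder):

```
import numpy as np
import nifty7 as ift

domain = ift.RGSpace(100000)
r = ift.from_random(domain, 'normal', dtype=np.float64).val
c = ift.from_random(domain, 'normal', dtype=np.complex128).val
print(np.std(r))  # ~1
print(np.std(c))  # ~sqrt(2): real and imaginary parts each have unit variance
```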
MPI parallelisation over samples in MetricGaussianKL
----------------------------------------------------
......@@ -85,6 +113,7 @@ the generation of reproducible random numbers in the presence of MPI parallelism
and leads to cleaner code overall. Please see the documentation of
`nifty7.random` for details.
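A short sketch of the seed-stack interface (assuming the push/pop functions of `nifty7.random`; `push_sseq_from_seed` is used the same way in the demos):

```
import nifty7 as ift

ift.random.push_sseq_from_seed(42)  # subsequent random draws are reproducible
f = ift.from_random(ift.RGSpace(8), 'normal')
ift.random.pop_sseq()               # restore the previous random state
```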
Interface Change for from_random and OuterProduct
-------------------------------------------------
......
......@@ -131,7 +131,7 @@ def main():
# Draw new samples to approximate the KL five times
for i in range(5):
# Draw new samples and minimize KL
-KL = ift.MetricGaussianKL(mean, H, N_samples)
+KL = ift.MetricGaussianKL.make(mean, H, N_samples)
KL, convergence = minimizer(KL)
mean = KL.position
......@@ -144,7 +144,7 @@ def main():
name=filename.format("loop_{:02d}".format(i)))
# Draw posterior samples
-KL = ift.MetricGaussianKL(mean, H, N_samples)
+KL = ift.MetricGaussianKL.make(mean, H, N_samples)
sc = ift.StatCalculator()
for sample in KL.samples:
sc.add(signal(sample + KL.position))
......
%% Cell type:markdown id: tags:
# Notebook showcasing the NIFTy 6 Correlated Field model
**Skip to `Parameter Showcases` for the meat/veggies ;)**
The field model roughly works like this:
`f = HT( A * zero_mode * xi ) + offset`
`A` is a spectral power field constructed from power spectra that hold on subdomains of the target domain.
It is scaled by a zero-mode operator and then pointwise multiplied by a Gaussian excitation field `xi`, yielding
a representation of the field in harmonic space.
This is then transformed to the target real space, and an offset is added.
The power spectra from which `A` is constructed are in turn modelled as the sum of a power-law component
and an integrated Wiener process whose amplitude and roughness can be set.
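In `nifty7` these pieces map onto the `CorrelatedFieldMaker` interface roughly like this (an illustrative sketch with placeholder parameter values; the helper functions below do the same thing):

```python
import nifty7 as ift

x_space = ift.RGSpace(256)
cf = ift.CorrelatedFieldMaker.make(offset_mean=0., offset_std_mean=1e-3,
                                   offset_std_std=1e-16, prefix='')
cf.add_fluctuations(target_subdomain=x_space,
                    fluctuations_mean=1., fluctuations_stddev=0.1,
                    flexibility_mean=1., flexibility_stddev=0.1,
                    asperity_mean=1., asperity_stddev=0.1,
                    loglogavgslope_mean=-2., loglogavgslope_stddev=0.1)
correlated_field = cf.finalize(prior_info=0)  # operator implementing f
spectrum = cf.amplitude                       # operator for the power spectrum A
```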
## Setup code
%% Cell type:code id: tags:
``` python
import nifty7 as ift
import matplotlib.pyplot as plt
import numpy as np
ift.random.push_sseq_from_seed(43)
n_pix = 256
x_space = ift.RGSpace(n_pix)
```
%% Cell type:code id: tags:
``` python
# Plotting routine
def plot(fields, spectra, title=None):
# Plotting preparation is normally handled by nifty7.Plot
# It is done manually here to be able to tweak details
# Fields are assumed to have identical domains
fig = plt.figure(tight_layout=True, figsize=(12, 3.5))
if title is not None:
fig.suptitle(title, fontsize=14)
# Field
ax1 = fig.add_subplot(1, 2, 1)
ax1.axhline(y=0., color='k', linestyle='--', alpha=0.25)
for field in fields:
dom = field.domain[0]
xcoord = np.arange(dom.shape[0]) * dom.distances[0]
ax1.plot(xcoord, field.val)
ax1.set_xlim(xcoord[0], xcoord[-1])
ax1.set_ylim(-5., 5.)
ax1.set_xlabel('x')
ax1.set_ylabel('f(x)')
ax1.set_title('Field realizations')
# Spectrum
ax2 = fig.add_subplot(1, 2, 2)
for spectrum in spectra:
xcoord = spectrum.domain[0].k_lengths
ycoord = spectrum.val_rw()
ycoord[0] = ycoord[1]
ax2.plot(xcoord, ycoord)
ax2.set_ylim(1e-6, 10.)
ax2.set_xscale('log')
ax2.set_yscale('log')
ax2.set_xlabel('k')
ax2.set_ylabel('p(k)')
ax2.set_title('Power Spectrum')
fig.align_labels()
plt.show()
# Helper: draw main sample
main_sample = None
def init_model(m_pars, fl_pars):
global main_sample
cf = ift.CorrelatedFieldMaker.make(**m_pars)
cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0)
main_sample = ift.from_random(field.domain)
print("model domain keys:", field.domain.keys())
# Helper: field and spectrum from parameter dictionaries + plotting
def eval_model(m_pars, fl_pars, title=None, samples=None):
cf = ift.CorrelatedFieldMaker.make(**m_pars)
cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0)
spectrum = cf.amplitude
if samples is None:
samples = [main_sample]
field_realizations = [field(s) for s in samples]
spectrum_realizations = [spectrum.force(s) for s in samples]
plot(field_realizations, spectrum_realizations, title)
def gen_samples(key_to_vary):
if key_to_vary is None:
return [main_sample]
dct = main_sample.to_dict()
subdom_to_vary = dct.pop(key_to_vary).domain
samples = []
for i in range(8):
d = dct.copy()
d[key_to_vary] = ift.from_random(subdom_to_vary)
samples.append(ift.MultiField.from_dict(d))
return samples
def vary_parameter(parameter_key, values, samples_vary_in=None):
s = gen_samples(samples_vary_in)
for v in values:
if parameter_key in cf_make_pars.keys():
m_pars = {**cf_make_pars, parameter_key: v}
eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s)
else:
fl_pars = {**cf_x_fluct_pars, parameter_key: v}
eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s)
```
%% Cell type:markdown id: tags:
## Before the Action: The Moment-Matched Log-Normal Distribution
Many properties of the correlated field are modelled as being lognormally distributed.
The distribution models are parametrized via their means (`_mean`) and standard deviations (`_stddev`).
To get a feeling for how the ratio of the `mean` and `stddev` parameters influences the distribution shape,
here are a few example histograms (observe the x-axis!):
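For reference, the moment matching itself is the standard one for log-normal distributions: to obtain a log-normal variable with mean $m$ and standard deviation $s$, the underlying Gaussian is given the parameters

$$\sigma^2 = \ln\left(1 + \frac{s^2}{m^2}\right), \qquad \mu = \ln(m) - \frac{\sigma^2}{2}.$$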
%% Cell type:code id: tags:
``` python
fig = plt.figure(figsize=(13, 3.5))
mean = 1.0
sigmas = [1.0, 0.5, 0.1]
for i in range(3):
-op = ift.library.correlated_fields._LognormalMomentMatching(mean=mean,
-                                                            sig=sigmas[i],
-                                                            key='foo',
-                                                            N_copies=0)
+op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],
+                            key='foo', N_copies=0)
op_samples = np.array(
[op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])
ax = fig.add_subplot(1, 3, i + 1)
ax.hist(op_samples, bins=50)
ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}")
ax.set_xlabel('x')
del op_samples
plt.show()
```
%% Cell type:markdown id: tags:
## The Neutral Field
To demonstrate the effect of all parameters, first a 'neutral' set of parameters
is defined. These are then varied one by one, showing the effect of each variation
on the generated field realizations and on the underlying power spectrum from which
they are drawn.
As the neutral field, a model with a white power spectrum and vanishing spectral power is chosen.
%% Cell type:code id: tags:
``` python
# Neutral model parameters yielding a quasi-constant field
cf_make_pars = {
'offset_mean': 0.,
'offset_std_mean': 1e-3,
'offset_std_std': 1e-16,
'prefix': ''
}
cf_x_fluct_pars = {
'target_subdomain': x_space,
'fluctuations_mean': 1e-3,
'fluctuations_stddev': 1e-16,
'flexibility_mean': 1e-3,
'flexibility_stddev': 1e-16,
'asperity_mean': 1e-3,
'asperity_stddev': 1e-16,
'loglogavgslope_mean': 0.,
'loglogavgslope_stddev': 1e-16
}
init_model(cf_make_pars, cf_x_fluct_pars)
```
%% Cell type:code id: tags:
``` python
# Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field")
```
%% Cell type:markdown id: tags:
# Parameter Showcases
## The `fluctuations_` parameters of `add_fluctuations()`
determine the **amplitude of variations along the field dimension**
for which `add_fluctuations` is called.
`fluctuations_mean` sets the _average_ amplitude of the field's fluctuations along the given dimension,\
`fluctuations_stddev` sets the width and shape of the amplitude distribution.
The amplitude is modelled as being log-normally distributed,
see `The Moment-Matched Log-Normal Distribution` above for details.
#### `fluctuations_mean`:
%% Cell type:code id: tags:
``` python
vary_parameter('fluctuations_mean', [0.05, 0.5, 2.], samples_vary_in='xi')
```
%% Cell type:markdown id: tags:
#### `fluctuations_stddev`:
%% Cell type:code id: tags:
``` python
cf_x_fluct_pars['fluctuations_mean'] = 1.0
vary_parameter('fluctuations_stddev', [0.01, 0.1, 1.0], samples_vary_in='fluctuations')
```
%% Cell type:markdown id: tags:
## The `loglogavgslope_` parameters of `add_fluctuations()`
determine **the slope of the loglog-linear (power law) component of the power spectrum**.
The slope is modelled as being normally distributed.
#### `loglogavgslope_mean`:
%% Cell type:code id: tags:
``` python
vary_parameter('loglogavgslope_mean', [-3., -1., 1.], samples_vary_in='xi')
```
%% Cell type:markdown id: tags:
#### `loglogavgslope_stddev`:
%% Cell type:code id: tags:
``` python
cf_x_fluct_pars['loglogavgslope_mean'] = -1.0
vary_parameter('loglogavgslope_stddev', [0.01, 0.1, 1.0], samples_vary_in='loglogavgslope')
```
%% Cell type:markdown id: tags:
## The `flexibility_` parameters of `add_fluctuations()`
determine **the amplitude of the integrated Wiener process component of the power spectrum**
(i.e. how strongly the power spectrum varies beyond the power law).
`flexibility_mean` sets the _average_ amplitude of the integrated Wiener process component,
`flexibility_stddev` sets how much the amplitude can vary.\
These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior.
#### `flexibility_mean`:
%% Cell type:code id: tags:
``` python
vary_parameter('flexibility_mean', [0.1, 1.0, 3.0], samples_vary_in='spectrum')
```
%% Cell type:markdown id: tags:
#### `flexibility_stddev`:
%% Cell type:code id: tags:
``` python
cf_x_fluct_pars['flexibility_mean'] = 2.0
vary_parameter('flexibility_stddev', [0.01, 0.1, 1.0], samples_vary_in='flexibility')
```
%% Cell type:markdown id: tags:
## The `asperity_` parameters of `add_fluctuations()`
`asperity_` determines **how rough the integrated Wiener process component of the power spectrum is**.
`asperity_mean` sets the average roughness, `asperity_stddev` sets how much the roughness can vary.\
These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior.
#### `asperity_mean`:
%% Cell type:code id: tags:
``` python
vary_parameter('asperity_mean', [0.001, 1.0, 5.0], samples_vary_in='spectrum')
```
%% Cell type:markdown id: tags:
#### `asperity_stddev`:
%% Cell type:code id: tags:
``` python
cf_x_fluct_pars['asperity_mean'] = 1.0
vary_parameter('asperity_stddev', [0.01, 0.1, 1.0], samples_vary_in='asperity')
```
%% Cell type:markdown id: tags:
## The `offset_mean` parameter of `CorrelatedFieldMaker.make()`
The `offset_mean` parameter defines a global additive offset on the field realizations.
If the field is used in a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset, since `exp(g + offset_mean) = exp(offset_mean) * exp(g)`.
%% Cell type:code id: tags:
``` python
# Reset model to neutral
cf_x_fluct_pars['fluctuations_mean'] = 1e-3
cf_x_fluct_pars['flexibility_mean'] = 1e-3
cf_x_fluct_pars['asperity_mean'] = 1e-3
cf_x_fluct_pars['loglogavgslope_mean'] = 1e-3
```
%% Cell type:code id: tags:
``` python
vary_parameter('offset_mean', [3., 0., -2.])
```
%% Cell type:markdown id: tags:
## The `offset_std_` parameters of `CorrelatedFieldMaker.make()`
Variations of the global offset of the field are modelled as being log-normally distributed.
See `The Moment-Matched Log-Normal Distribution` above for details.
The `offset_std_mean` parameter sets how much NIFTy will vary the offset *on average*.\
The `offset_std_std` parameter defines the width and shape of the offset variation distribution.
#### `offset_std_mean`:
%% Cell type:code id: tags:
``` python
vary_parameter('offset_std_mean', [1e-16, 0.5, 2.], samples_vary_in='xi')
```
%% Cell type:markdown id: tags:
#### `offset_std_std`:
%% Cell type:code id: tags:
``` python
cf_make_pars['offset_std_mean'] = 1.0
vary_parameter('offset_std_std', [0.01, 0.1, 1.0], samples_vary_in='zeromode')
```
......
......@@ -131,7 +131,7 @@ def main():
for i in range(10):
# Draw new samples and minimize KL
-KL = ift.MetricGaussianKL(mean, H, N_samples)
+KL = ift.MetricGaussianKL.make(mean, H, N_samples)
KL, convergence = minimizer(KL)
mean = KL.position
......@@ -157,7 +157,7 @@ def main():
name=filename.format("loop_{:02d}".format(i)))
# Done, draw posterior samples
-KL = ift.MetricGaussianKL(mean, H, N_samples)
+KL = ift.MetricGaussianKL.make(mean, H, N_samples)
sc = ift.StatCalculator()
scA1 = ift.StatCalculator()
scA2 = ift.StatCalculator()
......
......@@ -34,6 +34,7 @@ from matplotlib.colors import LogNorm
import nifty7 as ift
def main():
dom = ift.UnstructuredDomain(1)
scale = 10
......@@ -90,7 +91,7 @@ def main():
plt.figure(figsize=[12, 8])
for ii in range(15):
if ii % 3 == 0:
-mgkl = ift.MetricGaussianKL(pos, ham, 40)
+mgkl = ift.MetricGaussianKL.make(pos, ham, 40)
plt.cla()
plt.imshow(z.T, origin='lower', norm=LogNorm(), vmin=1e-3,
......
......@@ -97,7 +97,7 @@ def main():
p_space = ift.UnstructuredDomain(N_params)
params = ift.full(p_space, 0.)
R = PolynomialResponse(p_space, x)
-ift.extra.consistency_check(R)
+ift.extra.check_linear_operator(R)
d_space = R.target
d = ift.makeField(d_space, y)
......
......@@ -52,6 +52,7 @@ from .operators.energy_operators import (
BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator,
Squared2NormOperator, StudentTEnergy, VariableCovarianceGaussianEnergy)
from .operators.convolution_operators import FuncConvolutionOperator
from .operators.normal_operators import NormalTransform, LognormalTransform
from .probing import probe_with_posterior_samples, probe_diagonal, \
StatCalculator, approximation2endo
......
......@@ -15,21 +15,115 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from itertools import combinations
import numpy as np
from numpy.testing import assert_
from . import random
from .domain_tuple import DomainTuple
from .field import Field
from .linearization import Linearization
from .multi_domain import MultiDomain
from .multi_field import MultiField
from .operators.energy_operators import EnergyOperator
from .operators.linear_operator import LinearOperator
from .operators.operator import Operator
from .sugar import from_random
__all__ = ["consistency_check", "check_jacobian_consistency",
__all__ = ["check_linear_operator", "check_operator",
"assert_allclose"]
def check_linear_operator(op, domain_dtype=np.float64, target_dtype=np.float64,
atol=1e-12, rtol=1e-12, only_r_linear=False):
"""
Checks an operator for algebraic consistency of its capabilities.
Checks whether times(), adjoint_times(), inverse_times() and
adjoint_inverse_times() (if in the capability list) are implemented
consistently. Additionally, it checks whether the operator is linear.
Parameters
----------
op : LinearOperator
Operator which shall be checked.
domain_dtype : dtype
The data type of the random vectors in the operator's domain. Default
is `np.float64`.
target_dtype : dtype
The data type of the random vectors in the operator's target. Default
is `np.float64`.
atol : float
Absolute tolerance for the check. If rtol is specified,
then satisfying any tolerance will let the check pass.
Default: 1e-12.
rtol : float
Relative tolerance for the check. If atol is specified,
then satisfying any tolerance will let the check pass.
Default: 1e-12.
only_r_linear : bool
Set to True if the operator is only R-linear, not C-linear.
This relaxes the adjointness test accordingly. Default: False.
"""
if not isinstance(op, LinearOperator):
raise TypeError('This test tests only linear operators.')
_domain_check_linear(op, domain_dtype)
_domain_check_linear(op.adjoint, target_dtype)
_domain_check_linear(op.inverse, target_dtype)
_domain_check_linear(op.adjoint.inverse, domain_dtype)
_check_linearity(op, domain_dtype, atol, rtol)
_check_linearity(op.adjoint, target_dtype, atol, rtol)
_check_linearity(op.inverse, target_dtype, atol, rtol)
_check_linearity(op.adjoint.inverse, domain_dtype, atol, rtol)
_full_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint.inverse, domain_dtype, target_dtype, atol,
rtol, only_r_linear)
def check_operator(op, loc, tol=1e-12, ntries=100, perf_check=True,
only_r_differentiable=True, metric_sampling=True):
"""
Performs various checks of the implementation of linear and nonlinear
operators.
Computes the Jacobian with finite differences and compares it to the
implemented Jacobian.
Parameters
----------
op : Operator
Operator which shall be checked.
loc : Field or MultiField
A Field or MultiField instance which has the same domain
as op; the location at which the Jacobian is checked.
tol : float
Tolerance for the check.
ntries : int
Number of tries for the finite-difference Jacobian check.
Default: 100.
perf_check : Boolean
Do performance check. May be disabled for very unimportant operators.
only_r_differentiable : Boolean
Jacobians of C-differentiable operators need to be C-linear.
Default: True
metric_sampling: Boolean
If op is an EnergyOperator, metric_sampling determines whether the
test shall try to sample from the metric or not.
"""
if not isinstance(op, Operator):
raise TypeError('This test tests only operators.')
_domain_check_nonlinear(op, loc)
_performance_check(op, loc, bool(perf_check))
_linearization_value_consistency(op, loc)
_jac_vs_finite_differences(op, loc, np.sqrt(tol), ntries,
only_r_differentiable)
_check_nontrivial_constant(op, loc, tol, ntries, only_r_differentiable,
metric_sampling)
def assert_allclose(f1, f2, atol, rtol):
if isinstance(f1, Field):
return np.testing.assert_allclose(f1.val, f2.val, atol=atol, rtol=rtol)
......@@ -37,6 +131,27 @@ def assert_allclose(f1, f2, atol, rtol):
assert_allclose(val, f2[key], atol=atol, rtol=rtol)
def assert_equal(f1, f2):
if isinstance(f1, Field):
return np.testing.assert_equal(f1.val, f2.val)
for key,