Commit 20577a56 authored by Martin Reinecke's avatar Martin Reinecke
Browse files

Merge branch 'switch_to_ducc' of gitlab.mpcdf.mpg.de:ift/nifty into switch_to_ducc

parents 98b214c2 fce04fb8
Pipeline #77095 passed with stages
in 25 minutes and 38 seconds
...@@ -9,6 +9,21 @@ now uses the DUCC package (<https://gitlab.mpcdf.mpg.de/mtr/ducc>), ...@@ -9,6 +9,21 @@ now uses the DUCC package (<https://gitlab.mpcdf.mpg.de/mtr/ducc>),
which is their successor. which is their successor.
Naming of operator tests
------------------------
The implementation tests for nonlinear operators are now available in
`ift.extra.check_operator()` and for linear operators
`ift.extra.check_linear_operator()`.
MetricGaussianKL interface
--------------------------
Users do not instantiate `MetricGaussianKL` by its constructor anymore. Rather
`MetricGaussianKL.make()` shall be used.
Changes since NIFTy 5 Changes since NIFTy 5
===================== =====================
...@@ -70,6 +85,19 @@ print(met) ...@@ -70,6 +85,19 @@ print(met)
print(met.draw_sample()) print(met.draw_sample())
``` ```
New approach for sampling complex numbers
=========================================
When calling draw_sample_with_dtype with a complex dtype,
the variance is now used for the imaginary part and real part separately.
This is done in order to be consistent with the Hamiltonian.
Note that by this,
```
np.std(ift.from_random(domain, 'normal', dtype=np.complex128).val)
```
does not give 1, but sqrt(2) as a result.
MPI parallelisation over samples in MetricGaussianKL MPI parallelisation over samples in MetricGaussianKL
---------------------------------------------------- ----------------------------------------------------
...@@ -85,6 +113,7 @@ the generation of reproducible random numbers in the presence of MPI parallelism ...@@ -85,6 +113,7 @@ the generation of reproducible random numbers in the presence of MPI parallelism
and leads to cleaner code overall. Please see the documentation of and leads to cleaner code overall. Please see the documentation of
`nifty7.random` for details. `nifty7.random` for details.
Interface Change for from_random and OuterProduct Interface Change for from_random and OuterProduct
------------------------------------------------- -------------------------------------------------
......
...@@ -131,7 +131,7 @@ def main(): ...@@ -131,7 +131,7 @@ def main():
# Draw new samples to approximate the KL five times # Draw new samples to approximate the KL five times
for i in range(5): for i in range(5):
# Draw new samples and minimize KL # Draw new samples and minimize KL
KL = ift.MetricGaussianKL(mean, H, N_samples) KL = ift.MetricGaussianKL.make(mean, H, N_samples)
KL, convergence = minimizer(KL) KL, convergence = minimizer(KL)
mean = KL.position mean = KL.position
...@@ -144,7 +144,7 @@ def main(): ...@@ -144,7 +144,7 @@ def main():
name=filename.format("loop_{:02d}".format(i))) name=filename.format("loop_{:02d}".format(i)))
# Draw posterior samples # Draw posterior samples
KL = ift.MetricGaussianKL(mean, H, N_samples) KL = ift.MetricGaussianKL.make(mean, H, N_samples)
sc = ift.StatCalculator() sc = ift.StatCalculator()
for sample in KL.samples: for sample in KL.samples:
sc.add(signal(sample + KL.position)) sc.add(signal(sample + KL.position))
......
...@@ -152,10 +152,8 @@ ...@@ -152,10 +152,8 @@
"sigmas = [1.0, 0.5, 0.1]\n", "sigmas = [1.0, 0.5, 0.1]\n",
"\n", "\n",
"for i in range(3):\n", "for i in range(3):\n",
" op = ift.library.correlated_fields._LognormalMomentMatching(mean=mean,\n", " op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],\n",
" sig=sigmas[i],\n", " key='foo', N_copies=0)\n",
" key='foo',\n",
" N_copies=0)\n",
" op_samples = np.array(\n", " op_samples = np.array(\n",
" [op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])\n", " [op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])\n",
"\n", "\n",
......
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
# Notebook showcasing the NIFTy 6 Correlated Field model # Notebook showcasing the NIFTy 6 Correlated Field model
**Skip to `Parameter Showcases` for the meat/veggies ;)** **Skip to `Parameter Showcases` for the meat/veggies ;)**
The field model roughly works like this: The field model roughly works like this:
`f = HT( A * zero_mode * xi ) + offset` `f = HT( A * zero_mode * xi ) + offset`
`A` is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain. `A` is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain.
It is scaled by a zero mode operator and then pointwise multiplied by a gaussian excitation field, yielding It is scaled by a zero mode operator and then pointwise multiplied by a gaussian excitation field, yielding
a representation of the field in harmonic space. a representation of the field in harmonic space.
It is then transformed into the target real space and an offset added. It is then transformed into the target real space and an offset added.
The power spectra `A` is constructed of are in turn constructed as the sum of a power law component The power spectra `A` is constructed of are in turn constructed as the sum of a power law component
and an integrated Wiener process whose amplitude and roughness can be set. and an integrated Wiener process whose amplitude and roughness can be set.
## Setup code ## Setup code
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
import nifty7 as ift import nifty7 as ift
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
import numpy as np import numpy as np
ift.random.push_sseq_from_seed(43) ift.random.push_sseq_from_seed(43)
n_pix = 256 n_pix = 256
x_space = ift.RGSpace(n_pix) x_space = ift.RGSpace(n_pix)
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Plotting routine # Plotting routine
def plot(fields, spectra, title=None): def plot(fields, spectra, title=None):
# Plotting preparation is normally handled by nifty7.Plot # Plotting preparation is normally handled by nifty7.Plot
# It is done manually here to be able to tweak details # It is done manually here to be able to tweak details
# Fields are assumed to have identical domains # Fields are assumed to have identical domains
fig = plt.figure(tight_layout=True, figsize=(12, 3.5)) fig = plt.figure(tight_layout=True, figsize=(12, 3.5))
if title is not None: if title is not None:
fig.suptitle(title, fontsize=14) fig.suptitle(title, fontsize=14)
# Field # Field
ax1 = fig.add_subplot(1, 2, 1) ax1 = fig.add_subplot(1, 2, 1)
ax1.axhline(y=0., color='k', linestyle='--', alpha=0.25) ax1.axhline(y=0., color='k', linestyle='--', alpha=0.25)
for field in fields: for field in fields:
dom = field.domain[0] dom = field.domain[0]
xcoord = np.arange(dom.shape[0]) * dom.distances[0] xcoord = np.arange(dom.shape[0]) * dom.distances[0]
ax1.plot(xcoord, field.val) ax1.plot(xcoord, field.val)
ax1.set_xlim(xcoord[0], xcoord[-1]) ax1.set_xlim(xcoord[0], xcoord[-1])
ax1.set_ylim(-5., 5.) ax1.set_ylim(-5., 5.)
ax1.set_xlabel('x') ax1.set_xlabel('x')
ax1.set_ylabel('f(x)') ax1.set_ylabel('f(x)')
ax1.set_title('Field realizations') ax1.set_title('Field realizations')
# Spectrum # Spectrum
ax2 = fig.add_subplot(1, 2, 2) ax2 = fig.add_subplot(1, 2, 2)
for spectrum in spectra: for spectrum in spectra:
xcoord = spectrum.domain[0].k_lengths xcoord = spectrum.domain[0].k_lengths
ycoord = spectrum.val_rw() ycoord = spectrum.val_rw()
ycoord[0] = ycoord[1] ycoord[0] = ycoord[1]
ax2.plot(xcoord, ycoord) ax2.plot(xcoord, ycoord)
ax2.set_ylim(1e-6, 10.) ax2.set_ylim(1e-6, 10.)
ax2.set_xscale('log') ax2.set_xscale('log')
ax2.set_yscale('log') ax2.set_yscale('log')
ax2.set_xlabel('k') ax2.set_xlabel('k')
ax2.set_ylabel('p(k)') ax2.set_ylabel('p(k)')
ax2.set_title('Power Spectrum') ax2.set_title('Power Spectrum')
fig.align_labels() fig.align_labels()
plt.show() plt.show()
# Helper: draw main sample # Helper: draw main sample
main_sample = None main_sample = None
def init_model(m_pars, fl_pars): def init_model(m_pars, fl_pars):
global main_sample global main_sample
cf = ift.CorrelatedFieldMaker.make(**m_pars) cf = ift.CorrelatedFieldMaker.make(**m_pars)
cf.add_fluctuations(**fl_pars) cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0) field = cf.finalize(prior_info=0)
main_sample = ift.from_random(field.domain) main_sample = ift.from_random(field.domain)
print("model domain keys:", field.domain.keys()) print("model domain keys:", field.domain.keys())
# Helper: field and spectrum from parameter dictionaries + plotting # Helper: field and spectrum from parameter dictionaries + plotting
def eval_model(m_pars, fl_pars, title=None, samples=None): def eval_model(m_pars, fl_pars, title=None, samples=None):
cf = ift.CorrelatedFieldMaker.make(**m_pars) cf = ift.CorrelatedFieldMaker.make(**m_pars)
cf.add_fluctuations(**fl_pars) cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0) field = cf.finalize(prior_info=0)
spectrum = cf.amplitude spectrum = cf.amplitude
if samples is None: if samples is None:
samples = [main_sample] samples = [main_sample]
field_realizations = [field(s) for s in samples] field_realizations = [field(s) for s in samples]
spectrum_realizations = [spectrum.force(s) for s in samples] spectrum_realizations = [spectrum.force(s) for s in samples]
plot(field_realizations, spectrum_realizations, title) plot(field_realizations, spectrum_realizations, title)
def gen_samples(key_to_vary): def gen_samples(key_to_vary):
if key_to_vary is None: if key_to_vary is None:
return [main_sample] return [main_sample]
dct = main_sample.to_dict() dct = main_sample.to_dict()
subdom_to_vary = dct.pop(key_to_vary).domain subdom_to_vary = dct.pop(key_to_vary).domain
samples = [] samples = []
for i in range(8): for i in range(8):
d = dct.copy() d = dct.copy()
d[key_to_vary] = ift.from_random(subdom_to_vary) d[key_to_vary] = ift.from_random(subdom_to_vary)
samples.append(ift.MultiField.from_dict(d)) samples.append(ift.MultiField.from_dict(d))
return samples return samples
def vary_parameter(parameter_key, values, samples_vary_in=None): def vary_parameter(parameter_key, values, samples_vary_in=None):
s = gen_samples(samples_vary_in) s = gen_samples(samples_vary_in)
for v in values: for v in values:
if parameter_key in cf_make_pars.keys(): if parameter_key in cf_make_pars.keys():
m_pars = {**cf_make_pars, parameter_key: v} m_pars = {**cf_make_pars, parameter_key: v}
eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s) eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s)
else: else:
fl_pars = {**cf_x_fluct_pars, parameter_key: v} fl_pars = {**cf_x_fluct_pars, parameter_key: v}
eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s) eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s)
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## Before the Action: The Moment-Matched Log-Normal Distribution ## Before the Action: The Moment-Matched Log-Normal Distribution
Many properties of the correlated field are modelled as being lognormally distributed. Many properties of the correlated field are modelled as being lognormally distributed.
The distribution models are parametrized via their means `_mean` and standard-deviations `_stddev`. The distribution models are parametrized via their means `_mean` and standard-deviations `_stddev`.
To get a feeling of how the ratio of the `mean` and `stddev` parameters influences the distribution shape, To get a feeling of how the ratio of the `mean` and `stddev` parameters influences the distribution shape,
here are a few example histograms: (observe the x-axis!) here are a few example histograms: (observe the x-axis!)
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
fig = plt.figure(figsize=(13, 3.5)) fig = plt.figure(figsize=(13, 3.5))
mean = 1.0 mean = 1.0
sigmas = [1.0, 0.5, 0.1] sigmas = [1.0, 0.5, 0.1]
for i in range(3): for i in range(3):
op = ift.library.correlated_fields._LognormalMomentMatching(mean=mean, op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],
sig=sigmas[i], key='foo', N_copies=0)
key='foo',
N_copies=0)
op_samples = np.array( op_samples = np.array(
[op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]]) [op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])
ax = fig.add_subplot(1, 3, i + 1) ax = fig.add_subplot(1, 3, i + 1)
ax.hist(op_samples, bins=50) ax.hist(op_samples, bins=50)
ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}") ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}")
ax.set_xlabel('x') ax.set_xlabel('x')
del op_samples del op_samples
plt.show() plt.show()
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The Neutral Field ## The Neutral Field
To demonstrate the effect of all parameters, first a 'neutral' set of parameters To demonstrate the effect of all parameters, first a 'neutral' set of parameters
is defined which then are varied one by one, showing the effect of the variation is defined which then are varied one by one, showing the effect of the variation
on the generated field realizations and the underlying power spectrum from which on the generated field realizations and the underlying power spectrum from which
they were drawn. they were drawn.
As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen. As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Neutral model parameters yielding a quasi-constant field # Neutral model parameters yielding a quasi-constant field
cf_make_pars = { cf_make_pars = {
'offset_mean': 0., 'offset_mean': 0.,
'offset_std_mean': 1e-3, 'offset_std_mean': 1e-3,
'offset_std_std': 1e-16, 'offset_std_std': 1e-16,
'prefix': '' 'prefix': ''
} }
cf_x_fluct_pars = { cf_x_fluct_pars = {
'target_subdomain': x_space, 'target_subdomain': x_space,
'fluctuations_mean': 1e-3, 'fluctuations_mean': 1e-3,
'fluctuations_stddev': 1e-16, 'fluctuations_stddev': 1e-16,
'flexibility_mean': 1e-3, 'flexibility_mean': 1e-3,
'flexibility_stddev': 1e-16, 'flexibility_stddev': 1e-16,
'asperity_mean': 1e-3, 'asperity_mean': 1e-3,
'asperity_stddev': 1e-16, 'asperity_stddev': 1e-16,
'loglogavgslope_mean': 0., 'loglogavgslope_mean': 0.,
'loglogavgslope_stddev': 1e-16 'loglogavgslope_stddev': 1e-16
} }
init_model(cf_make_pars, cf_x_fluct_pars) init_model(cf_make_pars, cf_x_fluct_pars)
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Show neutral field # Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field") eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field")
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
# Parameter Showcases # Parameter Showcases
## The `fluctuations_` parameters of `add_fluctuations()` ## The `fluctuations_` parameters of `add_fluctuations()`
determine the **amplitude of variations along the field dimension** determine the **amplitude of variations along the field dimension**
for which `add_fluctuations` is called. for which `add_fluctuations` is called.
`fluctuations_mean` set the _average_ amplitude of the fields fluctuations along the given dimension,\ `fluctuations_mean` set the _average_ amplitude of the fields fluctuations along the given dimension,\
`fluctuations_stddev` sets the width and shape of the amplitude distribution. `fluctuations_stddev` sets the width and shape of the amplitude distribution.
The amplitude is modelled as being log-normally distributed, The amplitude is modelled as being log-normally distributed,
see `The Moment-Matched Log-Normal Distribution` above for details. see `The Moment-Matched Log-Normal Distribution` above for details.
#### `fluctuations_mean`: #### `fluctuations_mean`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('fluctuations_mean', [0.05, 0.5, 2.], samples_vary_in='xi') vary_parameter('fluctuations_mean', [0.05, 0.5, 2.], samples_vary_in='xi')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
#### `fluctuations_stddev`: #### `fluctuations_stddev`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
cf_x_fluct_pars['fluctuations_mean'] = 1.0 cf_x_fluct_pars['fluctuations_mean'] = 1.0
vary_parameter('fluctuations_stddev', [0.01, 0.1, 1.0], samples_vary_in='fluctuations') vary_parameter('fluctuations_stddev', [0.01, 0.1, 1.0], samples_vary_in='fluctuations')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The `loglogavgslope_` parameters of `add_fluctuations()` ## The `loglogavgslope_` parameters of `add_fluctuations()`
determine **the slope of the loglog-linear (power law) component of the power spectrum**. determine **the slope of the loglog-linear (power law) component of the power spectrum**.
The slope is modelled to be normally distributed. The slope is modelled to be normally distributed.
#### `loglogavgslope_mean`: #### `loglogavgslope_mean`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('loglogavgslope_mean', [-3., -1., 1.], samples_vary_in='xi') vary_parameter('loglogavgslope_mean', [-3., -1., 1.], samples_vary_in='xi')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
#### `loglogavgslope_stddev`: #### `loglogavgslope_stddev`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
cf_x_fluct_pars['loglogavgslope_mean'] = -1.0 cf_x_fluct_pars['loglogavgslope_mean'] = -1.0
vary_parameter('loglogavgslope_stddev', [0.01, 0.1, 1.0], samples_vary_in='loglogavgslope') vary_parameter('loglogavgslope_stddev', [0.01, 0.1, 1.0], samples_vary_in='loglogavgslope')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The `flexibility_` parameters of `add_fluctuations()` ## The `flexibility_` parameters of `add_fluctuations()`
determine **the amplitude of the integrated Wiener process component of the power spectrum** determine **the amplitude of the integrated Wiener process component of the power spectrum**
(how strong the power spectrum varies besides the power-law). (how strong the power spectrum varies besides the power-law).
`flexibility_mean` sets the _average_ amplitude of the i.g.p. component, `flexibility_mean` sets the _average_ amplitude of the i.g.p. component,
`flexibility_stddev` sets how much the amplitude can vary.\ `flexibility_stddev` sets how much the amplitude can vary.\
These two parameters feed into a moment-matched log-normal distribution model, These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior. see above for a demo of its behavior.
#### `flexibility_mean`: #### `flexibility_mean`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('flexibility_mean', [0.1, 1.0, 3.0], samples_vary_in='spectrum') vary_parameter('flexibility_mean', [0.1, 1.0, 3.0], samples_vary_in='spectrum')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
#### `flexibility_stddev`: #### `flexibility_stddev`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
cf_x_fluct_pars['flexibility_mean'] = 2.0 cf_x_fluct_pars['flexibility_mean'] = 2.0
vary_parameter('flexibility_stddev', [0.01, 0.1, 1.0], samples_vary_in='flexibility') vary_parameter('flexibility_stddev', [0.01, 0.1, 1.0], samples_vary_in='flexibility')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The `asperity_` parameters of `add_fluctuations()` ## The `asperity_` parameters of `add_fluctuations()`
`asperity_` determines **how rough the integrated Wiener process component of the power spectrum is**. `asperity_` determines **how rough the integrated Wiener process component of the power spectrum is**.
`asperity_mean` sets the average roughness, `asperity_stddev` sets how much the roughness can vary.\ `asperity_mean` sets the average roughness, `asperity_stddev` sets how much the roughness can vary.\
These two parameters feed into a moment-matched log-normal distribution model, These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior. see above for a demo of its behavior.
#### `asperity_mean`: #### `asperity_mean`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('asperity_mean', [0.001, 1.0, 5.0], samples_vary_in='spectrum') vary_parameter('asperity_mean', [0.001, 1.0, 5.0], samples_vary_in='spectrum')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
#### `asperity_stddev`: #### `asperity_stddev`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
cf_x_fluct_pars['asperity_mean'] = 1.0 cf_x_fluct_pars['asperity_mean'] = 1.0
vary_parameter('asperity_stddev', [0.01, 0.1, 1.0], samples_vary_in='asperity') vary_parameter('asperity_stddev', [0.01, 0.1, 1.0], samples_vary_in='asperity')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The `offset_mean` parameter of `CorrelatedFieldMaker.make()` ## The `offset_mean` parameter of `CorrelatedFieldMaker.make()`
The `offset_mean` parameter defines a global additive offset on the field realizations. The `offset_mean` parameter defines a global additive offset on the field realizations.
If the field is used for a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset. If the field is used for a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset.
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
# Reset model to neutral # Reset model to neutral
cf_x_fluct_pars['fluctuations_mean'] = 1e-3 cf_x_fluct_pars['fluctuations_mean'] = 1e-3
cf_x_fluct_pars['flexibility_mean'] = 1e-3 cf_x_fluct_pars['flexibility_mean'] = 1e-3
cf_x_fluct_pars['asperity_mean'] = 1e-3 cf_x_fluct_pars['asperity_mean'] = 1e-3
cf_x_fluct_pars['loglogavgslope_mean'] = 1e-3 cf_x_fluct_pars['loglogavgslope_mean'] = 1e-3
``` ```
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('offset_mean', [3., 0., -2.]) vary_parameter('offset_mean', [3., 0., -2.])
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
## The `offset_std_` parameters of `CorrelatedFieldMaker.make()` ## The `offset_std_` parameters of `CorrelatedFieldMaker.make()`
Variation of the global offset of the field are modelled as being log-normally distributed. Variation of the global offset of the field are modelled as being log-normally distributed.
See `The Moment-Matched Log-Normal Distribution` above for details. See `The Moment-Matched Log-Normal Distribution` above for details.
The `offset_std_mean` parameter sets how much NIFTy will vary the offset *on average*.\ The `offset_std_mean` parameter sets how much NIFTy will vary the offset *on average*.\
The `offset_std_std` parameter defines the width and shape of the offset variation distribution. The `offset_std_std` parameter defines the width and shape of the offset variation distribution.
#### `offset_std_mean`: #### `offset_std_mean`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
vary_parameter('offset_std_mean', [1e-16, 0.5, 2.], samples_vary_in='xi') vary_parameter('offset_std_mean', [1e-16, 0.5, 2.], samples_vary_in='xi')
``` ```
%% Cell type:markdown id: tags: %% Cell type:markdown id: tags:
#### `offset_std_std`: #### `offset_std_std`:
%% Cell type:code id: tags: %% Cell type:code id: tags:
``` python ``` python
cf_make_pars['offset_std_mean'] = 1.0 cf_make_pars['offset_std_mean'] = 1.0
vary_parameter('offset_std_std', [0.01, 0.1, 1.0], samples_vary_in='zeromode') vary_parameter('offset_std_std', [0.01, 0.1, 1.0], samples_vary_in='zeromode')
``` ```
......
...@@ -131,7 +131,7 @@ def main(): ...@@ -131,7 +131,7 @@ def main():
for i in range(10): for i in range(10):
# Draw new samples and minimize KL # Draw new samples and minimize KL
KL = ift.MetricGaussianKL(mean, H, N_samples) KL = ift.MetricGaussianKL.make(mean, H, N_samples)
KL, convergence = minimizer(KL) KL, convergence = minimizer(KL)
mean = KL.position mean = KL.position
...@@ -157,7 +157,7 @@ def main(): ...@@ -157,7 +157,7 @@ def main():
name=filename.format("loop_{:02d}".format(i))) name=filename.format("loop_{:02d}".format(i)))
# Done, draw posterior samples # Done, draw posterior samples
KL = ift.MetricGaussianKL(mean, H, N_samples) KL = ift.MetricGaussianKL.make(mean, H, N_samples)
sc = ift.StatCalculator() sc = ift.StatCalculator()
scA1 = ift.StatCalculator() scA1 = ift.StatCalculator()
scA2 = ift.StatCalculator() scA2 = ift.StatCalculator()
......
...@@ -34,6 +34,7 @@ from matplotlib.colors import LogNorm ...@@ -34,6 +34,7 @@ from matplotlib.colors import LogNorm
import nifty7 as ift import nifty7 as ift
def main(): def main():
dom = ift.UnstructuredDomain(1) dom = ift.UnstructuredDomain(1)
scale = 10 scale = 10
...@@ -90,7 +91,7 @@ def main(): ...@@ -90,7 +91,7 @@ def main():
plt.figure(figsize=[12, 8]) plt.figure(figsize=[12, 8])
for ii in range(15): for ii in range(15):
if ii % 3 == 0: if ii % 3 == 0:
mgkl = ift.MetricGaussianKL(pos, ham, 40) mgkl = ift.MetricGaussianKL.make(pos, ham, 40)
plt.cla() plt.cla()
plt.imshow(z.T, origin='lower', norm=LogNorm(), vmin=1e-3, plt.imshow(z.T, origin='lower', norm=LogNorm(), vmin=1e-3,
......
...@@ -97,7 +97,7 @@ def main(): ...@@ -97,7 +97,7 @@ def main():
p_space = ift.UnstructuredDomain(N_params) p_space = ift.UnstructuredDomain(N_params)
params = ift.full(p_space, 0.) params = ift.full(p_space, 0.)
R = PolynomialResponse(p_space, x) R = PolynomialResponse(p_space, x)
ift.extra.consistency_check(R) ift.extra.check_linear_operator(R)
d_space = R.target d_space = R.target
d = ift.makeField(d_space, y) d = ift.makeField(d_space, y)
......
...@@ -52,6 +52,7 @@ from .operators.energy_operators import ( ...@@ -52,6 +52,7 @@ from .operators.energy_operators import (
BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator, BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator,
Squared2NormOperator, StudentTEnergy, VariableCovarianceGaussianEnergy) Squared2NormOperator, StudentTEnergy, VariableCovarianceGaussianEnergy)
from .operators.convolution_operators import FuncConvolutionOperator from .operators.convolution_operators import FuncConvolutionOperator
from .operators.normal_operators import NormalTransform, LognormalTransform
from .probing import probe_with_posterior_samples, probe_diagonal, \ from .probing import probe_with_posterior_samples, probe_diagonal, \
StatCalculator, approximation2endo StatCalculator, approximation2endo
......
...@@ -15,21 +15,115 @@ ...@@ -15,21 +15,115 @@
# #
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik. # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from itertools import combinations
import numpy as np import numpy as np
from numpy.testing import assert_ from numpy.testing import assert_
from . import random
from .domain_tuple import DomainTuple from .domain_tuple import DomainTuple
from .field import Field from .field import Field
from .linearization import Linearization from .linearization import Linearization
from .multi_domain import MultiDomain from .multi_domain import MultiDomain
from .multi_field import MultiField from .multi_field import MultiField
from .operators.energy_operators import EnergyOperator
from .operators.linear_operator import LinearOperator from .operators.linear_operator import LinearOperator
from .operators.operator import Operator
from .sugar import from_random from .sugar import from_random
__all__ = ["consistency_check", "check_jacobian_consistency", __all__ = ["check_linear_operator", "check_operator",
"assert_allclose"] "assert_allclose"]
def check_linear_operator(op, domain_dtype=np.float64, target_dtype=np.float64,
                          atol=1e-12, rtol=1e-12, only_r_linear=False):
    """
    Checks an operator for algebraic consistency of its capabilities.

    Checks whether times(), adjoint_times(), inverse_times() and
    adjoint_inverse_times() (if in capability list) is implemented
    consistently. Additionally, it checks whether the operator is linear.

    Parameters
    ----------
    op : LinearOperator
        Operator which shall be checked.
    domain_dtype : dtype
        The data type of the random vectors in the operator's domain. Default
        is `np.float64`.
    target_dtype : dtype
        The data type of the random vectors in the operator's target. Default
        is `np.float64`.
    atol : float
        Absolute tolerance for the check. If rtol is specified,
        then satisfying any tolerance will let the check pass.
        Default: 1e-12.
    rtol : float
        Relative tolerance for the check. If atol is specified,
        then satisfying any tolerance will let the check pass.
        Default: 1e-12.
    only_r_linear: bool
        set to True if the operator is only R-linear, not C-linear.
        This will relax the adjointness test accordingly.

    Raises
    ------
    TypeError
        If `op` is not a LinearOperator.
    """
    if not isinstance(op, LinearOperator):
        raise TypeError('This test tests only linear operators.')
    # Run the domain/target consistency checks on the operator itself and on
    # all of its algebraically derived variants (adjoint, inverse, both).
    _domain_check_linear(op, domain_dtype)
    _domain_check_linear(op.adjoint, target_dtype)
    _domain_check_linear(op.inverse, target_dtype)
    _domain_check_linear(op.adjoint.inverse, domain_dtype)
    # Linearity must hold for every variant as well.
    _check_linearity(op, domain_dtype, atol, rtol)
    _check_linearity(op.adjoint, target_dtype, atol, rtol)
    _check_linearity(op.inverse, target_dtype, atol, rtol)
    _check_linearity(op.adjoint.inverse, domain_dtype, atol, rtol)
    # Full implementation check: note that domain and target dtypes swap for
    # the adjoint and inverse variants.
    _full_implementation(op, domain_dtype, target_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.adjoint.inverse, domain_dtype, target_dtype, atol,
                         rtol, only_r_linear)
def check_operator(op, loc, tol=1e-12, ntries=100, perf_check=True,
                   only_r_differentiable=True, metric_sampling=True):
    """
    Performs various checks of the implementation of linear and nonlinear
    operators.

    Computes the Jacobian with finite differences and compares it to the
    implemented Jacobian.

    Parameters
    ----------
    op : Operator
        Operator which shall be checked.
    loc : Field or MultiField
        A Field or MultiField instance which has the same domain
        as op. The location at which the gradient is checked.
    tol : float
        Tolerance for the check.
    ntries : int
        Number of tries for the (finite-difference) Jacobian check.
        Default: 100.
    perf_check : Boolean
        Do performance check. May be disabled for very unimportant operators.
    only_r_differentiable : Boolean
        Jacobians of C-differentiable operators need to be C-linear.
        Default: True
    metric_sampling: Boolean
        If op is an EnergyOperator, metric_sampling determines whether the
        test shall try to sample from the metric or not.

    Raises
    ------
    TypeError
        If `op` is not an Operator.
    """
    if not isinstance(op, Operator):
        # NOTE: this check accepts any Operator, not only linear ones.
        raise TypeError('This test tests only operators.')
    _domain_check_nonlinear(op, loc)
    _performance_check(op, loc, bool(perf_check))
    _linearization_value_consistency(op, loc)
    # The finite-difference comparison is performed at sqrt(tol) accuracy,
    # since first-order differences cannot reach full precision.
    _jac_vs_finite_differences(op, loc, np.sqrt(tol), ntries,
                               only_r_differentiable)
    _check_nontrivial_constant(op, loc, tol, ntries, only_r_differentiable,
                               metric_sampling)
def assert_allclose(f1, f2, atol, rtol): def assert_allclose(f1, f2, atol, rtol):
if isinstance(f1, Field): if isinstance(f1, Field):
return np.testing.assert_allclose(f1.val, f2.val, atol=atol, rtol=rtol) return np.testing.assert_allclose(f1.val, f2.val, atol=atol, rtol=rtol)
...@@ -37,6 +131,27 @@ def assert_allclose(f1, f2, atol, rtol): ...@@ -37,6 +131,27 @@ def assert_allclose(f1, f2, atol, rtol):
assert_allclose(val, f2[key], atol=atol, rtol=rtol) assert_allclose(val, f2[key], atol=atol, rtol=rtol)
def assert_equal(f1, f2):
    """Assert that two (Multi)Fields hold exactly equal values."""
    if isinstance(f1, Field):
        # Plain Field: compare the raw value arrays directly.
        return np.testing.assert_equal(f1.val, f2.val)
    # MultiField: recurse into the entries, matched up by key.
    for key, entry in f1.items():
        assert_equal(entry, f2[key])
def _nozero(fld):
    """Assert that no entry of `fld` is exactly zero."""
    if isinstance(fld, Field):
        # Plain Field: every single value must differ from zero.
        return np.testing.assert_((fld != 0).s_all())
    # MultiField: check each component field recursively.
    for component in fld.values():
        _nozero(component)
def _allzero(fld):
    """Assert that every entry of `fld` equals zero."""
    if not isinstance(fld, Field):
        # MultiField: descend into each component field.
        for subfield in fld.values():
            _allzero(subfield)
        return
    np.testing.assert_((fld == 0.).s_all())
def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol, def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear): only_r_linear):
needed_cap = op.TIMES | op.ADJOINT_TIMES needed_cap = op.TIMES | op.ADJOINT_TIMES
...@@ -83,7 +198,8 @@ def _check_linearity(op, domain_dtype, atol, rtol): ...@@ -83,7 +198,8 @@ def _check_linearity(op, domain_dtype, atol, rtol):
assert_allclose(val1, val2, atol=atol, rtol=rtol) assert_allclose(val1, val2, atol=atol, rtol=rtol)
def _actual_domain_check_linear(op, domain_dtype=None, inp=None): def _domain_check_linear(op, domain_dtype=None, inp=None):
_domain_check(op)
needed_cap = op.TIMES needed_cap = op.TIMES
if (op.capability & needed_cap) != needed_cap: if (op.capability & needed_cap) != needed_cap:
return return
...@@ -95,8 +211,9 @@ def _actual_domain_check_linear(op, domain_dtype=None, inp=None): ...@@ -95,8 +211,9 @@ def _actual_domain_check_linear(op, domain_dtype=None, inp=None):
assert_(op(inp).domain is op.target) assert_(op(inp).domain is op.target)
def _actual_domain_check_nonlinear(op, loc): def _domain_check_nonlinear(op, loc):
assert isinstance(loc, (Field, MultiField)) _domain_check(op)
assert_(isinstance(loc, (Field, MultiField)))
assert_(loc.domain is op.domain) assert_(loc.domain is op.domain)
for wm in [False, True]: for wm in [False, True]:
lin = Linearization.make_var(loc, wm) lin = Linearization.make_var(loc, wm)
...@@ -111,8 +228,8 @@ def _actual_domain_check_nonlinear(op, loc): ...@@ -111,8 +228,8 @@ def _actual_domain_check_nonlinear(op, loc):
assert_(reslin.jac.domain is reslin.domain) assert_(reslin.jac.domain is reslin.domain)
assert_(reslin.jac.target is reslin.target) assert_(reslin.jac.target is reslin.target)
assert_(lin.want_metric == reslin.want_metric) assert_(lin.want_metric == reslin.want_metric)
_actual_domain_check_linear(reslin.jac, inp=loc) _domain_check_linear(reslin.jac, inp=loc)
_actual_domain_check_linear(reslin.jac.adjoint, inp=reslin.jac(loc)) _domain_check_linear(reslin.jac.adjoint, inp=reslin.jac(loc))
if reslin.metric is not None: if reslin.metric is not None:
assert_(reslin.metric.domain is reslin.metric.target) assert_(reslin.metric.domain is reslin.metric.target)
assert_(reslin.metric.domain is op.domain) assert_(reslin.metric.domain is op.domain)
...@@ -164,58 +281,6 @@ def _performance_check(op, pos, raise_on_fail): ...@@ -164,58 +281,6 @@ def _performance_check(op, pos, raise_on_fail):
raise RuntimeError(s) raise RuntimeError(s)
def consistency_check(op, domain_dtype=np.float64, target_dtype=np.float64,
                      atol=0, rtol=1e-7, only_r_linear=False):
    """
    Checks an operator for algebraic consistency of its capabilities.

    Checks whether times(), adjoint_times(), inverse_times() and
    adjoint_inverse_times() (if in capability list) is implemented
    consistently. Additionally, it checks whether the operator is linear.

    Parameters
    ----------
    op : LinearOperator
        Operator which shall be checked.
    domain_dtype : dtype
        The data type of the random vectors in the operator's domain. Default
        is `np.float64`.
    target_dtype : dtype
        The data type of the random vectors in the operator's target. Default
        is `np.float64`.
    atol : float
        Absolute tolerance for the check. If rtol is specified,
        then satisfying any tolerance will let the check pass.
        Default: 0.
    rtol : float
        Relative tolerance for the check. If atol is specified,
        then satisfying any tolerance will let the check pass.
        Default: 1e-7.
    only_r_linear: bool
        set to True if the operator is only R-linear, not C-linear.
        This will relax the adjointness test accordingly.

    Raises
    ------
    TypeError
        If `op` is not a LinearOperator.
    """
    if not isinstance(op, LinearOperator):
        raise TypeError('This test tests only linear operators.')
    _domain_check(op)
    # Domain/target consistency for the operator and all derived variants.
    _actual_domain_check_linear(op, domain_dtype)
    _actual_domain_check_linear(op.adjoint, target_dtype)
    _actual_domain_check_linear(op.inverse, target_dtype)
    _actual_domain_check_linear(op.adjoint.inverse, domain_dtype)
    # Linearity of the operator and all derived variants.
    _check_linearity(op, domain_dtype, atol, rtol)
    _check_linearity(op.adjoint, target_dtype, atol, rtol)
    _check_linearity(op.inverse, target_dtype, atol, rtol)
    _check_linearity(op.adjoint.inverse, domain_dtype, atol, rtol)
    # Full adjointness/inverse implementation checks for all variants; note
    # domain and target dtypes swap for the adjoint and inverse.
    _full_implementation(op, domain_dtype, target_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol,
                         only_r_linear)
    _full_implementation(op.adjoint.inverse, domain_dtype, target_dtype, atol,
                         rtol, only_r_linear)
def _get_acceptable_location(op, loc, lin): def _get_acceptable_location(op, loc, lin):
if not np.isfinite(lin.val.s_sum()): if not np.isfinite(lin.val.s_sum()):
raise ValueError('Initial value must be finite') raise ValueError('Initial value must be finite')
...@@ -248,34 +313,51 @@ def _linearization_value_consistency(op, loc): ...@@ -248,34 +313,51 @@ def _linearization_value_consistency(op, loc):
assert_allclose(fld0, fld1, 0, 1e-7) assert_allclose(fld0, fld1, 0, 1e-7)