Skip to content
Snippets Groups Projects
Commit 548a9287 authored by Jakob Roth's avatar Jakob Roth
Browse files

remove deprecated matplotlib style

parent 5972e1d8
Branches
No related tags found
1 merge request!9Fix ci
Pipeline #180639 passed
%% Cell type:markdown id:14d83955 tags: %% Cell type:markdown id:14d83955 tags:
# Showcasing the Correlated Field model # Showcasing the Correlated Field model
**Skip to `Parameter Showcases` for the meat/veggies ;)** **Skip to `Parameter Showcases` for the meat/veggies ;)**
The field model roughly works like this: The field model roughly works like this:
`f = HT( A * zero_mode * xi ) + offset` `f = HT( A * zero_mode * xi ) + offset`
`A` is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain. `A` is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain.
It is scaled by a zero mode operator and then pointwise multiplied by a Gaussian excitation field, yielding It is scaled by a zero mode operator and then pointwise multiplied by a Gaussian excitation field, yielding
a representation of the field in harmonic space. a representation of the field in harmonic space.
It is then transformed into the target real space and an offset added. It is then transformed into the target real space and an offset added.
The power spectra `A` is constructed of are in turn constructed as the sum of a power law component The power spectra `A` is constructed of are in turn constructed as the sum of a power law component
and an integrated Wiener process whose amplitude and roughness can be set. and an integrated Wiener process whose amplitude and roughness can be set.
## Setup code ## Setup code
%% Cell type:code id:efe07ef1 tags: %% Cell type:code id:efe07ef1 tags:
``` python ``` python
%matplotlib inline %matplotlib inline
import nifty8 as ift import nifty8 as ift
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 100 plt.rcParams['figure.dpi'] = 100
plt.style.use("seaborn-notebook")
import numpy as np import numpy as np
n_pix = 256 n_pix = 256
x_space = ift.RGSpace(n_pix) x_space = ift.RGSpace(n_pix)
ift.random.push_sseq_from_seed(1) ift.random.push_sseq_from_seed(1)
``` ```
%% Cell type:code id:f22e1276 tags: %% Cell type:code id:f22e1276 tags:
``` python ``` python
# Plotting routine # Plotting routine
def plot(fields, spectra, title=None): def plot(fields, spectra, title=None):
# Plotting preparation is normally handled by nifty8.Plot # Plotting preparation is normally handled by nifty8.Plot
# It is done manually here to be able to tweak details # It is done manually here to be able to tweak details
# Fields are assumed to have identical domains # Fields are assumed to have identical domains
fig = plt.figure(tight_layout=True, figsize=(10, 3)) fig = plt.figure(tight_layout=True, figsize=(10, 3))
if title is not None: if title is not None:
fig.suptitle(title, fontsize=14) fig.suptitle(title, fontsize=14)
# Field # Field
ax1 = fig.add_subplot(1, 2, 1) ax1 = fig.add_subplot(1, 2, 1)
ax1.axhline(y=0., color='k', linestyle='--', alpha=0.25) ax1.axhline(y=0., color='k', linestyle='--', alpha=0.25)
for field in fields: for field in fields:
dom = field.domain[0] dom = field.domain[0]
xcoord = np.arange(dom.shape[0]) * dom.distances[0] xcoord = np.arange(dom.shape[0]) * dom.distances[0]
ax1.plot(xcoord, field.val) ax1.plot(xcoord, field.val)
ax1.set_xlim(xcoord[0], xcoord[-1]) ax1.set_xlim(xcoord[0], xcoord[-1])
ax1.set_ylim(-5., 5.) ax1.set_ylim(-5., 5.)
ax1.set_xlabel('x') ax1.set_xlabel('x')
ax1.set_ylabel('f(x)') ax1.set_ylabel('f(x)')
ax1.set_title('Field realizations') ax1.set_title('Field realizations')
# Spectrum # Spectrum
ax2 = fig.add_subplot(1, 2, 2) ax2 = fig.add_subplot(1, 2, 2)
for spectrum in spectra: for spectrum in spectra:
xcoord = spectrum.domain[0].k_lengths xcoord = spectrum.domain[0].k_lengths
ycoord = spectrum.val_rw() ycoord = spectrum.val_rw()
ycoord[0] = ycoord[1] ycoord[0] = ycoord[1]
ax2.plot(xcoord, ycoord) ax2.plot(xcoord, ycoord)
ax2.set_ylim(1e-6, 10.) ax2.set_ylim(1e-6, 10.)
ax2.set_xscale('log') ax2.set_xscale('log')
ax2.set_yscale('log') ax2.set_yscale('log')
ax2.set_xlabel('k') ax2.set_xlabel('k')
ax2.set_ylabel('p(k)') ax2.set_ylabel('p(k)')
ax2.set_title('Power Spectrum') ax2.set_title('Power Spectrum')
fig.align_labels() fig.align_labels()
plt.show() plt.show()
# Helper: draw main sample # Helper: draw main sample
main_sample = None main_sample = None
def init_model(m_pars, fl_pars, matern=False): def init_model(m_pars, fl_pars, matern=False):
global main_sample global main_sample
cf = ift.CorrelatedFieldMaker(m_pars["prefix"]) cf = ift.CorrelatedFieldMaker(m_pars["prefix"])
cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"]) cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"])
cf.add_fluctuations_matern(**fl_pars) if matern else cf.add_fluctuations(**fl_pars) cf.add_fluctuations_matern(**fl_pars) if matern else cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0) field = cf.finalize(prior_info=0)
main_sample = ift.from_random(field.domain) main_sample = ift.from_random(field.domain)
print("model domain keys:", field.domain.keys()) print("model domain keys:", field.domain.keys())
# Helper: field and spectrum from parameter dictionaries + plotting # Helper: field and spectrum from parameter dictionaries + plotting
def eval_model(m_pars, fl_pars, title=None, samples=None, matern=False): def eval_model(m_pars, fl_pars, title=None, samples=None, matern=False):
cf = ift.CorrelatedFieldMaker(m_pars["prefix"]) cf = ift.CorrelatedFieldMaker(m_pars["prefix"])
cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"]) cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"])
cf.add_fluctuations_matern(**fl_pars) if matern else cf.add_fluctuations(**fl_pars) cf.add_fluctuations_matern(**fl_pars) if matern else cf.add_fluctuations(**fl_pars)
field = cf.finalize(prior_info=0) field = cf.finalize(prior_info=0)
spectrum = cf.amplitude spectrum = cf.amplitude
if samples is None: if samples is None:
samples = [main_sample] samples = [main_sample]
field_realizations = [field(s) for s in samples] field_realizations = [field(s) for s in samples]
spectrum_realizations = [spectrum.force(s) for s in samples] spectrum_realizations = [spectrum.force(s) for s in samples]
plot(field_realizations, spectrum_realizations, title) plot(field_realizations, spectrum_realizations, title)
def gen_samples(key_to_vary): def gen_samples(key_to_vary):
if key_to_vary is None: if key_to_vary is None:
return [main_sample] return [main_sample]
dct = main_sample.to_dict() dct = main_sample.to_dict()
subdom_to_vary = dct.pop(key_to_vary).domain subdom_to_vary = dct.pop(key_to_vary).domain
samples = [] samples = []
for i in range(8): for i in range(8):
d = dct.copy() d = dct.copy()
d[key_to_vary] = ift.from_random(subdom_to_vary) d[key_to_vary] = ift.from_random(subdom_to_vary)
samples.append(ift.MultiField.from_dict(d)) samples.append(ift.MultiField.from_dict(d))
return samples return samples
def vary_parameter(parameter_key, values, samples_vary_in=None, matern=False): def vary_parameter(parameter_key, values, samples_vary_in=None, matern=False):
s = gen_samples(samples_vary_in) s = gen_samples(samples_vary_in)
for v in values: for v in values:
if parameter_key in cf_make_pars.keys(): if parameter_key in cf_make_pars.keys():
m_pars = {**cf_make_pars, parameter_key: v} m_pars = {**cf_make_pars, parameter_key: v}
eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s, matern) eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s, matern)
else: else:
fl_pars = {**cf_x_fluct_pars, parameter_key: v} fl_pars = {**cf_x_fluct_pars, parameter_key: v}
eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s, matern) eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s, matern)
``` ```
%% Cell type:markdown id:6a3ab2c9 tags: %% Cell type:markdown id:6a3ab2c9 tags:
## Before the Action: The Moment-Matched Log-Normal Distribution ## Before the Action: The Moment-Matched Log-Normal Distribution
Many properties of the correlated field are modelled as being lognormally distributed. Many properties of the correlated field are modelled as being lognormally distributed.
The distribution models are parametrized via their means and standard-deviations (first and second position in tuple). The distribution models are parametrized via their means and standard-deviations (first and second position in tuple).
To get a feeling of how the ratio of the `mean` and `stddev` parameters influences the distribution shape, To get a feeling of how the ratio of the `mean` and `stddev` parameters influences the distribution shape,
here are a few example histograms: (observe the x-axis!) here are a few example histograms: (observe the x-axis!)
%% Cell type:code id:d1664307 tags: %% Cell type:code id:d1664307 tags:
``` python ``` python
fig = plt.figure(figsize=(13, 3.5)) fig = plt.figure(figsize=(13, 3.5))
mean = 1.0 mean = 1.0
sigmas = [1.0, 0.5, 0.1] sigmas = [1.0, 0.5, 0.1]
for i in range(3): for i in range(3):
op = ift.LognormalTransform(mean=mean, sigma=sigmas[i], op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],
key='foo', N_copies=0) key='foo', N_copies=0)
op_samples = np.array( op_samples = np.array(
[op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]]) [op(s).val for s in [ift.from_random(op.domain) for i in range(10000)]])
ax = fig.add_subplot(1, 3, i + 1) ax = fig.add_subplot(1, 3, i + 1)
ax.hist(op_samples, bins=50) ax.hist(op_samples, bins=50)
ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}") ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}")
ax.set_xlabel('x') ax.set_xlabel('x')
del op_samples del op_samples
plt.show() plt.show()
``` ```
%% Cell type:markdown id:0c5435cf tags: %% Cell type:markdown id:0c5435cf tags:
## The Neutral Field ## The Neutral Field
To demonstrate the effect of all parameters, first a 'neutral' set of parameters To demonstrate the effect of all parameters, first a 'neutral' set of parameters
is defined which then are varied one by one, showing the effect of the variation is defined which then are varied one by one, showing the effect of the variation
on the generated field realizations and the underlying power spectrum from which on the generated field realizations and the underlying power spectrum from which
they were drawn. they were drawn.
As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen. As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen.
%% Cell type:code id:bec31c51 tags: %% Cell type:code id:bec31c51 tags:
``` python ``` python
# Neutral model parameters yielding a quasi-constant field # Neutral model parameters yielding a quasi-constant field
cf_make_pars = { cf_make_pars = {
'offset_mean': 0., 'offset_mean': 0.,
'offset_std': (1e-3, 1e-16), 'offset_std': (1e-3, 1e-16),
'prefix': '' 'prefix': ''
} }
cf_x_fluct_pars = { cf_x_fluct_pars = {
'target_subdomain': x_space, 'target_subdomain': x_space,
'fluctuations': (1e-3, 1e-16), 'fluctuations': (1e-3, 1e-16),
'flexibility': (1e-3, 1e-16), 'flexibility': (1e-3, 1e-16),
'asperity': (1e-3, 1e-16), 'asperity': (1e-3, 1e-16),
'loglogavgslope': (0., 1e-16) 'loglogavgslope': (0., 1e-16)
} }
init_model(cf_make_pars, cf_x_fluct_pars) init_model(cf_make_pars, cf_x_fluct_pars)
``` ```
%% Cell type:code id:9cd804c2 tags: %% Cell type:code id:9cd804c2 tags:
``` python ``` python
# Show neutral field # Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field") eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field")
``` ```
%% Cell type:markdown id:b051a975 tags: %% Cell type:markdown id:b051a975 tags:
# Parameter Showcases # Parameter Showcases
## The `fluctuations` parameters of `add_fluctuations()` ## The `fluctuations` parameters of `add_fluctuations()`
determine the **amplitude of variations along the field dimension** determine the **amplitude of variations along the field dimension**
for which `add_fluctuations` is called. for which `add_fluctuations` is called.
`fluctuations[0]` sets the _average_ amplitude of the field's fluctuations along the given dimension,\ `fluctuations[0]` sets the _average_ amplitude of the field's fluctuations along the given dimension,\
`fluctuations[1]` sets the width and shape of the amplitude distribution. `fluctuations[1]` sets the width and shape of the amplitude distribution.
The amplitude is modelled as being log-normally distributed, The amplitude is modelled as being log-normally distributed,
see `The Moment-Matched Log-Normal Distribution` above for details. see `The Moment-Matched Log-Normal Distribution` above for details.
#### `fluctuations` mean: #### `fluctuations` mean:
%% Cell type:code id:173d9eb2 tags: %% Cell type:code id:173d9eb2 tags:
``` python ``` python
vary_parameter('fluctuations', [(0.05, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi') vary_parameter('fluctuations', [(0.05, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi')
``` ```
%% Cell type:markdown id:8e348a20 tags: %% Cell type:markdown id:8e348a20 tags:
#### `fluctuations` std: #### `fluctuations` std:
%% Cell type:code id:99064d12 tags: %% Cell type:code id:99064d12 tags:
``` python ``` python
vary_parameter('fluctuations', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='fluctuations') vary_parameter('fluctuations', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='fluctuations')
cf_x_fluct_pars['fluctuations'] = (1., 1e-16) cf_x_fluct_pars['fluctuations'] = (1., 1e-16)
``` ```
%% Cell type:markdown id:f2c26fbf tags: %% Cell type:markdown id:f2c26fbf tags:
## The `loglogavgslope` parameters of `add_fluctuations()` ## The `loglogavgslope` parameters of `add_fluctuations()`
determine **the slope of the loglog-linear (power law) component of the power spectrum**. determine **the slope of the loglog-linear (power law) component of the power spectrum**.
The slope is modelled to be normally distributed. The slope is modelled to be normally distributed.
#### `loglogavgslope` mean: #### `loglogavgslope` mean:
%% Cell type:code id:84847644 tags: %% Cell type:code id:84847644 tags:
``` python ``` python
vary_parameter('loglogavgslope', [(-6., 1e-16), (-2., 1e-16), (2., 1e-16)], samples_vary_in='xi') vary_parameter('loglogavgslope', [(-6., 1e-16), (-2., 1e-16), (2., 1e-16)], samples_vary_in='xi')
``` ```
%% Cell type:markdown id:6ed6c403 tags: %% Cell type:markdown id:6ed6c403 tags:
#### `loglogavgslope` std: #### `loglogavgslope` std:
%% Cell type:code id:cbb51f3c tags: %% Cell type:code id:cbb51f3c tags:
``` python ``` python
vary_parameter('loglogavgslope', [(-2., 0.02), (-2., 0.2), (-2., 2.0)], samples_vary_in='loglogavgslope') vary_parameter('loglogavgslope', [(-2., 0.02), (-2., 0.2), (-2., 2.0)], samples_vary_in='loglogavgslope')
cf_x_fluct_pars['loglogavgslope'] = (-2., 1e-16) cf_x_fluct_pars['loglogavgslope'] = (-2., 1e-16)
``` ```
%% Cell type:markdown id:33036677 tags: %% Cell type:markdown id:33036677 tags:
## The `flexibility` parameters of `add_fluctuations()` ## The `flexibility` parameters of `add_fluctuations()`
determine **the amplitude of the integrated Wiener process component of the power spectrum** determine **the amplitude of the integrated Wiener process component of the power spectrum**
(how strong the power spectrum varies besides the power-law). (how strong the power spectrum varies besides the power-law).
`flexibility[0]` sets the _average_ amplitude of the i.w.p. component,\ `flexibility[0]` sets the _average_ amplitude of the i.w.p. component,\
`flexibility[1]` sets how much the amplitude can vary.\ `flexibility[1]` sets how much the amplitude can vary.\
These two parameters feed into a moment-matched log-normal distribution model, These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior. see above for a demo of its behavior.
#### `flexibility` mean: #### `flexibility` mean:
%% Cell type:code id:f3226c12 tags: %% Cell type:code id:f3226c12 tags:
``` python ``` python
vary_parameter('flexibility', [(0.4, 1e-16), (4.0, 1e-16), (12.0, 1e-16)], samples_vary_in='spectrum') vary_parameter('flexibility', [(0.4, 1e-16), (4.0, 1e-16), (12.0, 1e-16)], samples_vary_in='spectrum')
``` ```
%% Cell type:markdown id:d23fab8f tags: %% Cell type:markdown id:d23fab8f tags:
#### `flexibility` std: #### `flexibility` std:
%% Cell type:code id:0a29cd4f tags: %% Cell type:code id:0a29cd4f tags:
``` python ``` python
vary_parameter('flexibility', [(4., 0.02), (4., 0.2), (4., 2.)], samples_vary_in='flexibility') vary_parameter('flexibility', [(4., 0.02), (4., 0.2), (4., 2.)], samples_vary_in='flexibility')
cf_x_fluct_pars['flexibility'] = (4., 1e-16) cf_x_fluct_pars['flexibility'] = (4., 1e-16)
``` ```
%% Cell type:markdown id:61b1f4dc tags: %% Cell type:markdown id:61b1f4dc tags:
## The `asperity` parameters of `add_fluctuations()` ## The `asperity` parameters of `add_fluctuations()`
`asperity` determines **how rough the integrated Wiener process component of the power spectrum is**. `asperity` determines **how rough the integrated Wiener process component of the power spectrum is**.
`asperity[0]` sets the average roughness, `asperity[1]` sets how much the roughness can vary.\ `asperity[0]` sets the average roughness, `asperity[1]` sets how much the roughness can vary.\
These two parameters feed into a moment-matched log-normal distribution model, These two parameters feed into a moment-matched log-normal distribution model,
see above for a demo of its behavior. see above for a demo of its behavior.
#### `asperity` mean: #### `asperity` mean:
%% Cell type:code id:bbc3ff74 tags: %% Cell type:code id:bbc3ff74 tags:
``` python ``` python
vary_parameter('asperity', [(0.001, 1e-16), (1.0, 1e-16), (5., 1e-16)], samples_vary_in='spectrum') vary_parameter('asperity', [(0.001, 1e-16), (1.0, 1e-16), (5., 1e-16)], samples_vary_in='spectrum')
``` ```
%% Cell type:markdown id:c8686aaf tags: %% Cell type:markdown id:c8686aaf tags:
#### `asperity` std: #### `asperity` std:
%% Cell type:code id:8873d699 tags: %% Cell type:code id:8873d699 tags:
``` python ``` python
vary_parameter('asperity', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='asperity') vary_parameter('asperity', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='asperity')
cf_x_fluct_pars['asperity'] = (1., 1e-16) cf_x_fluct_pars['asperity'] = (1., 1e-16)
``` ```
%% Cell type:markdown id:0274688f tags: %% Cell type:markdown id:0274688f tags:
## The `offset_mean` parameter of `CorrelatedFieldMaker()` ## The `offset_mean` parameter of `CorrelatedFieldMaker()`
The `offset_mean` parameter defines a global additive offset on the field realizations. The `offset_mean` parameter defines a global additive offset on the field realizations.
If the field is used for a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset. If the field is used for a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset.
%% Cell type:code id:1effc123 tags: %% Cell type:code id:1effc123 tags:
``` python ``` python
# Reset model to neutral # Reset model to neutral
cf_x_fluct_pars['fluctuations'] = (1e-3, 1e-16) cf_x_fluct_pars['fluctuations'] = (1e-3, 1e-16)
cf_x_fluct_pars['flexibility'] = (1e-3, 1e-16) cf_x_fluct_pars['flexibility'] = (1e-3, 1e-16)
cf_x_fluct_pars['asperity'] = (1e-3, 1e-16) cf_x_fluct_pars['asperity'] = (1e-3, 1e-16)
cf_x_fluct_pars['loglogavgslope'] = (1e-3, 1e-16) cf_x_fluct_pars['loglogavgslope'] = (1e-3, 1e-16)
``` ```
%% Cell type:code id:77fd4d0d tags: %% Cell type:code id:77fd4d0d tags:
``` python ``` python
vary_parameter('offset_mean', [3., 0., -2.]) vary_parameter('offset_mean', [3., 0., -2.])
``` ```
%% Cell type:markdown id:46a3331a tags: %% Cell type:markdown id:46a3331a tags:
## The `offset_std` parameters of `CorrelatedFieldMaker()` ## The `offset_std` parameters of `CorrelatedFieldMaker()`
Variations of the global offset of the field are modelled as being log-normally distributed. Variations of the global offset of the field are modelled as being log-normally distributed.
See `The Moment-Matched Log-Normal Distribution` above for details. See `The Moment-Matched Log-Normal Distribution` above for details.
The `offset_std[0]` parameter sets how much NIFTy will vary the offset *on average*.\ The `offset_std[0]` parameter sets how much NIFTy will vary the offset *on average*.\
The `offset_std[1]` parameter defines the width and shape of the offset variation distribution. The `offset_std[1]` parameter defines the width and shape of the offset variation distribution.
#### `offset_std` mean: #### `offset_std` mean:
%% Cell type:code id:18d3bfea tags: %% Cell type:code id:18d3bfea tags:
``` python ``` python
vary_parameter('offset_std', [(1e-16, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi') vary_parameter('offset_std', [(1e-16, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi')
``` ```
%% Cell type:markdown id:5efbd81e tags: %% Cell type:markdown id:5efbd81e tags:
#### `offset_std` std: #### `offset_std` std:
%% Cell type:code id:4c724a7d tags: %% Cell type:code id:4c724a7d tags:
``` python ``` python
vary_parameter('offset_std', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='zeromode') vary_parameter('offset_std', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='zeromode')
``` ```
%% Cell type:markdown id:490f6f0f tags: %% Cell type:markdown id:490f6f0f tags:
## Matern fluctuation kernels ## Matern fluctuation kernels
The correlated fields model also supports parametrizing the power spectra of field dimensions The correlated fields model also supports parametrizing the power spectra of field dimensions
using Matern kernels. In the following, the effects of their parameters are demonstrated. using Matern kernels. In the following, the effects of their parameters are demonstrated.
Contrary to the field fluctuations parametrization showed above, the Matern kernel Contrary to the field fluctuations parametrization showed above, the Matern kernel
parameters show strong interactions. For example, the field amplitude does not only depend on the parameters show strong interactions. For example, the field amplitude does not only depend on the
amplitude scaling parameter `scale`, but on the combination of all three parameters `scale`, amplitude scaling parameter `scale`, but on the combination of all three parameters `scale`,
`cutoff` and `loglogslope`. `cutoff` and `loglogslope`.
%% Cell type:code id:c485cfe3 tags: %% Cell type:code id:c485cfe3 tags:
``` python ``` python
# Neutral model parameters yielding a quasi-constant field # Neutral model parameters yielding a quasi-constant field
cf_make_pars = { cf_make_pars = {
'offset_mean': 0., 'offset_mean': 0.,
'offset_std': (1e-3, 1e-16), 'offset_std': (1e-3, 1e-16),
'prefix': '' 'prefix': ''
} }
``` ```
%% Cell type:code id:6a6c80d8 tags: %% Cell type:code id:6a6c80d8 tags:
``` python ``` python
cf_x_fluct_pars = { cf_x_fluct_pars = {
'target_subdomain': x_space, 'target_subdomain': x_space,
'scale': (1e-2, 1e-16), 'scale': (1e-2, 1e-16),
'cutoff': (1., 1e-16), 'cutoff': (1., 1e-16),
'loglogslope': (-2.0, 1e-16) 'loglogslope': (-2.0, 1e-16)
} }
``` ```
%% Cell type:code id:f07425da tags: %% Cell type:code id:f07425da tags:
``` python ``` python
init_model(cf_make_pars, cf_x_fluct_pars, matern=True) init_model(cf_make_pars, cf_x_fluct_pars, matern=True)
``` ```
%% Cell type:code id:315a0ee1 tags: %% Cell type:code id:315a0ee1 tags:
``` python ``` python
# Show neutral field # Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field", matern=True) eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field", matern=True)
``` ```
%% Cell type:markdown id:9c209c3b tags: %% Cell type:markdown id:9c209c3b tags:
# Parameter Showcases # Parameter Showcases
## The `scale` parameters of `add_fluctuations_matern()` ## The `scale` parameters of `add_fluctuations_matern()`
determine the **overall amplitude scaling factor of fluctuations in the target subdomain** determine the **overall amplitude scaling factor of fluctuations in the target subdomain**
for which `add_fluctuations_matern` is called. for which `add_fluctuations_matern` is called.
**It does not set the absolute amplitude**, which depends on all other parameters, too. **It does not set the absolute amplitude**, which depends on all other parameters, too.
`scale[0]` sets the _average_ amplitude scaling factor of the fields' fluctuations along the given dimension,\ `scale[0]` sets the _average_ amplitude scaling factor of the fields' fluctuations along the given dimension,\
`scale[1]` sets the width and shape of the scaling factor distribution. `scale[1]` sets the width and shape of the scaling factor distribution.
The scaling factor is modelled as being log-normally distributed, The scaling factor is modelled as being log-normally distributed,
see `The Moment-Matched Log-Normal Distribution` above for details. see `The Moment-Matched Log-Normal Distribution` above for details.
#### `scale` mean: #### `scale` mean:
%% Cell type:code id:e852d50e tags: %% Cell type:code id:e852d50e tags:
``` python ``` python
vary_parameter('scale', [(0.01, 1e-16), (0.1, 1e-16), (1.0, 1e-16)], samples_vary_in='xi', matern=True) vary_parameter('scale', [(0.01, 1e-16), (0.1, 1e-16), (1.0, 1e-16)], samples_vary_in='xi', matern=True)
``` ```
%% Cell type:markdown id:abf0bab5 tags: %% Cell type:markdown id:abf0bab5 tags:
#### `scale` std: #### `scale` std:
%% Cell type:code id:ea2d890d tags: %% Cell type:code id:ea2d890d tags:
``` python ``` python
vary_parameter('scale', [(0.5, 0.01), (0.5, 0.1), (0.5, 0.5)], samples_vary_in='scale', matern=True) vary_parameter('scale', [(0.5, 0.01), (0.5, 0.1), (0.5, 0.5)], samples_vary_in='scale', matern=True)
cf_x_fluct_pars['scale'] = (0.5, 1e-16) cf_x_fluct_pars['scale'] = (0.5, 1e-16)
``` ```
%% Cell type:markdown id:7f11ef1f tags: %% Cell type:markdown id:7f11ef1f tags:
## The `loglogslope` parameters of `add_fluctuations_matern()` ## The `loglogslope` parameters of `add_fluctuations_matern()`
determine **the slope of the loglog-linear (power law) component of the power spectrum**. determine **the slope of the loglog-linear (power law) component of the power spectrum**.
`loglogslope[0]` sets the _average_ power law exponent of the fields' power spectrum along the given dimension,\ `loglogslope[0]` sets the _average_ power law exponent of the fields' power spectrum along the given dimension,\
`loglogslope[1]` sets the width and shape of the exponent distribution. `loglogslope[1]` sets the width and shape of the exponent distribution.
The `loglogslope` is modelled to be normally distributed. The `loglogslope` is modelled to be normally distributed.
#### `loglogslope` mean: #### `loglogslope` mean:
%% Cell type:code id:1b650520 tags: %% Cell type:code id:1b650520 tags:
``` python ``` python
vary_parameter('loglogslope', [(-4.0, 1e-16), (-2.0, 1e-16), (-1.0, 1e-16)], samples_vary_in='xi', matern=True) vary_parameter('loglogslope', [(-4.0, 1e-16), (-2.0, 1e-16), (-1.0, 1e-16)], samples_vary_in='xi', matern=True)
``` ```
%% Cell type:markdown id:9bc6502f tags: %% Cell type:markdown id:9bc6502f tags:
As one can see, the field amplitude also depends on the `loglogslope` parameter. As one can see, the field amplitude also depends on the `loglogslope` parameter.
#### `loglogslope` std: #### `loglogslope` std:
%% Cell type:code id:6adc2da6 tags: %% Cell type:code id:6adc2da6 tags:
``` python ``` python
vary_parameter('loglogslope', [(-3., 0.01), (-3., 0.5), (-3., 1.0)], samples_vary_in='loglogslope', matern=True) vary_parameter('loglogslope', [(-3., 0.01), (-3., 0.5), (-3., 1.0)], samples_vary_in='loglogslope', matern=True)
``` ```
%% Cell type:markdown id:47284a68 tags: %% Cell type:markdown id:47284a68 tags:
## The `cutoff` parameters of `add_fluctuations_matern()` ## The `cutoff` parameters of `add_fluctuations_matern()`
determines **at what wavevector length the power spectrum should transition from constant power determines **at what wavevector length the power spectrum should transition from constant power
to following the powerlaw set by `loglogslope`**. to following the powerlaw set by `loglogslope`**.
`cutoff[0]` sets the _average_ wavevector length at which the power spectrum transition occurs,\ `cutoff[0]` sets the _average_ wavevector length at which the power spectrum transition occurs,\
`cutoff[1]` sets the width and shape of the transition wavevector length distribution. `cutoff[1]` sets the width and shape of the transition wavevector length distribution.
The cutoff is modelled as being log-normally distributed, The cutoff is modelled as being log-normally distributed,
see `The Moment-Matched Log-Normal Distribution` above for details. see `The Moment-Matched Log-Normal Distribution` above for details.
#### `cutoff` mean: #### `cutoff` mean:
%% Cell type:code id:f19e065c tags: %% Cell type:code id:f19e065c tags:
``` python ``` python
cf_x_fluct_pars['loglogslope'] = (-8.0, 1e-16) cf_x_fluct_pars['loglogslope'] = (-8.0, 1e-16)
vary_parameter('cutoff', [(1.0, 1e-16), (3.16, 1e-16), (10.0, 1e-16)], samples_vary_in='xi', matern=True) vary_parameter('cutoff', [(1.0, 1e-16), (3.16, 1e-16), (10.0, 1e-16)], samples_vary_in='xi', matern=True)
``` ```
%% Cell type:markdown id:9d188a39 tags: %% Cell type:markdown id:9d188a39 tags:
#### `cutoff` std: #### `cutoff` std:
%% Cell type:code id:fd16625d tags: %% Cell type:code id:fd16625d tags:
``` python ``` python
vary_parameter('cutoff', [(10., 1.0), (10., 3.16), (10., 10.)], samples_vary_in='cutoff', matern=True) vary_parameter('cutoff', [(10., 1.0), (10., 3.16), (10., 10.)], samples_vary_in='cutoff', matern=True)
``` ```
......
%% Cell type:markdown id:41c34610 tags: %% Cell type:markdown id:41c34610 tags:
This program is free software: you can redistribute it and/or modify This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or the Free Software Foundation, either version 3 of the License, or
(at your option) any later version. (at your option) any later version.
This program is distributed in the hope that it will be useful, This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. GNU General Public License for more details.
You should have received a copy of the GNU General Public License You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>. along with this program. If not, see <http://www.gnu.org/licenses/>.
Copyright(C) 2013-2022 Max-Planck-Society Copyright(C) 2013-2022 Max-Planck-Society
NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik. NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
%% Cell type:markdown id:40ff38e3 tags: %% Cell type:markdown id:40ff38e3 tags:
# Code example: Wiener filter # Code example: Wiener filter
%% Cell type:markdown id:751fb241 tags: %% Cell type:markdown id:751fb241 tags:
## Introduction to Information Field Theory (IFT) ## Introduction to Information Field Theory (IFT)
We start with the measurement equation We start with the measurement equation
$$d_i = (Rs)_i+n_i$$ $$d_i = (Rs)_i+n_i$$
Here, $s$ is a continuous field, $d$ a discrete data vector, $n$ is the discrete noise on each data point and $R$ is the instrument response. Here, $s$ is a continuous field, $d$ a discrete data vector, $n$ is the discrete noise on each data point and $R$ is the instrument response.
In most cases, $R$ is not invertible. In most cases, $R$ is not invertible.
IFT aims at **inverting** the above uninvertible problem in the **best possible way** using Bayesian statistics. IFT aims at **inverting** the above uninvertible problem in the **best possible way** using Bayesian statistics.
NIFTy (Numerical Information Field Theory) is a Python framework in which IFT problems can be tackled easily. NIFTy (Numerical Information Field Theory) is a Python framework in which IFT problems can be tackled easily.
Its main interfaces are: Its main interfaces are:
- **Spaces**: Cartesian, 2-Spheres (Healpix, Gauss-Legendre) and their respective harmonic spaces. - **Spaces**: Cartesian, 2-Spheres (Healpix, Gauss-Legendre) and their respective harmonic spaces.
- **Fields**: Defined on spaces. - **Fields**: Defined on spaces.
- **Operators**: Acting on fields. - **Operators**: Acting on fields.
%% Cell type:markdown id:1c53d3d5 tags: %% Cell type:markdown id:1c53d3d5 tags:
## Wiener filter on 1D- fields in IFT ## Wiener filter on 1D- fields in IFT
### Assumptions ### Assumptions
- We consider a linear response R in the measurement equation $d=Rs+n$. - We consider a linear response R in the measurement equation $d=Rs+n$.
- We assume the **signal** and the **noise** prior to be **Gaussian** $\mathcal P (s) = \mathcal G (s,S)$, $\mathcal P (n) = \mathcal G (n,N)$ - We assume the **signal** and the **noise** prior to be **Gaussian** $\mathcal P (s) = \mathcal G (s,S)$, $\mathcal P (n) = \mathcal G (n,N)$
- Here $S, N$ are signal and noise covariances. Therefore they are positive definite matrices. - Here $S, N$ are signal and noise covariances. Therefore they are positive definite matrices.
### Wiener filter solution ### Wiener filter solution
- Making use of Bayes' theorem, the posterior is proportional to the joint probability and is given by: - Making use of Bayes' theorem, the posterior is proportional to the joint probability and is given by:
$$\mathcal P (s|d) \propto P(s,d) = \mathcal G(d-Rs,N) \,\mathcal G(s,S) \propto \mathcal G (s-m,D)$$ $$\mathcal P (s|d) \propto P(s,d) = \mathcal G(d-Rs,N) \,\mathcal G(s,S) \propto \mathcal G (s-m,D)$$
- Here, $m$ is the posterior mean , $D$ is the information propagator and are defined as follows: - Here, $m$ is the posterior mean , $D$ is the information propagator and are defined as follows:
$$m = Dj, \quad D = (S^{-1} +R^\dagger N^{-1} R)^{-1} $$ $$m = Dj, \quad D = (S^{-1} +R^\dagger N^{-1} R)^{-1} $$
- There, $j$ is the information source defined as $$ j = R^\dagger N^{-1} d.$$ - There, $j$ is the information source defined as $$ j = R^\dagger N^{-1} d.$$
Let us implement this in **NIFTy!** So let's import all the packages we need. Let us implement this in **NIFTy!** So let's import all the packages we need.
%% Cell type:code id:73573037 tags: %% Cell type:code id:73573037 tags:
``` python ``` python
%matplotlib inline %matplotlib inline
import numpy as np import numpy as np
import nifty8 as ift import nifty8 as ift
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 100 plt.rcParams['figure.dpi'] = 100
plt.style.use("seaborn-notebook")
``` ```
%% Cell type:markdown id:1b447738 tags: %% Cell type:markdown id:1b447738 tags:
### Implementation in NIFTy ### Implementation in NIFTy
We assume statistical **homogeneity** and **isotropy**, so the signal covariance $S$ is **translation invariant** and only depends on the **absolute value** of the distance. We assume statistical **homogeneity** and **isotropy**, so the signal covariance $S$ is **translation invariant** and only depends on the **absolute value** of the distance.
According to Wiener-Khinchin theorem, the signal covariance $S$ is diagonal in harmonic space, $$S_{kk^{\prime}} = 2 \pi \delta(k-k^{\prime}) P(k)= \text{diag}(S) \equiv \widehat{S_k}$$ and is described by a one-dimensional power spectrum. According to Wiener-Khinchin theorem, the signal covariance $S$ is diagonal in harmonic space, $$S_{kk^{\prime}} = 2 \pi \delta(k-k^{\prime}) P(k)= \text{diag}(S) \equiv \widehat{S_k}$$ and is described by a one-dimensional power spectrum.
We assume the power spectrum to follow a power-law, $$P(k) = P_0\,\left(1+\left(\frac{k}{k_0}\right)^2\right)^{-\gamma /2},$$ with $P_0 = 2 \cdot 10^4, \ k_0 = 5, \ \gamma = 4$, thus the reconstruction starts in harmonic space. We assume the power spectrum to follow a power-law, $$P(k) = P_0\,\left(1+\left(\frac{k}{k_0}\right)^2\right)^{-\gamma /2},$$ with $P_0 = 2 \cdot 10^4, \ k_0 = 5, \ \gamma = 4$, thus the reconstruction starts in harmonic space.
%% Cell type:code id:c3a935c1 tags: %% Cell type:code id:c3a935c1 tags:
``` python ``` python
def pow_spec(k):
    """Power-law power spectrum P(k) = P0 * (1 + (k/k0)^2)^(-gamma/2) of the signal prior."""
    amplitude = 2e4  # P_0, overall spectral amplitude
    pivot = 5        # k_0, knee scale of the power law
    slope = 4        # gamma, asymptotic spectral index
    return amplitude / ((1. + (k / pivot)**2)**(slope / 2))
``` ```
%% Cell type:markdown id:5ce7c229 tags: %% Cell type:markdown id:5ce7c229 tags:
### Spaces and harmonic transformations ### Spaces and harmonic transformations
- We define our non-harmonic signal space to be Cartesian with $N_{pix} = 512$ being the number of grid cells. - We define our non-harmonic signal space to be Cartesian with $N_{pix} = 512$ being the number of grid cells.
- To connect harmonic and non-harmonic spaces we introduce the Hartley transform $H$ that is closely related to the Fourier transform but maps $\mathbb{R}\rightarrow\mathbb{R}$. - To connect harmonic and non-harmonic spaces we introduce the Hartley transform $H$ that is closely related to the Fourier transform but maps $\mathbb{R}\rightarrow\mathbb{R}$.
- The covariance S in non-harmonic space is given by $$S = H^{\dagger}\widehat{S_k} H \ .$$ - The covariance S in non-harmonic space is given by $$S = H^{\dagger}\widehat{S_k} H \ .$$
%% Cell type:code id:f754e1b4 tags: %% Cell type:code id:f754e1b4 tags:
``` python ``` python
# Signal space is a regular Cartesian grid space # Signal space is a regular Cartesian grid space
# N_pix is the number of grid cells of the one-dimensional signal domain.
N_pix = 512 N_pix = 512
s_space = ift.RGSpace(N_pix) s_space = ift.RGSpace(N_pix)
``` ```
%% Cell type:code id:6c25ac41 tags: %% Cell type:code id:6c25ac41 tags:
``` python ``` python
# k_space is the harmonic conjugate space of s_space # k_space is the harmonic conjugate space of s_space
# The Hartley transform maps real fields to real fields, so no complex dtype is needed.
HT = ift.HartleyOperator(s_space) HT = ift.HartleyOperator(s_space)
k_space = HT.target k_space = HT.target
``` ```
%% Cell type:code id:359f07ea tags: %% Cell type:code id:359f07ea tags:
``` python ``` python
# Diagonal prior covariance in harmonic space, built from the 1D power spectrum pow_spec.
S_k = ift.create_power_operator(k_space, power_spectrum=pow_spec, sampling_dtype=float) S_k = ift.create_power_operator(k_space, power_spectrum=pow_spec, sampling_dtype=float)
``` ```
%% Cell type:code id:f55022ba tags: %% Cell type:code id:f55022ba tags:
``` python ``` python
# Sandwich Operator implements S = HT.adjoint @ S_k @ HT and enables NIFTy to sample from S # Sandwich Operator implements S = HT.adjoint @ S_k @ HT and enables NIFTy to sample from S
# S is the signal prior covariance expressed in position (non-harmonic) space.
S = ift.SandwichOperator.make(bun=HT, cheese=S_k) S = ift.SandwichOperator.make(bun=HT, cheese=S_k)
``` ```
%% Cell type:markdown id:a84f4b80 tags: %% Cell type:markdown id:a84f4b80 tags:
### Synthetic Data ### Synthetic Data
- In order to demonstrate the Wiener filter, we use **synthetic data**. Therefore, we draw a sample $\tilde{s}$ from $S$. (see Sampling) - In order to demonstrate the Wiener filter, we use **synthetic data**. Therefore, we draw a sample $\tilde{s}$ from $S$. (see Sampling)
- For simplicity we define the response operator as a unit matrix, $R = \mathbb{1}$. - For simplicity we define the response operator as a unit matrix, $R = \mathbb{1}$.
- We assume the noise covariance to be uncorrelated and constant, $N = 0.2 \cdot \mathbb{1}$ and draw a sample $\tilde{n}$. - We assume the noise covariance to be uncorrelated and constant, $N = 0.2 \cdot \mathbb{1}$ and draw a sample $\tilde{n}$.
- Thus the synthetic data $d = R(\tilde{s}) + \tilde{n}$. - Thus the synthetic data $d = R(\tilde{s}) + \tilde{n}$.
%% Cell type:markdown id:387bd2c5 tags: %% Cell type:markdown id:387bd2c5 tags:
### Sampling ### Sampling
- Assuming we have a distribution $\mathcal{G}(b,B)$ we can sample from and we want to draw a sample from a distribution $\mathcal{G}(c,C)$ with covariance $C$. The two distributions are connected via the relation $C = ABA^{\dagger}.$ One can show that $c= Ab$ with $b \curvearrowleft \mathcal{G}(b,B)$ has a probability distribution with covariance $C$ as desired. - Assuming we have a distribution $\mathcal{G}(b,B)$ we can sample from and we want to draw a sample from a distribution $\mathcal{G}(c,C)$ with covariance $C$. The two distributions are connected via the relation $C = ABA^{\dagger}.$ One can show that $c= Ab$ with $b \curvearrowleft \mathcal{G}(b,B)$ has a probability distribution with covariance $C$ as desired.
$$ \langle cc^\dagger\rangle_{\mathcal{G}(c,C)} = \langle Ab(Ab)^\dagger\rangle_{\mathcal{G}(b,B)} = \langle Abb^\dagger A^\dagger \rangle = A \langle bb^\dagger \rangle A^\dagger = ABA^\dagger = C$$ $$ \langle cc^\dagger\rangle_{\mathcal{G}(c,C)} = \langle Ab(Ab)^\dagger\rangle_{\mathcal{G}(b,B)} = \langle Abb^\dagger A^\dagger \rangle = A \langle bb^\dagger \rangle A^\dagger = ABA^\dagger = C$$
- This is also true for the case that $B = \mathbb{1}$, meaning that $b \curvearrowleft \mathcal{G}(b,\mathbb{1})$; thus $C = AA^{\dagger}$. - This is also true for the case that $B = \mathbb{1}$, meaning that $b \curvearrowleft \mathcal{G}(b,\mathbb{1})$; thus $C = AA^{\dagger}$.
- Note that, if $C$ is diagonal, $A$ is diagonal as well. - Note that, if $C$ is diagonal, $A$ is diagonal as well.
- It can be shown that if $C = A + B$, then $c = a + b$ with $b \curvearrowleft \mathcal{G}(b,B)$ and $a \curvearrowleft \mathcal{G}(a,A)$ has a probability distribution with covariance $C$ as desired. - It can be shown that if $C = A + B$, then $c = a + b$ with $b \curvearrowleft \mathcal{G}(b,B)$ and $a \curvearrowleft \mathcal{G}(a,A)$ has a probability distribution with covariance $C$ as desired.
- If we can draw samples from $\mathcal{G}(a,A)$, and we want to draw a sample with the covariance $A^{-1}$, one can simply show that $c = A^{-1}a$ has a probability distribution with covariance $A^{-1}$. - If we can draw samples from $\mathcal{G}(a,A)$, and we want to draw a sample with the covariance $A^{-1}$, one can simply show that $c = A^{-1}a$ has a probability distribution with covariance $A^{-1}$.
$$\langle c c^{\dagger} \rangle = \langle A^{-1}aa^{\dagger}(A^{-1})^{\dagger} \rangle = A^{-1}\langle aa^{\dagger}\rangle(A^{-1})^{\dagger} = A^{-1} A(A^{-1})^{\dagger} =A^{-1}$$ $$\langle c c^{\dagger} \rangle = \langle A^{-1}aa^{\dagger}(A^{-1})^{\dagger} \rangle = A^{-1}\langle aa^{\dagger}\rangle(A^{-1})^{\dagger} = A^{-1} A(A^{-1})^{\dagger} =A^{-1}$$
as we assume $A^{-1}$ to be Hermitian. as we assume $A^{-1}$ to be Hermitian.
By this brief introduction to sampling, we apply it in order to get the synthetic data. By this brief introduction to sampling, we apply it in order to get the synthetic data.
All of these sampling rules are implemented in NIFTy so we do not need to take care. All of these sampling rules are implemented in NIFTy so we do not need to take care.
%% Cell type:code id:ec11f39e tags: %% Cell type:code id:ec11f39e tags:
``` python ``` python
# Draw a sample from signal with covariance S. # Draw a sample from signal with covariance S.
# This prior sample serves as the ground-truth signal for the synthetic-data experiment.
s = S.draw_sample() s = S.draw_sample()
``` ```
%% Cell type:code id:7ae02d13 tags: %% Cell type:code id:7ae02d13 tags:
``` python ``` python
# Define the response operator that removes the geometry, i.e. it removes distances and volumes. # Define the response operator that removes the geometry, i.e. it removes distances and volumes.
R = ift.GeometryRemover(s_space) R = ift.GeometryRemover(s_space)
# Define the data space that has an unstructured domain. # Define the data space that has an unstructured domain.
d_space = R.target d_space = R.target
``` ```
%% Cell type:code id:e589055b tags: %% Cell type:code id:e589055b tags:
``` python ``` python
# Propagate the ground-truth signal through the (noise-free) measurement.
noiseless_data = R(s) noiseless_data = R(s)
# This is the multiplicative factor going from a sample with unit covariance to N. # This is the multiplicative factor going from a sample with unit covariance to N.
noise_amplitude = np.sqrt(0.2) noise_amplitude = np.sqrt(0.2)
# Define the noise covariance # Define the noise covariance
N = ift.ScalingOperator(d_space, noise_amplitude**2, float) N = ift.ScalingOperator(d_space, noise_amplitude**2, float)
# Draw a sample from noise with covariance N. # Draw a sample from noise with covariance N.
n = N.draw_sample() n = N.draw_sample()
# Synthetic data # Synthetic data
d = noiseless_data + n d = noiseless_data + n
``` ```
%% Cell type:markdown id:de895047 tags: %% Cell type:markdown id:de895047 tags:
### Information source and information propagator ### Information source and information propagator
Now that we have the synthetic data, we are one step closer to the Wiener filter! Now that we have the synthetic data, we are one step closer to the Wiener filter!
In order to apply Wiener filter on the data we first need to define the information source $j$ along with the information propagator $D$. In order to apply Wiener filter on the data we first need to define the information source $j$ along with the information propagator $D$.
%% Cell type:code id:30f242fd tags: %% Cell type:code id:30f242fd tags:
``` python ``` python
# Define the information source. # Define the information source.
j = R.adjoint(N.inverse(d)) j = R.adjoint(N.inverse(d))
# Iteration controller # Iteration controller
ic = ift.GradientNormController(iteration_limit=50000, tol_abs_gradnorm=0.1) ic = ift.GradientNormController(iteration_limit=50000, tol_abs_gradnorm=0.1)
# Inverse information propagator: D^{-1} = S^{-1} + R^dagger N^{-1} R.
D_inv = S.inverse + R.adjoint @ N.inverse @ R D_inv = S.inverse + R.adjoint @ N.inverse @ R
# Enable .inverse to invert D via Conjugate Gradient. # Enable .inverse to invert D via Conjugate Gradient.
D_inv = ift.InversionEnabler(D_inv, ic) D_inv = ift.InversionEnabler(D_inv, ic)
D = D_inv.inverse D = D_inv.inverse
``` ```
%% Cell type:markdown id:c2ab51d4 tags: %% Cell type:markdown id:c2ab51d4 tags:
### Apply Wiener Filter ### Apply Wiener Filter
After defining the information source and propagator, we are able to apply the Wiener filter in order to get the posterior mean $m = \langle s \rangle_{\mathcal{P}(s|d)}$ that is our reconstruction of the signal: After defining the information source and propagator, we are able to apply the Wiener filter in order to get the posterior mean $m = \langle s \rangle_{\mathcal{P}(s|d)}$ that is our reconstruction of the signal:
%% Cell type:code id:8e7616c8 tags: %% Cell type:code id:8e7616c8 tags:
``` python ``` python
# Posterior mean m = D j — the Wiener filter solution.
m = D(j) m = D(j)
``` ```
%% Cell type:markdown id:60b7922d tags: %% Cell type:markdown id:60b7922d tags:
### Results ### Results
%% Cell type:code id:3d4fdd46 tags: %% Cell type:code id:3d4fdd46 tags:
``` python ``` python
# `.val` retrieves the underlying numpy array from a NIFTy Field. # `.val` retrieves the underlying numpy array from a NIFTy Field.
# Overlay ground truth, noisy data and the Wiener filter posterior mean.
plt.plot(s.val, 'r', label="signal ground truth", linewidth=2) plt.plot(s.val, 'r', label="signal ground truth", linewidth=2)
plt.plot(d.val, 'k.', label="noisy data") plt.plot(d.val, 'k.', label="noisy data")
plt.plot(m.val, 'k', label="posterior mean",linewidth=2) plt.plot(m.val, 'k', label="posterior mean",linewidth=2)
plt.title("Reconstruction") plt.title("Reconstruction")
plt.legend() plt.legend()
plt.show() plt.show()
``` ```
%% Cell type:markdown id:29cac729 tags: %% Cell type:markdown id:29cac729 tags:
To show the deviations with respect to the true signal (or ground truth), we plot the residuals as follows: To show the deviations with respect to the true signal (or ground truth), we plot the residuals as follows:
%% Cell type:code id:301c8dc7 tags: %% Cell type:code id:301c8dc7 tags:
``` python ``` python
# Plot all quantities relative to the ground truth; the shaded band marks the
# one-sigma noise level (+/- noise_amplitude) around zero.
plt.plot(s.val - s.val, 'r', label="ground truth ref [$s-s$]", linewidth=2) plt.plot(s.val - s.val, 'r', label="ground truth ref [$s-s$]", linewidth=2)
plt.plot(d.val - s.val, 'k.', label="noise [$d-Rs$]") plt.plot(d.val - s.val, 'k.', label="noise [$d-Rs$]")
plt.plot(m.val - s.val, 'k', label="posterior mean - ground truth",linewidth=2) plt.plot(m.val - s.val, 'k', label="posterior mean - ground truth",linewidth=2)
plt.axhspan(-noise_amplitude,noise_amplitude, facecolor='0.9', alpha=.5) plt.axhspan(-noise_amplitude,noise_amplitude, facecolor='0.9', alpha=.5)
plt.title("Residuals") plt.title("Residuals")
plt.legend() plt.legend()
plt.show() plt.show()
``` ```
%% Cell type:markdown id:cf366b0a tags: %% Cell type:markdown id:cf366b0a tags:
## Wiener Filter on Incomplete Data ## Wiener Filter on Incomplete Data
Now we consider a case that the data is not complete. Now we consider a case that the data is not complete.
This might be the case in real situations as the instrument might not be able to receive data. This might be the case in real situations as the instrument might not be able to receive data.
In order to apply the Wiener filter to this case, we first need to build the response corresponding to the incomplete measurement in NIFTy! In order to apply the Wiener filter to this case, we first need to build the response corresponding to the incomplete measurement in NIFTy!
%% Cell type:markdown id:dae020f7 tags: %% Cell type:markdown id:dae020f7 tags:
### Incomplete Measuring / Masking ### Incomplete Measuring / Masking
We need to build a mask operator which cuts out all the unobserved parts of the signal. We need to build a mask operator which cuts out all the unobserved parts of the signal.
Let's assume that we first observe the signal for some time, but then something goes wrong with our instrument and we don't collect data for a while. Let's assume that we first observe the signal for some time, but then something goes wrong with our instrument and we don't collect data for a while.
After fixing the instrument we can collect data again. After fixing the instrument we can collect data again.
This means that data lives on an unstructured domain as there is data missing for the period of time $t_{\text{off}}$ when the instrument was offline. This means that data lives on an unstructured domain as there is data missing for the period of time $t_{\text{off}}$ when the instrument was offline.
In order to implement this incomplete measurement we need to define a new response operator $R$ which masks the signal for the time $t_{\text{off}}$. In order to implement this incomplete measurement we need to define a new response operator $R$ which masks the signal for the time $t_{\text{off}}$.
%% Cell type:code id:14726428 tags: %% Cell type:code id:14726428 tags:
``` python ``` python
# Whole observation time # Whole observation time
npix = s_space.size npix = s_space.size
# Time when the instrument is turned off # Time when the instrument is turned off
l = int(npix * 0.2) l = int(npix * 0.2)
# Time when the instrument is turned on again # Time when the instrument is turned on again
h = int(npix * 0.4) h = int(npix * 0.4)
# Initialise a new array for the whole time frame # Initialise a new array for the whole time frame
mask = np.zeros(s_space.shape, bool) mask = np.zeros(s_space.shape, bool)
# Define the mask; entries set to 1 (True) mark the unobserved interval [l, h). # Define the mask
mask[l:h] = 1 mask[l:h] = 1
# Turn the numpy array into a nifty field # Turn the numpy array into a nifty field
mask = ift.makeField(s_space, mask) mask = ift.makeField(s_space, mask)
# Define the response operator which masks the places where mask == 1 # Define the response operator which masks the places where mask == 1
R = ift.MaskOperator(mask) R = ift.MaskOperator(mask)
``` ```
%% Cell type:markdown id:dd3e8af1 tags: %% Cell type:markdown id:dd3e8af1 tags:
### Synthetic Data ### Synthetic Data
As in the Wiener filter example with complete data, we are generating some synthetic data now by propagating the previously drawn prior sample through the incomplete measurement response and adding a noise sample. As in the Wiener filter example with complete data, we are generating some synthetic data now by propagating the previously drawn prior sample through the incomplete measurement response and adding a noise sample.
%% Cell type:code id:8363298c tags: %% Cell type:code id:8363298c tags:
``` python ``` python
# Define the noise covariance # Define the noise covariance
# Note: the data space is now R.target, which excludes the masked pixels.
N = ift.ScalingOperator(R.target, noise_amplitude**2, float) N = ift.ScalingOperator(R.target, noise_amplitude**2, float)
# Draw a noise sample # Draw a noise sample
n = N.draw_sample() n = N.draw_sample()
# Measure the signal sample with additional noise # Measure the signal sample with additional noise
d = R(s) + n d = R(s) + n
``` ```
%% Cell type:markdown id:798f5ca8 tags: %% Cell type:markdown id:798f5ca8 tags:
### Sampling from D ### Sampling from D
Since we have an incomplete measurement we want to know how uncertain we are about our Wiener filter solution. We can easily obtain both, the mean and the standard deviation by sampling from $D$ and computing them directly from the drawn samples. Since we have an incomplete measurement we want to know how uncertain we are about our Wiener filter solution. We can easily obtain both, the mean and the standard deviation by sampling from $D$ and computing them directly from the drawn samples.
In order to enable NIFTy to sample from $D$ we need to use some helper functions. In order to enable NIFTy to sample from $D$ we need to use some helper functions.
%% Cell type:code id:53b10df1 tags: %% Cell type:code id:53b10df1 tags:
``` python ``` python
# This implements the rule how to sample from a sum of covariances # This implements the rule how to sample from a sum of covariances
D_inv = ift.SamplingEnabler(ift.SandwichOperator.make(cheese=N.inverse, bun=R), S.inverse, ic, S.inverse) D_inv = ift.SamplingEnabler(ift.SandwichOperator.make(cheese=N.inverse, bun=R), S.inverse, ic, S.inverse)
# Allow for numerical inversion # Allow for numerical inversion
D_inv = ift.InversionEnabler(D_inv, ic) D_inv = ift.InversionEnabler(D_inv, ic)
D = D_inv.inverse D = D_inv.inverse
# Define the information source # Define the information source
j = R.adjoint(N.inverse(d)) j = R.adjoint(N.inverse(d))
# Posterior mean # Posterior mean
m = D(j) m = D(j)
# Number of samples to calculate the posterior standard deviation # Number of samples to calculate the posterior standard deviation
n_samples = 200 n_samples = 200
# Helper function that calculates the mean and the variance from a set of samples efficiently # Helper function that calculates the mean and the variance from a set of samples efficiently
sc = ift.StatCalculator() sc = ift.StatCalculator()
for _ in range(n_samples): for _ in range(n_samples):
# Draw a sample of G(s,D) and shift it by m -> G(s-m,D) # Draw a sample of G(s,D) and shift it by m -> G(s-m,D)
sample = m + D.draw_sample() sample = m + D.draw_sample()
# Add it to the StatCalculator # Add it to the StatCalculator
sc.add(sample) sc.add(sample)
# Standard deviation from samples # Standard deviation from samples
samples_std = sc.var.sqrt() samples_std = sc.var.sqrt()
# Mean from samples that converges to m in the limit of infinitely many samples # Mean from samples that converges to m in the limit of infinitely many samples
samples_mean = sc.mean samples_mean = sc.mean
``` ```
%% Cell type:markdown id:7c1cb854 tags: %% Cell type:markdown id:7c1cb854 tags:
### Plots ### Plots
Let us visualize the results of the Wiener filter $m$, the sampled standard deviation and mean, as well as the true signal (ground truth) and the data. Let us visualize the results of the Wiener filter $m$, the sampled standard deviation and mean, as well as the true signal (ground truth) and the data.
Since the data lives in data space, we first need to project it back into the signal space via $R^{\dagger}d$. Since the data lives in data space, we first need to project it back into the signal space via $R^{\dagger}d$.
%% Cell type:code id:09c0d576 tags: %% Cell type:code id:09c0d576 tags:
``` python ``` python
# Visualize the reconstruction: masked interval, ground truth, posterior mean
# (exact and sample-based), the one-sigma sample band, and the observed data.
plt.axvspan(l, h, facecolor='0.8',alpha=0.3, label="masked area") # Shading the masked interval plt.axvspan(l, h, facecolor='0.8',alpha=0.3, label="masked area") # Shading the masked interval
plt.plot(s.val, '#f28109', label="Signal (ground truth)", alpha=1, linewidth=2) # plt.plot(s.val, '#f28109', label="Signal (ground truth)", alpha=1, linewidth=2) #
plt.plot(m.val, 'k', label="Posterior mean (reconstruction)", linewidth=2) plt.plot(m.val, 'k', label="Posterior mean (reconstruction)", linewidth=2)
plt.fill_between(range(m.size), (m - samples_std).val, (m + samples_std).val, plt.fill_between(range(m.size), (m - samples_std).val, (m + samples_std).val,
facecolor='#8592ff', alpha=0.8, label="Posterior std (samples)") facecolor='#8592ff', alpha=0.8, label="Posterior std (samples)")
plt.plot(samples_mean.val, 'k--', label="Posterior mean (samples)") plt.plot(samples_mean.val, 'k--', label="Posterior mean (samples)")
#.val would return a read only-array. `.val_rw()` returns a writeable copy #.val would return a read only-array. `.val_rw()` returns a writeable copy
tmp = R.adjoint(d).val_rw() tmp = R.adjoint(d).val_rw()
# Remove the "0" data points in the masked array # Remove the "0" data points in the masked array
tmp[l:h] = np.nan tmp[l:h] = np.nan
plt.plot(tmp, 'k.', label="Data") plt.plot(tmp, 'k.', label="Data")
plt.title("Reconstruction of incomplete data") plt.title("Reconstruction of incomplete data")
plt.legend() plt.legend()
plt.show() plt.show()
``` ```
%% Cell type:markdown id:4ed413ba tags: %% Cell type:markdown id:4ed413ba tags:
## Wiener Filter standardized ## Wiener Filter standardized
%% Cell type:code id:1c8b2921 tags: %% Cell type:code id:1c8b2921 tags:
``` python ``` python
# Amplitude (square root of the power spectrum), obtained by applying S_k to a unit field.
sqrt_pspec = S_k(ift.full(S_k.domain, 1.)).sqrt() sqrt_pspec = S_k(ift.full(S_k.domain, 1.)).sqrt()
# trafo maps standardized (unit-covariance) coordinates to signal space: s = trafo(xi).
trafo = HT.adjoint @ ift.makeOp(sqrt_pspec) trafo = HT.adjoint @ ift.makeOp(sqrt_pspec)
R2 = R @ trafo R2 = R @ trafo
j2 = R2.adjoint(N.inverse(d)) j2 = R2.adjoint(N.inverse(d))
identity = ift.Operator.identity_operator(R2.domain) identity = ift.Operator.identity_operator(R2.domain)
# In standardized coordinates the prior covariance is the identity, so D^{-1} = 1 + R2^dagger N^{-1} R2.
Dinv = ift.InversionEnabler(identity + R2.adjoint @ N.inverse @ R2, ic) Dinv = ift.InversionEnabler(identity + R2.adjoint @ N.inverse @ R2, ic)
D2 = Dinv.inverse D2 = Dinv.inverse
m2 = D2(j2) m2 = D2(j2)
``` ```
%% Cell type:code id:fe148f44 tags: %% Cell type:code id:fe148f44 tags:
``` python ``` python
# Map the standardized-coordinate solution back to signal space before plotting.
m2_s_space = trafo(m2) m2_s_space = trafo(m2)
plt.axvspan(l, h, facecolor='0.8',alpha=0.5) plt.axvspan(l, h, facecolor='0.8',alpha=0.5)
plt.plot(s.val, 'r', label="Signal", alpha=1, linewidth=2) plt.plot(s.val, 'r', label="Signal", alpha=1, linewidth=2)
plt.plot(tmp, 'k.', label="Data") plt.plot(tmp, 'k.', label="Data")
plt.plot(m2_s_space.val, 'k', label="Reconstruction", linewidth=2) plt.plot(m2_s_space.val, 'k', label="Reconstruction", linewidth=2)
plt.title("Reconstruction of incomplete data in normalized coordinates") plt.title("Reconstruction of incomplete data in normalized coordinates")
plt.legend() plt.legend()
plt.show() plt.show()
``` ```
......
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment