Skip to content
Snippets Groups Projects

Compare revisions

Changes are shown as if the source revision was being merged into the target revision. Learn more about comparing revisions.

Source

Select target project
No results found
Select Git revision
  • 420-tracerboolconversion-error-in-lognormal_moments-py
  • 423-minisanity-re-improve-likelihood-readability
  • NIFTy_1
  • NIFTy_2
  • NIFTy_3
  • NIFTy_4
  • NIFTy_5
  • NIFTy_6
  • NIFTy_7
  • NIFTy_8
  • TransitionFunction
  • adaptions_cfm_veberle
  • altVI
  • cf_matern_N_copies
  • cfm_demo_sliders
  • change_ncg_default
  • code_formatter
  • config_multifrequency_trans
  • cupy_backend
  • feature_nifty_re_static_newton_cg
  • fft_interpolator
  • fix_cfm_amplitude_normalization
  • fix_cfm_amplitude_normalization_nifty8
  • fix_color_mf_plotting
  • fix_nonlinearity_gradients
  • fix_saved_pickle_files
  • flexible_FieldZeroPadder
  • force_precompute_dists
  • format_to_fstring
  • frequency_model
  • gauss_markov_processes
  • general_sky
  • geomap_pf
  • hammer_projection
  • hmc
  • improve_getting_started_0
  • independent_SLAmplitudes
  • inline_issue
  • iwp_x0_interface
  • jaxopt_vi
  • joint_re_cl_tests
  • lanczos
  • likelihood_constant_terms
  • likelihoods
  • main
  • mf_plotting_refactor
  • mf_sky
  • more_derivatives_pf
  • more_restrictive_scaling_operator
  • mpi_samplelist_fix
  • msc
  • native_extension
  • new_sampling
  • nifty
  • nifty2jax_david_vmap
  • nifty8_philipps_unmerged_patches
  • nifty_jr
  • options_for_zeropadder
  • paper_lsp
  • paper_matteo
  • paper_pf
  • paper_ve
  • parametric_kl
  • perf_tweaks
  • playground_iwp_mf
  • projection_operators_maxim
  • pytorch_operator
  • qpo_model
  • qpo_model_rebased
  • re_fewer_tests
  • rmmap
  • symbolicDifferentiation
  • test_add_fluctuations
  • tests_jhk
  • tmp_mf_plotting
  • total_variation_prior
  • yango_minimizer
  • yapf_01
  • 0.2.0
  • 0.3.0
  • 0.4.0
  • 0.4.2
  • 0.6.0
  • 0.7.0
  • 0.8.0
  • 0.8.4
  • 0.9.0
  • 1.0.6
  • 1.0.7
  • 1.0.8
  • 4.0.0
  • 4.1.0
  • 4.2.0
  • 5.0.0
  • 6.0_kern
  • 9.0.0
  • 9.1.0
  • historical/head_RXTE_compatible
  • historical/head_field_testing
  • historical/head_master
  • historical/head_plotting
  • historical/head_theo_master
  • historical/nifty2go
  • historical/theo_master
  • point_source_separator
  • v5.0.1
  • v7.0
  • v7.1
  • v7.2
  • v7.3
  • v7.4
  • v7.5
  • v8.0
  • v8.1
  • v8.2
  • v8.3
  • v8.4
  • v8.5
  • v8.5.1
  • v8.5.2
  • v8.5.3
  • v8.5.4
  • v8.5.5
  • v8.5.6
  • v8.5.7
125 results

Target

Select target project
  • ift/nifty
  • g-philipparras/NIFTy
  • tpeters/NIFTy
  • g-neelshah/nifty
  • philiar/nifty
5 results
Select Git revision
  • NIFTy_1
  • NIFTy_2
  • RXTE_compatible
  • critical_nightly
  • dobj_experiments
  • field_testing
  • issue173_wienerFilterDemos
  • master
  • nifty2go
  • nightly
  • plotting
  • theo_master
  • wiener_filter_easy
  • 0.2.0
  • 0.3.0
  • 0.4.0
  • 0.4.2
  • 0.6.0
  • 0.7.0
  • 0.8.0
  • 0.8.4
  • 0.9.0
  • 1.0.6
  • 1.0.7
  • 1.0.8
25 results
Show changes
Commits on Source (6782)
Showing
with 2981 additions and 144 deletions
use flake
# nix-related
result
.nix-nifty-venv
.direnv
demos/result*
docs/source/user/0_intro.md
docs/source/user/old_nifty_custom_nonlinearities.md
docs/source/user/old_nifty/getting_started_0.md
docs/source/user/old_nifty/getting_started_4_CorrelatedFields.md
docs/source/user/results_intro/
docs/source/user/results_intro_full_reconstruction.png
docs/source/user/*.ipynb
# custom
*_results
*.h5
*.hdf5
*.fits
*.txt
*.pickle
*.pkl
setup.cfg
.idea
.DS_Store
......@@ -7,6 +28,9 @@ setup.cfg
.document
.svn/
*.csv
.pytest_cache/
*.png
*.prof
# from https://github.com/github/gitignore/blob/master/Python.gitignore
......@@ -75,13 +99,15 @@ instance/
.scrapy
# Sphinx documentation
docs/_build/
docs/build/
docs/source/mod
# PyBuilder
target/
# IPython Notebook
.ipynb_checkpoints
*.ipynb
# pyenv
.python-version
......
image: ubuntu:artful
image: python:latest
variables:
MPLBACKEND: agg
OMP_NUM_THREADS: 1
stages:
- test
- demo_runs
- release
variables:
DOCKER_DRIVER: overlay
default:
before_script:
- apt-get update && apt-get install -y libopenmpi-dev
- pip install .[test,cl_parallel]
test_cl:
stage: test
tags:
- docker
script:
- pytest -n auto -q --cov=nifty.cl test/test_cl
- mv .coverage .coverage.cl
artifacts:
paths:
- .coverage.cl
test_cl_nix:
stage: test
tags:
- docker
image: nixos/nix:latest
before_script:
- apt-get update
- chmod +x ci/*.sh
- ci/install_basics.sh
- pip install --process-dependency-links --upgrade -r ci/requirements.txt
- pip3 install --process-dependency-links --upgrade -r ci/requirements.txt
- ls
script:
- export NIX_CONFIG="extra-experimental-features = nix-command flakes"
- nix build --print-build-logs
- nix build --print-build-logs .#docs
allow_failure: true
test_re:
stage: test
tags:
- docker
script:
- pytest -n auto -q --cov=nifty.re test/test_re
- mv .coverage .coverage.re
artifacts:
paths:
- .coverage.re
test_min:
test_cl_mpi:
stage: test
tags:
- docker
variables:
OMPI_MCA_btl_vader_single_copy_mechanism: none
script:
- mpirun --allow-run-as-root -np 2 --bind-to none pytest -q test/test_cl/test_mpi
artifacts:
paths:
- .coverage.cl_mpi
merge_coverage:
stage: test
tags:
- docker
needs:
- job: test_cl
artifacts: true
- job: test_cl_mpi
artifacts: true
- job: test_re
artifacts: true
script:
- coverage combine .coverage.cl .coverage.cl_mpi .coverage.re
- coverage report --omit "*plot*" | tee coverage.txt
coverage: '/(?i)total.*? (100(?:\.0+)?\%|[1-9]?\d(?:\.\d+)?\%)$/'
pages:
stage: release
script:
- pip install .[doc]
- git config --global --add safe.directory /builds/ift/nifty
- git clean -xfd docs/source
- sh docs/generate.sh
- mv docs/build/ public/
artifacts:
paths:
- public
only:
- main
run_ipynb0:
stage: demo_runs
needs: [test_cl]
script:
- pip install .[doc]
- jupytext --to ipynb demos/cl/getting_started_0.py
- jupyter-nbconvert --to markdown --execute --ExecutePreprocessor.timeout=None demos/cl/getting_started_0.ipynb
run_ipynb1:
stage: demo_runs
needs: [test_cl]
script:
- pip install .[doc]
- jupytext --to ipynb demos/cl/getting_started_4_CorrelatedFields.py
- jupyter-nbconvert --to markdown --execute --ExecutePreprocessor.timeout=None demos/cl/getting_started_4_CorrelatedFields.ipynb
run_getting_started_1:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/getting_started_1.py
artifacts:
paths:
- '*.png'
run_getting_started_2:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/getting_started_2.py
artifacts:
paths:
- 'getting_started_2_results'
- '*.png'
run_getting_started_3:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/getting_started_3.py
artifacts:
paths:
- 'getting_started_3_results'
- '*.png'
run_getting_started_3_mpi:
stage: demo_runs
needs: [test_cl]
script:
- mpirun --allow-run-as-root -np 2 --bind-to none python3 demos/cl/getting_started_3.py
artifacts:
paths:
- 'getting_started_3_results'
- '*.png'
run_getting_started_mf:
stage: demo_runs
needs: [test_cl]
script:
- mpirun --allow-run-as-root -np 2 --bind-to none python3 demos/cl/getting_started_5_mf.py
artifacts:
paths:
- 'getting_started_mf_results'
- '*.png'
run_getting_started_niftyre_intro:
stage: demo_runs
needs: [test_re]
script:
- python3 demos/re/0_intro.py
allow_failure: true
artifacts:
paths:
- '*.png'
run_getting_started_niftyre_tomography:
stage: demo_runs
needs: [test_re]
script:
- python3 demos/re/1_tomography.py
allow_failure: true
artifacts:
paths:
- '*.png'
run_getting_started_niftyre_nonlinear_regression:
stage: demo_runs
needs: [test_re]
script:
- python3 demos/re/a_nonlinear_regression.py
allow_failure: true
artifacts:
paths:
- '*.png'
run_getting_started_niftyre_wiener_filter:
stage: demo_runs
needs: [test_re]
script:
- python3 demos/re/a_wiener_filter.py
allow_failure: true
artifacts:
paths:
- '*.png'
run_getting_started_niftyre_icr:
stage: demo_runs
needs: [test_re]
script:
- python3 demos/re/a_icr.py
allow_failure: true
artifacts:
paths:
- '*.png'
run_getting_started_7:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/getting_started_7_config_file.py demos/cl/getting_started_7_config_file.cfg
artifacts:
paths:
- '*.png'
run_getting_density:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/density_estimation.py
artifacts:
paths:
- '*.png'
run_model_comparison:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/model_comparison.py
artifacts:
paths:
- '*.png'
run_bernoulli:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/bernoulli_map.py
artifacts:
paths:
- '*.png'
run_curve_fitting:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/polynomial_fit.py
artifacts:
paths:
- '*.png'
run_visual_vi:
stage: demo_runs
needs: [test_cl]
script:
- python3 demos/cl/variational_inference_visualized.py
run_meanfield:
stage: demo_runs
needs: [test_cl]
script:
- python setup.py install --user -f
- python3 setup.py install --user -f
- nosetests
- nosetests3 -x --with-coverage --cover-package=nifty2go --cover-branches
- >
coverage report | grep TOTAL | awk '{ print "TOTAL: "$6; }'
- python3 demos/cl/parametric_variational_inference.py
# Contributors
## NIFTy9
- Jakob Roth
- Lukas Scheel-Platz
- Martin Reinecke
- Matteo Guardiani
- [Philipp Arras](https://philipp-arras.de)
## NIFTy8
- Ananya Shankar
- Andrija Kostic
- Dan F-M
- David Gorbunov
- David Outland
- Gordian Edenhofer
- Harth-Kitzerow Johannes
- Jakob Knollmüller
- Jakob Roth
- Julian Rüstig
- Laurin Söding
- Lukas Scheel-Platz
- Martin Reinecke
- Massin Guerdi
- Matteo Guardiani
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Reimar Leike
- Simon Ding
- [Torsten Enßlin](https://wwwmpa.mpa-garching.mpg.de/~ensslin/)
- Vincent Eberle
- Margret Westerkamp
## NIFTy7
- Andrija Kostic
- Gordian Edenhofer
- Jakob Knollmüller
- Jakob Roth
- Lukas Platz
- Matteo Guardiani
- Martin Reinecke
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Reimar Heinrich Leike
- Simon Ding
- Vincent Eberle
## NIFTy6
- Andrija Kostic
- Gordian Edenhofer
- Jakob Knollmüller
- Lukas Platz
- Martin Reinecke
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Philipp Haim
- Reimar Heinrich Leike
- Rouven Lemmerz
- [Torsten Enßlin](https://wwwmpa.mpa-garching.mpg.de/~ensslin/)
- Vincent Eberle
## NIFTy5
- Christoph Lienhard
- Gordian Edenhofer
- Jakob Knollmüller
- Julia Stadler
- Julian Rüstig
- Lukas Platz
- Martin Reinecke
- Max-Niklas Newrzella
- Natalia
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Philipp Haim
- Reimar Heinrich Leike
- Sebastian Hutschenreuter
- Silvan Streit
- [Torsten Enßlin](https://wwwmpa.mpa-garching.mpg.de/~ensslin/)
## NIFTy4
- Christoph Lienhard
- Jakob Knollmüller
- Lukas Platz
- Martin Reinecke
- Mihai Baltac
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Reimar Heinrich Leike
- Silvan Streit
- [Torsten Enßlin](https://wwwmpa.mpa-garching.mpg.de/~ensslin/)
## NIFTy3
- Daniel Pumpe
- Jait Dixit
- Jakob Knollmüller
- Martin Reinecke
- Mihai Baltac
- Natalia
- [Philipp Arras](https://philipp-arras.de)
- [Philipp Frank](http://www.ph-frank.de)
- Reimar Heinrich Leike
- Matevz Sraml
- Theo Steininger
- csongor
## NIFTy2
- Jait Dixit
- Theo Steininger
- csongor
## NIFTy1
- Johannes Buchner
- Marco Selig
- Theo Steininger
This diff is collapsed.
FROM ubuntu:artful
# dependencies via apt
RUN apt-get update
ADD ci/install_basics.sh /tmp/install_basics.sh
RUN cd /tmp && chmod +x install_basics.sh && ./install_basics.sh
# python dependencies
ADD ci/requirements.txt /tmp/requirements.txt
RUN pip install --upgrade -r /tmp/requirements.txt
# copy sources and install nifty
COPY . /tmp/NIFTy
RUN pip install /tmp/NIFTy
# Cleanup
RUN rm -r /tmp/*
include ChangeLog.md
include demos/cl/*.py
include demos/re/*.py
graft tests
global-exclude *.py[cod]
WARNING:
========
# NIFTy - Numerical Information Field Theory
The code on this branch is not meant to be an official version of NIFTy.
As a consequence, it does not install as package "nifty", but rather as
"nifty2go", to allow parallel installation alongside the official NIFTy and
avoid any conflicts.
[![pipeline status](https://gitlab.mpcdf.mpg.de/ift/nifty/badges/main/pipeline.svg)](https://gitlab.mpcdf.mpg.de/ift/nifty/-/commits/main)
[![coverage report](https://gitlab.mpcdf.mpg.de/ift/nifty/badges/main/coverage.svg)](https://gitlab.mpcdf.mpg.de/ift/nifty/-/commits/main)
**NIFTy** project homepage: [ift.pages.mpcdf.de/nifty](https://ift.pages.mpcdf.de/nifty/)
| Found a bug? [github.com/nifty-ppl/nifty/issues](https://github.com/nifty-ppl/nifty/issues)
| Need help? [github.com/nifty-ppl/nifty/discussions](https://github.com/NIFTy-PPL/NIFTy/discussions)
NIFTY - Numerical Information Field Theory
==========================================
[![build status](https://gitlab.mpcdf.mpg.de/ift/NIFTy/badges/nifty2go/build.svg)](https://gitlab.mpcdf.mpg.de/ift/NIFTy/commits/nifty2go)
[![coverage report](https://gitlab.mpcdf.mpg.de/ift/NIFTy/badges/nifty2go/coverage.svg)](https://gitlab.mpcdf.mpg.de/ift/NIFTy/commits/nifty2go)
**NIFTy**, "**N**umerical **I**nformation **F**ield **T**heor<strong>y</strong>",
is a Bayesian inference library. It is designed to compute statistical
properties of high-dimensional posterior probability distributions (tested up to
billions of parameters) from noisy input data. At the core of NIFTy lies a set
of Gaussian Process (GP) models and Variational Inference (VI) algorithms - in
particular Metric Gaussian Variational Inference (MGVI) and Geometric Gaussian
Variational Inference (geoVI).
**NIFTY** project homepage:
[http://www.mpa-garching.mpg.de/ift/nifty/](http://www.mpa-garching.mpg.de/ift/nifty/)
There are two independent implementation variants of NIFTy:
Summary
-------
- [Re-envisioned NIFTy](#niftyre) (`nifty.re`)
- [Classical NIFTy](#niftycl) (`nifty.cl`)
### Description
These variants share lots of functionality:
**NIFTY**, "**N**umerical **I**nformation **F**ield **T**heor<strong>y</strong>", is
a versatile library designed to enable the development of signal
inference algorithms that operate regardless of the underlying spatial
grid and its resolution. Its object-oriented framework is written in
Python, although it accesses libraries written in C++ and C for
efficiency.
- Similar VI algorithms
- Similar GP models
- Similar interfaces (e.g., `nifty.cl/re.optimize_kl` and
`nifty.cl/re.CorrelatedFieldMaker`)
- Both can run on CPUs and GPUs
NIFTY offers a toolkit that abstracts discretized representations of
continuous spaces, fields in these spaces, and operators acting on
fields into classes. Thereby, the correct normalization of operations on
fields is taken care of automatically without concerning the user. This
allows for an abstract formulation and programming of inference
algorithms, including those derived within information field theory.
Thus, NIFTY permits its user to rapidly prototype algorithms in 1D, and
then apply the developed code in higher-dimensional settings of real
world problems. The set of spaces on which NIFTY operates comprises
point sets, *n*-dimensional regular grids, spherical spaces, their
harmonic counterparts, and product spaces constructed as combinations of
those.
The major differences between them are:
### Class & Feature Overview
- Philosophy: `nifty.cl` provides hackable transparent building blocks to
explore discretization-independent Bayesian inference algorithms with minimal
dependencies. On the other hand, `nifty.re` is built around JAX, provides a
more direct numpy-like interface and aims for high performance out of the box.
- Backend: numpy/cupy (`nifty.cl`) vs JAX (`nifty.re`).
- Performance: `nifty.re` leverages JIT from JAX and thereby runs generally
faster than `nifty.cl`.
- Functionality: `nifty.re` supports HMC and Multi Grids. `nifty.cl` does not
(yet).
- API: In `nifty.cl` algorithms are implemented independent of the chosen
discretization scheme represented explicitly by `nifty.cl.Domain`s. `nifty.re`
provides more direct access to arrays.
- License: `nifty.cl` is distributed under GPL-3.0+. `nifty.re` is distributed
under BSD-2-Clause or GPL-2.0+.
The NIFTY library features three main classes: **spaces** that represent
certain grids, **fields** that are defined on spaces, and **operators**
that apply to fields.
For a quick start, you can browse through the [informal introduction](https://ift.pages.mpcdf.de/nifty/user/)
or dive into NIFTy by running the scripts in the demos folder. The subfolders
`cl/` and `re/` contain the scripts relevant for the respective NIFTy flavor.
- [Spaces](http://www.mpa-garching.mpg.de/ift/nifty/space.html)
- `RGSpace` - *n*-dimensional regular Euclidean grid
- `LMSpace` - spherical harmonics
- `GLSpace` - Gauss-Legendre grid on the 2-sphere
- `HPSpace` - [HEALPix](http://sourceforge.net/projects/healpix/)
grid on the 2-sphere
- [Fields](http://www.mpa-garching.mpg.de/ift/nifty/field.html)
- `Field` - generic class for (discretized) fields
<!-- -->
Field.conjugate Field.dim Field.norm
Field.vdot Field.weight
- [Operators](http://www.mpa-garching.mpg.de/ift/nifty/operator.html)
- `DiagonalOperator` - purely diagonal matrices in a specified
basis
- `FFTOperator` - conversion between spaces and their harmonic
counterparts
- (and more)
- (and more)
*Parts of this summary are taken from* [1] *without marking them
explicitly as quotations.*
Installation
------------
## NIFTy.re
### Requirements
### Installation
- [Python](http://www.python.org/) (v2.7.x or 3.5.x)
- [NumPy](http://www.numpy.org/)
NIFTy is distributed on PyPI. For a minimal installation of `nifty.re` run:
```
pip install --user 'nifty[re]'
```
### Sources
To install NIFTy.re with GPU support please manually install JAX following the instructions in the [JAX installation guide](https://docs.jax.dev/en/latest/installation.html).
The current version of Nifty3 can be obtained by cloning the repository:
If you might want to adapt the NIFTy source code, we suggest installing NIFTy as editable python package with a command such as:
git clone https://gitlab.mpcdf.mpg.de/ift/NIFTy.git
```
git clone -b nifty https://gitlab.mpcdf.mpg.de/ift/nifty.git
cd nifty
pip install --user --editable '.[re]'
```
### Run the tests
### Installation via pip
To run the tests, install all optional requirements `'nifty[all]'` and afterwards run pytest (and create a coverage report) via
It is possible to simply install NIFTy with all its dependencies via the command
```
pytest -n auto --cov=nifty test
```
pip install --user --process-dependency-links --egg git+https://gitlab.mpcdf.mpg.de/ift/NIFTy.git@nifty2go
If you are writing your own tests, it is often sufficient to just install the optional test dependencies `'nifty[test]'`. However, to run the full test suite including tests of optional functionality, it is assumed that all optional dependencies are installed.
### Running the tests
### Contributing
In order to run the tests one needs two additional packages:
Contributions are very welcome!
Feel free to reach out early on in the development process e.g. by opening a draft PR or filing an issue, we are happy to help in the development and provide feedback along the way.
Please open an issue first if you think your PR changes current code substantially.
Please format your code according to the existing style used in the file or with black for new files.
To advertise your changes, please update the public documentation and the ChangeLog if your PR affects the public API.
Please add appropriate tests to your PR.
### Citing
To cite the probabilistic programming framework `nifty.re`, please use the citation provided below.
In addition to citing NIFTy itself, please consider crediting the Gaussian process models you used and the inference machinery.
See [the corresponding entry on citing NIFTy in the documentation](https://ift.pages.mpcdf.de/nifty/user/citations.html) for further details.
```
@article{niftyre,
title = {Re-Envisioning Numerical Information Field Theory (NIFTy.re): A Library for Gaussian Processes and Variational Inference},
author = {Gordian Edenhofer and Philipp Frank and Jakob Roth and Reimar H. Leike and Massin Guerdi and Lukas I. Scheel-Platz and Matteo Guardiani and Vincent Eberle and Margret Westerkamp and Torsten A. Enßlin},
year = {2024},
journal = {Journal of Open Source Software},
publisher = {The Open Journal},
volume = {9},
number = {98},
pages = {6593},
doi = {10.21105/joss.06593},
url = {https://doi.org/10.21105/joss.06593},
}
```
pip install nose parameterized
Afterwards the tests (including a coverage report) are run using the following
command in the repository root:
nosetests --exe --cover-html
## NIFTy.cl
`nifty.cl` is a versatile library designed to enable the development of signal
inference algorithms that operate regardless of the underlying grids (spatial,
spectral, temporal, …) and their resolutions. Its object-oriented framework is
written in Python, although it accesses libraries written in C++ and C for
efficiency.
NIFTy offers a toolkit that abstracts discretized representations of continuous
spaces, fields in these spaces, and operators acting on these fields into
classes.
This allows for an abstract formulation and programming of inference algorithms,
including those derived within information field theory. NIFTy's interface is
designed to resemble IFT formulae in the sense that the user implements
algorithms in NIFTy independent of the topology of the underlying spaces and the
discretization scheme.
Thus, the user can develop algorithms on subsets of problems and on spaces where
the detailed performance of the algorithm can be properly evaluated and then
easily generalize them to other, more complex spaces and the full problem,
respectively.
The set of spaces on which NIFTy operates comprises point sets, *n*-dimensional
regular grids, spherical spaces, their harmonic counterparts, and product spaces
constructed as combinations of those. NIFTy takes care of numerical subtleties
like the normalization of operations on fields and the numerical representation
of model components, allowing the user to focus on formulating the abstract
inference procedures and process-specific model properties.
### Dependencies and installation
The latest version of `nifty.cl` can be installed from the sources:
pip install git+https://gitlab.mpcdf.mpg.de/ift/nifty@nifty
Releases can be found on PyPI:
pip install nifty
Both will install the basic required dependencies (numpy and scipy). Often users
may choose to install optional dependencies to enable additional features.
- `ducc0`: Use FFTs directly from ducc and enable several operators implemented
directly in C++ for speed.
- `cupy`: Enable GPU backend.
- `pyvkfft`: Use vkFFT instead of cufft.
- `cufinufft`: Enables nffts on the GPU.
- `mpi4py`: Parallelize computations via MPI.
- `astropy`: Save certain outputs as FITS files.
- `h5py`: Save certain outputs as HDF5 files.
- `matplotlib`: Enable plotting, e.g., via `nifty.cl.Plot`.
### First Steps
For a quick start, you can browse through the [informal
introduction](http://www.mpa-garching.mpg.de/ift/nifty/start.html) or
dive into NIFTY by running one of the demonstrations, e.g.:
introduction](https://ift.pages.mpcdf.de/nifty/user/code.html) or dive into
NIFTy by running one of the demonstrations, e.g.:
python3 demos/cl/getting_started_1.py
### Testing
To run the tests `pytest` is required. The tests can be run using the following
command in the repository root:
pytest test/test_cl
To run the full test suite including tests of optional functionality, it is
assumed that all optional dependencies are installed.
### Citing
```
@article{niftycl,
author = {{Arras}, Philipp and {Baltac}, Mihai and {Ensslin}, Torsten A. and {Frank}, Philipp and {Hutschenreuter}, Sebastian and {Knollmueller}, Jakob and {Leike}, Reimar and {Newrzella}, Max-Niklas and {Platz}, Lukas and {Reinecke}, Martin and {Stadler}, Julia},
title = {{NIFTy5: Numerical Information Field Theory v5}},
keywords = {Software},
howpublished = {Astrophysics Source Code Library, record ascl:1903.008},
year = 2019,
month = 03,
eid = {ascl:1903.008},
pages = {ascl:1903.008},
archiveprefix = {ascl},
eprint = {1903.008}
}
```
python demos/wiener_filter_via_curvature.py
Acknowledgement
---------------
Please acknowledge the use of NIFTY in your publication(s) by using a
phrase such as the following:
> *"Some of the results in this publication have been derived using the
> NIFTY package [Selig et al., 2013]."*
### References
## Building the Documentation
Release Notes
-------------
NIFTy's documentation is generated via [Sphinx](https://www.sphinx-doc.org/en/stable/) and is available online at [ift.pages.mpcdf.de/nifty](https://ift.pages.mpcdf.de/nifty/).
The NIFTY package is licensed under the
[GPLv3](http://www.gnu.org/licenses/gpl.html) and is distributed
*without any warranty*.
To build the documentation locally, run:
* * * * *
```
sudo apt-get install dvipng jupyter-nbconvert texlive-latex-base texlive-latex-extra
pip install --user sphinx==8.1.3 jupytext pydata-sphinx-theme myst-parser sphinxcontrib-bibtex
cd <nifty_directory>
bash docs/generate.sh
```
**NIFTY** project homepage:
[](http://www.mpa-garching.mpg.de/ift/nifty/)
To view the documentation, open `docs/build/index.html` in your browser.
[1] Selig et al., "NIFTY - Numerical Information Field Theory - a
versatile Python library for signal inference", [A&A, vol. 554, id.
A26](http://dx.doi.org/10.1051/0004-6361/201321236), 2013;
[arXiv:1301.4499](http://www.arxiv.org/abs/1301.4499)
Note: Make sure that you reinstall nifty after each change since sphinx imports nifty from the Python path.
#!/bin/bash
apt-get install -y build-essential git autoconf libtool pkg-config \
libfftw3-bin libfftw3-dev libfftw3-double3 libfftw3-long3 libfftw3-quad3 libfftw3-single3 \
python python-pip python-dev python-nose python-numpy python-matplotlib python-future \
python3 python3-pip python3-dev python3-nose python3-numpy python3-matplotlib python3-future
parameterized
coverage
pyfftw
git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git@setuptools_test
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2021 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
#####################################################################
# Bernoulli reconstruction
# Reconstruct an event probability field with values between 0 and 1
# from the observed events
# 1D (set mode=0), 2D (mode=1), or on the sphere (mode=2)
#####################################################################
import numpy as np
import nifty.cl as ift
def main():
    """Reconstruct a Bernoulli event-probability field from mock event data."""
    # Choose the signal domain: 0 -> 1D grid, 1 -> 2D grid, anything else -> sphere.
    mode = 2
    if mode == 0:
        position_space = ift.RGSpace(1024)          # one-dimensional regular grid
    elif mode == 1:
        position_space = ift.RGSpace([512, 512])    # two-dimensional regular grid
    else:
        position_space = ift.HPSpace(128)           # HEALPix sphere

    # Harmonic partner domain and the transform back to position space.
    harmonic_space = position_space.get_default_codomain()
    ht = ift.HarmonicTransformOperator(harmonic_space, position_space)
    # NOTE(review): this draw is overwritten below; kept to preserve the
    # demo's random-number sequence exactly.
    position = ift.from_random(harmonic_space, 'normal')

    # Square-root power spectrum and the corresponding amplitude operator.
    def sqrtpspec(k):
        return 1./(20. + k**2)

    amplitude = ift.create_power_operator(harmonic_space, sqrtpspec)

    # Sky model: squash the correlated field through a sigmoid into (0, 1).
    sky = ift.sigmoid(ht(amplitude))
    gr = ift.GeometryRemover(position_space)
    response = gr

    # Draw a ground-truth position and sample Bernoulli mock data from it.
    signal_response = response(sky)
    mock_position = ift.from_random(harmonic_space, 'normal')
    event_probs = signal_response(mock_position).asnumpy().astype(np.float64)
    data = ift.random.current_rng().binomial(1, event_probs)
    data = ift.Field.from_raw(response.target, data)

    # Likelihood energy, Newton minimizer, and sampling controller.
    position = ift.from_random(harmonic_space, 'normal')
    likelihood_energy = ift.BernoulliEnergy(data) @ signal_response
    ic_newton = ift.DeltaEnergyController(
        name='Newton', iteration_limit=100, tol_rel_deltaE=1e-8)
    minimizer = ift.NewtonCG(ic_newton)
    ic_sampling = ift.GradientNormController(iteration_limit=100)

    # Build the Hamiltonian and minimize it starting from `position`.
    energy = ift.StandardHamiltonian(likelihood_energy, ic_sampling)
    energy = ift.EnergyAdapter(position, energy, want_metric=True)
    energy, convergence = minimizer(energy)
    reconstruction = sky(energy.position)

    # Show reconstruction, data, and ground truth side by side.
    plot = ift.Plot()
    plot.add(reconstruction, title='reconstruction')
    plot.add(gr.adjoint_times(data), title='data')
    plot.add(sky(mock_position), title='truth')
    plot.output(nx=3, xsize=16, ysize=9, title="results", name="bernoulli.png")


if __name__ == '__main__':
    main()
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019-2020 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
import nifty.cl as ift
def convtest(test_signal, delta, func):
    """Convolve `test_signal` with kernel `func`, apply the adjoint, and plot.

    `delta` is a delta-peak field on the same domain; convolving it produces
    an image of the kernel itself.
    """
    convolver = ift.FuncConvolutionOperator(test_signal.domain, func)

    # Forward convolution followed by its adjoint.
    convolved = convolver(test_signal)
    roundtrip = convolver.adjoint_times(convolved)

    # Integrals show how much total flux each step preserves.
    print(test_signal.integrate(), convolved.integrate(),
          roundtrip.integrate())

    # Image of the kernel, obtained by convolving the delta peak.
    kernel_image = convolver(delta)

    plot = ift.Plot()
    plot.add(test_signal, title='Signal')
    plot.add(convolved, title='Signal Convolved')
    plot.add(roundtrip, title='Signal, Conv, Adj-Conv')
    plot.add(kernel_image, title='Kernel')
    plot.output()
def main():
    """Run the convolution round-trip test on a HEALPix sphere and a 2D grid."""
    # --- HEALPix test ---
    nside = 64
    npix = 12 * nside * nside
    sphere = ift.HPSpace(nside)

    # Test signal: a sparse set of point sources on the sphere.
    src = np.zeros(npix, dtype=np.float64)
    src[0:npix:npix//12 + 27] = 500.
    signal = ift.makeField(sphere, src)

    # Delta peak at the first pixel, used to image the kernel.
    peak = np.zeros(npix, dtype=np.float64)
    peak[0] = 1.0
    delta = ift.makeField(sphere, peak)

    # Annular kernel: 1 where 0.7 < cos(theta) <= 0.8, else 0.
    def func(theta):
        ct = np.cos(theta)
        return 1. * np.logical_and(ct > 0.7, ct <= 0.8)

    convtest(signal, delta, func)

    # --- Regular-grid test ---
    grid = ift.RGSpace((100, 100))

    # Test signal: two bright point sources.
    src = np.zeros(grid.shape, dtype=np.float64)
    src[35, 70] = 5000.
    src[45, 8] = 5000.
    signal = ift.makeField(grid, src)

    # Delta peak in the corner for the kernel image.
    peak = np.zeros(grid.shape, dtype=np.float64)
    peak[0, 0] = 1.0
    delta = ift.makeField(grid, peak)

    # Ring kernel on the grid: 1 where 0.1 < distance <= 0.2, else 0.
    convtest(signal, delta, lambda d: 1. * np.logical_and(d > 0.1, d <= 0.2))


if __name__ == '__main__':
    main()
#!/usr/bin/env python3
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2021 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
############################################################
# Density estimation
#
# Compute a density estimate for a log-normal process measured by a
# Poissonian likelihood.
#
# Demo takes a while to compute
#############################################################
import nifty.cl as ift
if __name__ == "__main__":
    # Output files: getting_started_density_{setup,loop_XX,results}.png
    filename = "getting_started_density_{}.png"
    # Fix the random seed so the demo is reproducible.
    ift.random.push_sseq_from_seed(42)
    # Set up signal domain
    npix1 = 128
    npix2 = 128
    sp1 = ift.RGSpace(npix1)
    sp2 = ift.RGSpace(npix2)
    position_space = ift.DomainTuple.make((sp1, sp2))
    # Log-normal density model; `ops` exposes internal operators such as the
    # underlying correlated field.
    signal, ops = ift.density_estimator(position_space)
    correlated_field = ops["correlated_field"]
    data_space = signal.target
    # Generate mock signal and data
    rng = ift.random.current_rng()
    mock_position = ift.from_random(signal.domain, "normal")
    # Poisson-distributed counts with the mock signal as expectation value.
    data = ift.Field.from_raw(data_space, rng.poisson(signal(mock_position).asnumpy()))
    # Rejoin domains for plotting
    plotting_domain = ift.DomainTuple.make(ift.RGSpace((npix1, npix2)))
    plotting_domain_expanded = ift.DomainTuple.make(ift.RGSpace((2 * npix1, 2 * npix2)))
    plot = ift.Plot()
    plot.add(
        ift.Field.from_raw(
            plotting_domain_expanded, ift.exp(correlated_field(mock_position)).asnumpy()
        ),
        title="Pre-Slicing Truth",
    )
    plot.add(
        ift.Field.from_raw(plotting_domain, signal(mock_position).asnumpy()),
        title="Ground Truth",
    )
    plot.add(ift.Field.from_raw(plotting_domain, data.asnumpy()), title="Data")
    plot.output(ny=1, nx=3, xsize=10, ysize=3, name=filename.format("setup"))
    print("Setup saved as", filename.format("setup"))
    # Minimization parameters
    ic_sampling = ift.AbsDeltaEnergyController(
        name="Sampling", deltaE=0.01, iteration_limit=100
    )
    ic_newton = ift.AbsDeltaEnergyController(
        name="Newton", deltaE=0.01, iteration_limit=35
    )
    # Record the energy histories so they can be plotted in the loop below.
    ic_sampling.enable_logging()
    ic_newton.enable_logging()
    minimizer = ift.NewtonCG(ic_newton, enable_logging=True)
    # Number of samples used to estimate the KL
    n_samples = 5
    # Set up likelihood energy and information Hamiltonian
    likelihood_energy = ift.PoissonianEnergy(data) @ signal
    ham = ift.StandardHamiltonian(likelihood_energy, ic_sampling, float)
    # Start minimization from a small random latent position.
    initial_mean = 1e-2 * ift.from_random(ham.domain)
    mean = initial_mean
    for i in range(3):
        # Draw new samples and minimize KL
        kl = ift.SampledKLEnergy(mean, ham, n_samples, None)
        kl, convergence = minimizer(kl)
        mean = kl.position
        # Plot current reconstruction
        plot = ift.Plot()
        # NOTE(review): the first two panels share the title "Ground truth";
        # the first shows the un-sliced field (cf. "Pre-Slicing Truth" above).
        plot.add(
            ift.Field.from_raw(
                plotting_domain_expanded, ift.exp(correlated_field(mock_position)).asnumpy()
            ),
            title="Ground truth",
        )
        plot.add(
            ift.Field.from_raw(plotting_domain, signal(mock_position).asnumpy()),
            title="Ground truth",
        )
        plot.add(
            ift.Field.from_raw(plotting_domain, kl.samples.average(signal).asnumpy()),
            title="Reconstruction",
        )
        # NOTE(review): verify that the label order matches the history tuple.
        plot.add(
            (ic_newton.history, ic_sampling.history, minimizer.inversion_history),
            label=["kl", "Sampling", "Newton inversion"],
            title="Cumulative energies",
            s=[None, None, 1],
            alpha=[None, 0.2, None],
        )
        plot.output(
            nx=3, ny=2, ysize=10, xsize=15, name=filename.format(f"loop_{i:02d}")
        )
    # Done, compute posterior statistics
    mean, var = kl.samples.sample_stat(signal)
    mean_unsliced, var_unsliced = kl.samples.sample_stat(correlated_field.exp())
    # Plotting
    plot = ift.Plot()
    plot.add(ift.Field.from_raw(plotting_domain, mean.asnumpy()), title="Posterior Mean")
    plot.add(
        ift.Field.from_raw(plotting_domain, ift.sqrt(var).asnumpy()),
        title="Posterior Standard Deviation",
    )
    plot.add(
        ift.Field.from_raw(plotting_domain_expanded, mean_unsliced.asnumpy()),
        title="Posterior Unsliced Mean",
    )
    plot.add(
        ift.Field.from_raw(plotting_domain_expanded, ift.sqrt(var_unsliced).asnumpy()),
        title="Posterior Unsliced Standard Deviation",
    )
    plot.output(xsize=15, ysize=15, name=filename.format("results"))
    print("Saved results as", filename.format("results"))
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.15.0
# kernelspec:
# display_name: Python 3 (ipykernel)
# language: python
# name: python3
# ---
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2022 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
# + [markdown] slideshow={"slide_type": "slide"}
# # Code example: Wiener filter
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Introduction to Information Field Theory (IFT)
# We start with the measurement equation
#
# $$d_i = (Rs)_i+n_i$$
#
# Here, $s$ is a continuous field, $d$ a discrete data vector, $n$ is the discrete noise on each data point and $R$ is the instrument response.
# In most cases, $R$ is not analytically invertible.
# IFT aims at **inverting** the above uninvertible problem in the **best possible way** using Bayesian statistics.
#
# NIFTy (Numerical Information Field Theory) is a Python framework in which IFT problems can be tackled easily.
#
# Its main interfaces are:
#
# - **Spaces**: Cartesian, 2-Spheres (Healpix, Gauss-Legendre) and their respective harmonic spaces.
# - **Fields**: Defined on spaces.
# - **Operators**: Acting on fields.
#
# + [markdown] slideshow={"slide_type": "subslide"}
# ## Wiener filter on 1D- fields in IFT
#
# ### Assumptions
# - We consider a linear response R in the measurement equation $d=Rs+n$.
# - We assume the **signal** and the **noise** prior to be **Gaussian** $\mathcal P (s) = \mathcal G (s,S)$, $\mathcal P (n) = \mathcal G (n,N)$.
# - Here $S, N$ are signal and noise covariances. Therefore they are positive definite matrices.
#
# ### Wiener filter solution
# - Making use of Bayes' theorem, the posterior is proportional to the joint probability and is given by:
#
# $$\mathcal P (s|d) \propto P(s,d) = \mathcal G(d-Rs,N) \,\mathcal G(s,S) \propto \mathcal G (s-m,D)$$
#
# - Here, $m$ is the posterior mean and $D$ is the information propagator; they are defined as follows:
#
# $$m = Dj, \quad D = (S^{-1} +R^\dagger N^{-1} R)^{-1} $$
#
# - There, $j$ is the information source defined as $j = R^\dagger N^{-1} d$.
#
# Let us implement this in **NIFTy!**
# -
# %matplotlib inline
import numpy as np
import nifty.cl as ift
import matplotlib.pyplot as plt
plt.rcParams['figure.dpi'] = 100
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Implementation in NIFTy
#
# We assume statistical **homogeneity** and **isotropy**, so the signal covariance $S$ is **translation invariant** and only depends on the **absolute value** of the distance.
# According to Wiener-Khinchin theorem, the signal covariance $S$ is diagonal in harmonic space, $S_{kk^{\prime}} = 2 \pi \delta(k-k^{\prime}) P(k)= \text{diag}(S) \equiv \widehat{S_k}$ and is described by a one-dimensional power spectrum.
# We assume the power spectrum to follow a power-law, $P(k) = P_0\,\left(1+\left(\frac{k}{k_0}\right)^2\right)^{-\gamma /2},$ with $P_0 = 2 \cdot 10^4, \ k_0 = 5, \ \gamma = 4$, thus the reconstruction starts in harmonic space.
# + slideshow={"slide_type": "-"}
def pow_spec(k):
    """Power-law prior power spectrum P(k) = P0 / (1 + (k/k0)^2)^(gamma/2)
    with P0 = 2e4, k0 = 5 and gamma = 4."""
    amplitude, pivot, slope = 2e4, 5, 4
    return amplitude / ((1. + (k/pivot)**2)**(slope / 2))
# -
# ### Spaces and harmonic transformations
# - We define our non-harmonic signal space to be Cartesian with $N_{pix} = 512$ being the number of grid cells.
# - To connect harmonic and non-harmonic spaces we introduce the Hartley transform $H$ that is closely related to the Fourier transform but maps $\mathbb{R}\rightarrow\mathbb{R}$.
# - The covariance S in non-harmonic space is given by $S = H^{\dagger}\widehat{S_k} H \ .$
# Signal space is a regular Cartesian grid space
N_pix = 512
s_space = ift.RGSpace(N_pix)
# k_space is the harmonic conjugate space of s_space
HT = ift.HartleyOperator(s_space)
k_space = HT.target
# Diagonal signal covariance in harmonic space, defined by the power spectrum.
S_k = ift.create_power_operator(k_space, power_spectrum=pow_spec,
                                sampling_dtype=float)
# The SandwichOperator implements S = HT.adjoint @ S_k @ HT and enables NIFTy
# to sample from S
S = ift.SandwichOperator.make(bun=HT, cheese=S_k)
# ### Synthetic Data
# - In order to demonstrate the Wiener filter, we use **synthetic data**. Therefore, we draw a sample $\tilde{s}$ from $S$. (see Sampling)
# - For simplicity we define the response operator as a unit matrix, $R = \mathbb{1}$.
# - We assume the noise covariance to be uncorrelated and constant, $N = 0.2 \cdot \mathbb{1}$ and draw a sample $\tilde{n}$.
# - Thus the synthetic data $d = R(\tilde{s}) + \tilde{n}$.
# ### Sampling
#
# - Assume we have a distribution $\mathcal{G}(b,B)$ we can sample from, and we want to draw a sample from a distribution $\mathcal{G}(c,C)$ with covariance $C$. The two distributions are connected via the relation $C = ABA^{\dagger}$. One can show that $c = Ab$ with $b \curvearrowleft \mathcal{G}(b,B)$ has a probability distribution with covariance $C$ as desired.
#
# $$ \langle cc^\dagger\rangle_{\mathcal{G}(c,C)} = \langle Ab(Ab)^\dagger\rangle_{\mathcal{G}(b,B)} = \langle Abb^\dagger A^\dagger \rangle = A \langle bb^\dagger \rangle A^\dagger = ABA^\dagger = C$$
#
# - This is also true for the case $B = \mathbb{1}$, i.e. $b$ is drawn from $\mathcal{G}(b,\mathbb{1})$; thus $C = AA^{\dagger}$.
# - Note that, if $C$ is diagonal, $A$ is diagonal as well.
# - It can be shown that if $C = A + B$, then $c = a + b$ with $b \curvearrowleft \mathcal{G}(b,B)$ and $a \curvearrowleft \mathcal{G}(a,A)$ has a probability distribution with covariance $C$ as desired.
# - If we can draw samples from $\mathcal{G}(a,A)$, and we want to draw a sample with the covariance $A^{-1}$, one can simply show that $c = A^{-1}a$ has a probability distribution with covariance $A^{-1}$.
#
# $$\langle c c^{\dagger} \rangle = \langle A^{-1}aa^{\dagger}(A^{-1})^{\dagger} \rangle = A^{-1}\langle aa^{\dagger}\rangle(A^{-1})^{\dagger} = A^{-1} A(A^{-1})^{\dagger} =A^{-1}$$
#
# as we assume $A^{-1}$ to be Hermitian.
#
# By this brief introduction to sampling, we apply it in order to get the synthetic data.
# All of these sampling rules are implemented in NIFTy so we do not need to take care of them.
# Draw a sample from the signal prior with covariance S.
s = S.draw_sample()
# Define the response operator that removes the geometry, i.e. it removes
# distances and volumes.
R = ift.GeometryRemover(s_space)
# Define the data space that has an unstructured domain.
d_space = R.target
# +
noiseless_data = R(s)
# This is the multiplicative factor going from a sample with unit covariance to N.
noise_amplitude = np.sqrt(0.2)
# Define the noise covariance N = 0.2 * identity.
N = ift.ScalingOperator(d_space, noise_amplitude**2, float)
# Draw a sample from the noise with covariance N.
n = N.draw_sample()
# Synthetic data d = R(s) + n
d = noiseless_data + n
# -
# ### Information source and information propagator
#
# Now that we have the synthetic data, we are one step closer to the Wiener filter!
# In order to apply the Wiener filter to the data we first need to define the information source $j$ along with the information propagator $D$.
# +
# Define the information source j = R^dagger N^{-1} d.
j = R.adjoint(N.inverse(d))
# Iteration controller for the conjugate-gradient inversion below.
ic = ift.GradientNormController(iteration_limit=50000, tol_abs_gradnorm=0.1)
# Inverse information propagator D^{-1} = S^{-1} + R^dagger N^{-1} R.
D_inv = S.inverse + R.adjoint @ N.inverse @ R
# Enable .inverse to invert D via Conjugate Gradient.
D_inv = ift.InversionEnabler(D_inv, ic)
D = D_inv.inverse
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Apply Wiener Filter
#
# After defining the information source and propagator, we are able to apply the Wiener filter in order to get the posterior mean $m = \langle s \rangle_{\mathcal{P}(s|d)}$ that is our reconstruction of the signal:
# + slideshow={"slide_type": "-"}
# Wiener filter solution m = D j.
m = D(j)
# + [markdown] slideshow={"slide_type": "subslide"}
# ### Results
# + slideshow={"slide_type": "-"}
# `.asnumpy()` retrieves the underlying numpy array from a NIFTy Field.
plt.plot(s.asnumpy(), 'r', label="signal ground truth", linewidth=2)
plt.plot(d.asnumpy(), 'k.', label="noisy data")
plt.plot(m.asnumpy(), 'k', label="posterior mean",linewidth=2)
plt.title("Reconstruction")
plt.legend()
plt.show()
# -
# To show the deviations with respect to the true signal (or ground truth), we plot the residuals as follows:
# + slideshow={"slide_type": "subslide"}
# Residuals relative to the ground truth; s - s is the zero reference line.
plt.plot(s.asnumpy() - s.asnumpy(), 'r', label="ground truth ref [$s-s$]", linewidth=2)
plt.plot(d.asnumpy() - s.asnumpy(), 'k.', label="noise [$d-Rs$]")
plt.plot(m.asnumpy() - s.asnumpy(), 'k', label="posterior mean - ground truth",linewidth=2)
# Shaded band indicating the one-sigma noise level.
plt.axhspan(-noise_amplitude,noise_amplitude, facecolor='0.9', alpha=.5)
plt.title("Residuals")
plt.legend()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Wiener Filter on Incomplete Data
#
# Now we consider a case that the data is not complete.
# This might be the case in real situations as the instrument might not be able to receive data.
# In order to apply the Wiener filter to this case, we first need to build the response corresponding to the incomplete measurement in NIFTy!
# -
# ### Incomplete Measuring / Masking
# We need to build a mask operator which cuts out all the unobserved parts of the signal.
# Lets assume that we first observe the signal for some time, but then something goes wrong with our instrument and we don't collect data for a while.
# After fixing the instrument we can collect data again.
# This means that data lives on an unstructured domain as there is data missing for the period of time $t_{\text{off}}$ when the instrument was offline.
# In order to implement this incomplete measurement we need to define a new response operator $R$ which masks the signal for the time $t_{\text{off}}$.
#
# + slideshow={"slide_type": "-"}
# Whole observation time
npix = s_space.size
# Time when the instrument is turned off
l = int(npix * 0.2)
# Time when the instrument is turned on again
h = int(npix * 0.4)
# Initialise a new array for the whole time frame
mask = np.zeros(s_space.shape, bool)
# Define the mask: True (1) marks the unobserved stretch [l, h).
mask[l:h] = 1
# Turn the numpy array into a NIFTy field
mask = ift.makeField(s_space, mask)
# Define the response operator which masks the places where mask == 1
R = ift.MaskOperator(mask)
# -
# ### Synthetic Data
# As in the Wiener filter example with complete data, we generate some synthetic data now by propagating the previously drawn prior sample through the incomplete measurement response and adding a noise sample.
# Define the noise covariance on the (smaller) masked data space.
N = ift.ScalingOperator(R.target, noise_amplitude**2, float)
# Draw a noise sample
n = N.draw_sample()
# Measure the signal sample with additional noise
d = R(s) + n
# ### Sampling from D
# Since we have an incomplete measurement we want to know how uncertain we are about our Wiener filter solution. We can easily obtain both, the mean and the standard deviation by sampling from $D$ and computing them directly from the drawn samples.
# In order to enable NIFTy to sample from $D$ we need to use some helper functions.
# + slideshow={"slide_type": "skip"}
# This implements the rule for how to sample from a sum of covariances.
D_inv = ift.SamplingEnabler(ift.SandwichOperator.make(cheese=N.inverse, bun=R),
                            S.inverse, ic, S.inverse)
# Allow for numerical inversion
D_inv = ift.InversionEnabler(D_inv, ic)
D = D_inv.inverse
# Define the information source
j = R.adjoint(N.inverse(d))
# Posterior mean
m = D(j)
# Number of samples to calculate the posterior standard deviation
n_samples = 200
# StatCalculator accumulates the mean and the variance of a set of samples
# efficiently.
sc = ift.StatCalculator()
for _ in range(n_samples):
    # Draw a zero-centred sample from G(0, D) and shift it by m,
    # i.e. obtain a sample from G(s - m, D).
    sample = m + D.draw_sample()
    # Add it to the StatCalculator
    sc.add(sample)
# Standard deviation from samples
samples_std = sc.var.sqrt()
# Mean from samples that converges to m in the limit of infinitely many samples
samples_mean = sc.mean
# -
# ### Plots
# Let us visualize the results of the Wiener filter $m$, the sampled standard deviation and mean, as well as the true signal (ground truth) and the data.
# Since the data lives in data space, we first need to project it back into the signal space via $R^{\dagger}d$.
# + slideshow={"slide_type": "skip"}
plt.axvspan(l, h, facecolor='0.8',alpha=0.3, label="masked area")
plt.plot(s.asnumpy(), '#f28109', label="Signal (ground truth)", alpha=1, linewidth=2)
plt.plot(m.asnumpy(), 'k', label="Posterior mean (reconstruction)", linewidth=2)
# One-sigma uncertainty band around the posterior mean.
plt.fill_between(range(m.size), (m - samples_std).asnumpy(), (m + samples_std).asnumpy(),
                 facecolor='#8592ff', alpha=0.8, label="Posterior std (samples)")
plt.plot(samples_mean.asnumpy(), 'k--', label="Posterior mean (samples)")
# `.asnumpy()` would return a read-only array; `.asnumpy_rw()` returns a writeable copy.
data_s_space = R.adjoint(d).asnumpy_rw()
# Remove the "0" data points in the masked region by setting them to NaN so
# matplotlib skips them.
data_s_space[l:h] = np.nan
plt.plot(data_s_space, 'k.', label="Data")
plt.title("Reconstruction of incomplete data")
plt.legend()
plt.show()
# + [markdown] slideshow={"slide_type": "slide"}
# ## Wiener Filter standardized
# This problem can be solved in various coordinates and thus we can also choose the coordinate system, in which the prior $\mathcal{P}(s)=\mathcal{G}(0,\mathbb{1})$. And therefore the coordinate transformation from this to our original space is part of the prior description.
# +
# Coordinate transformation from Gaussian white noise G(0, 1) to our prior
# G(s, S) from above.
sqrt_pspec = S_k(ift.full(S_k.domain, 1.)).sqrt()
coord_trafo = HT.adjoint @ ift.makeOp(sqrt_pspec)
# The full response from the standardized space to data space is a concatenation
# of R and the transformation
R_std = R @ coord_trafo
j_std = R_std.adjoint(N.inverse(d))
# The standardized prior covariance is now an identity (unit) operator.
S_std = ift.Operator.identity_operator(R_std.domain)
# The new inverse information propagator in standardized coordinates.
Dinv_std = ift.InversionEnabler(S_std + R_std.adjoint @ N.inverse @ R_std, ic)
D_std = Dinv_std.inverse
m_std = D_std(j_std)
# -
# We can see that we get the same result.
# Transform the standardized solution back to signal space for comparison.
posterior_mean_s_space = coord_trafo(m_std)
plt.axvspan(l, h, facecolor='0.8',alpha=0.5)
plt.plot(s.asnumpy(), 'r', label="Signal", alpha=1, linewidth=2)
plt.plot(data_s_space, 'k.', label="Data")
plt.plot(posterior_mean_s_space.asnumpy(), 'k', label="Reconstruction", linewidth=2)
plt.title("Reconstruction of incomplete data in normalized coordinates")
plt.legend()
plt.show()
# Often it is easier to solve problems in these standardized parameter coordinates. For more information read:
#
# - Knollmüller, Jakob, and Torsten A. Enßlin. "Metric Gaussian variational inference." arXiv preprint arXiv:1901.11033 (2019).
# - Frank, Philipp, Reimar Leike, and Torsten A. Enßlin. "Geometric variational inference." Entropy 23.7 (2021): 853.
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2020 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
###############################################################################
# Compute a Wiener filter solution with NIFTy
# Shows how measurement gaps are filled in
# 1D (set mode=0), 2D (mode=1), or on the sphere (mode=2)
###############################################################################
import sys
import numpy as np
import nifty.cl as ift
def make_checkerboard_mask(position_space):
    """Return a 4x4 checkerboard mask (values 0/1) for a 2D position space.

    The domain is divided into a 4x4 grid of equally sized tiles; tiles whose
    (row + column) index sum is even are masked (set to 0), the remaining
    tiles stay observed (1).

    Parameters
    ----------
    position_space : object with a 2D ``.shape`` attribute
        The domain for which the mask is built.

    Returns
    -------
    numpy.ndarray
        Array of shape ``position_space.shape`` containing 0.0 and 1.0.
    """
    # Derive the tile boundaries from the actual domain shape instead of the
    # previously hard-coded 128 pixels per axis, so non-128x128 domains get a
    # correct checkerboard as well.
    nx, ny = position_space.shape
    mask = np.ones(position_space.shape)
    for i in range(4):
        for j in range(4):
            if (i + j) % 2 == 0:
                mask[i*nx//4:(i + 1)*nx//4, j*ny//4:(j + 1)*ny//4] = 0
    return mask
def make_random_mask(domain):
    # Random mask for the spherical mode: draw +/-1 uniformly per pixel and
    # map {-1, +1} -> {0, 1}, so roughly half of the pixels are masked.
    pm_field = ift.from_random(domain, 'pm1')
    return ((pm_field + 1)/2).asnumpy()
def main():
    """Wiener filter demo: reconstruct a partially observed Gaussian field.

    The measurement mode is read from the command line: 0 -> 1D regular grid
    (no mask), 1 -> 2D grid with checkerboard mask, any other value ->
    Healpix sphere with a random mask. Saves the resulting plots to
    ``getting_started_1_mode_<mode>.png``.
    """
    # Choose space on which the signal field is defined
    if len(sys.argv) == 2:
        mode = int(sys.argv[1])
    else:
        mode = 1
    if mode == 0:
        # One-dimensional regular grid
        position_space = ift.RGSpace([1024])
        mask = np.zeros(position_space.shape)
    elif mode == 1:
        # Two-dimensional regular grid with checkerboard mask
        position_space = ift.RGSpace([128, 128])
        mask = make_checkerboard_mask(position_space)
    else:
        # Sphere with half of its pixels randomly masked
        position_space = ift.HPSpace(128)
        mask = make_random_mask(position_space)
    # Specify harmonic space corresponding to signal
    harmonic_space = position_space.get_default_codomain()
    # Harmonic transform from harmonic space to position space
    HT = ift.HarmonicTransformOperator(harmonic_space, target=position_space)

    # Set prior correlation covariance with a power spectrum leading to
    # homogeneous and isotropic statistics
    def power_spectrum(k):
        # Falling power spectrum; large-scale modes carry most of the power.
        return 100./(20. + k**3)

    # 1D spectral space on which the power spectrum is defined
    power_space = ift.PowerSpace(harmonic_space)
    # Mapping to (higher dimensional) harmonic space
    PD = ift.PowerDistributor(harmonic_space, power_space)
    # Apply the mapping
    prior_correlation_structure = PD(ift.PS_field(power_space, power_spectrum))
    # Insert the result into the diagonal of an harmonic space operator
    S = ift.makeOp(prior_correlation_structure, sampling_dtype=float)
    # S is the prior field covariance
    # Build instrument response consisting of a discretization, mask
    # and harmonic transformation
    # Masking operator to model that parts of the field have not been observed
    mask = ift.Field.from_raw(position_space, mask)
    Mask = ift.MaskOperator(mask)
    # The response operator consists of
    # - a harmonic transform (to get to image space)
    # - the application of the mask
    # - the removal of geometric information
    # The removal of geometric information is included in the MaskOperator;
    # it can also be implemented with a GeometryRemover.
    # Operators can be composed either with parenthesis
    R = Mask(HT)
    # or with @
    R = Mask @ HT
    data_space = R.target
    # Set the noise covariance N
    noise = 5.
    N = ift.ScalingOperator(data_space, noise, float)
    # Create mock data
    MOCK_SIGNAL = S.draw_sample()
    MOCK_NOISE = N.draw_sample()
    data = R(MOCK_SIGNAL) + MOCK_NOISE
    # Build inverse propagator D and information source j
    D_inv = R.adjoint @ N.inverse @ R + S.inverse
    j = R.adjoint_times(N.inverse_times(data))
    # Make D_inv invertible (via Conjugate Gradient)
    IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
    D = ift.InversionEnabler(D_inv, IC, approximation=S.inverse).inverse
    # Calculate WIENER FILTER solution
    m = D(j)
    # Plotting
    rg = isinstance(position_space, ift.RGSpace)
    plot = ift.Plot()
    filename = "getting_started_1_mode_{}.png".format(mode)
    if rg and len(position_space.shape) == 1:
        # 1D case: overlay signal, data and reconstruction in one panel.
        plot.add(
            [HT(MOCK_SIGNAL), Mask.adjoint(data),
             HT(m)],
            label=['Mock signal', 'Data', 'Reconstruction'],
            alpha=[1, .3, 1])
        plot.add(Mask.adjoint(Mask(HT(m - MOCK_SIGNAL))), title='Residuals')
        plot.output(nx=2, ny=1, xsize=10, ysize=4, name=filename)
    else:
        # 2D / spherical case: one panel per quantity.
        plot.add(HT(MOCK_SIGNAL), title='Mock Signal')
        plot.add(Mask.adjoint(data), title='Data')
        plot.add(HT(m), title='Reconstruction')
        plot.add(Mask.adjoint(Mask(HT(m) - HT(MOCK_SIGNAL))),
                 title='Residuals')
        plot.output(nx=2, ny=2, xsize=10, ysize=10, name=filename)
    print("Saved results as '{}'.".format(filename))


if __name__ == '__main__':
    main()
#!/usr/bin/env python
# %% [markdown]
# # Nonlinear Models in NIFTy
# 1. Posterior: We would like to know $P(\theta|d)$ with $\theta$ the sky
# brightness and $d$ measured count data of the sky brightness
# 1. Likelihood: We assume that $P(d|\theta)$ is a Poisson distribution
# 1. Prior: We assume that the sky brightness is a priori log-normal and
# $\log \theta$ is spatially smooth
#
# To build a model in NIFTy, we go bottom-up and start from the prior; next we
# define the likelihood and finally retrieve (an approximation to) the posterior.
# %%
import nifty.cl as ift
# %% [markdown]
# ## Prior
#
# In NIFTy, we always start from a standard normal prior.
# Thus, instead of trying to directly create a smooth log-normal $\theta$, we
# instead ask ourselves (1) how we can make a standard normal smooth and
# afterwards (2) how we can make it log-normal.
# %%
position_space = ift.RGSpace([64, 64]) # domain on which our parameters live
# We need to apply the sqrt of the power spectrum to give a standard normal
# prior the desired power spectrum.
def power_spectrum_sqrt(k):
    """Square root of the prior power spectrum, sqrt(1 / (20 + k^4))."""
    spectrum = 1.0 / (20.0 + k**4)
    return spectrum**0.5
p_space = ift.PowerSpace(position_space.get_default_codomain())
# Create an operator to distribute the power of the power spectrum to individual
# modes, assuming the underlying field to be isotropic
pd = ift.PowerDistributor(position_space.get_default_codomain(), p_space)
a = ift.PS_field(p_space, power_spectrum_sqrt)
amplitude = pd(a)
amplitude = ift.makeOp(amplitude)
# Harmonic transform from the amplitude's harmonic domain back to position space.
harmonic2pos = ift.HarmonicTransformOperator(amplitude.target, position_space)
# %%
# Draw standard-normal parameters and visualize the induced smooth field.
r = ift.from_random(amplitude.domain)
ift.single_plot(harmonic2pos(amplitude(r)))
# %% [markdown]
# YAY, we achieved (1)
# %%
# Let's make it log-normal distributed. To do so we really only have to
# exponentiate it.
r = ift.from_random(amplitude.domain)
harmonic2pos = ift.HarmonicTransformOperator(amplitude.target, position_space)
ift.single_plot(ift.exp(harmonic2pos(amplitude(r))))
# %%
# We can also apply the operators to one another to retrieve a new operator that
# joins all of them. Here we create an operator to propagate our standard
# normally distributed prior parameters to smooth log-normal distributed
# parameters.
signal = ift.exp(harmonic2pos(amplitude))
# %% [markdown]
# YAY, we achieved (2)!
# %% [markdown]
# ## Likelihood
#
# We've done (1) and (2). Next, let us look at the likelihood $P(d|\theta)$.
# %%
# In any real life application, one would read in the actual data here. For
# simplicity, we synthetically create some data from our model.
# Create synthetic "true" latent parameters and propagate them through the model
r = ift.from_random(signal.domain)
synthetic_signal_realization = signal(r)
# Retrieve synthetic noisy data
rng = ift.random.current_rng()  # numpy random number generator
# Poisson counts with the synthetic signal as expectation value.
synthetic_data = rng.poisson(
    lam=synthetic_signal_realization.asnumpy(), size=position_space.shape
)
synthetic_data = ift.makeField(position_space, synthetic_data)
# %%
# Poisson likelihood energy for the synthetic count data.
likelihood = ift.PoissonianEnergy(synthetic_data)
# %% [markdown]
# ## Posterior
#
# We now have our prior model and our likelihood model.
# Let's do some inference!
# %%
forward = likelihood @ signal
# NOTE, the optimization method only works with models that have named parameters.
# Thus, we need to give our parameters a name. This is usually not necessary
# for more complicated models (e.g. the correlated field model) as they are
# automatically assigned a name.
forward = forward.ducktape("domain")
ic_sampling = ift.DeltaEnergyController(
    name="Sampling", iteration_limit=200, tol_rel_deltaE=1e-8
)
ic_newton = ift.DeltaEnergyController(
    name="Newton", iteration_limit=35, tol_rel_deltaE=1e-8
)
minimizer = ift.NewtonCG(ic_newton)
# Increase this number (and/or the convergence criteria in `ic_*`) if you don't
# think your model converged yet
n_vi_iterations = 5
# Increase this number if you believe you got stuck in a weird local minimum
n_samples = 4
state = ift.optimize_kl(forward, n_vi_iterations, n_samples, minimizer, ic_sampling)
# %%
# Propagate each posterior sample through the signal model for plotting.
posterior_signal_samples = [
    signal.ducktape("domain")(sample) for sample in state.iterator()
]
p = ift.Plot()
p.add(synthetic_data, title="Synthetic Data")
for i in range(3):  # Show the first three samples
    p.add(posterior_signal_samples[i], title=f"Sample {i+1:02d}")
p.output()
# %%
p = ift.Plot()
# Posterior mean and variance of the signal computed from the samples.
m, v = state.sample_stat(signal.ducktape("domain"))
s = v**0.5
p.add(synthetic_signal_realization, title="Synthetic Signal Realization")
p.add(m, title="Posterior Mean")
p.add(s, title="Posterior Standard Deviation")
p.add(s / m, title="Posterior Relative Uncertainty")
p.output()
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2022 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
############################################################
# Non-linear tomography
#
# The signal is a sigmoid-normal distributed field.
# The data is the field integrated along lines of sight that are
# randomly (set mode=0) or radially (mode=1) distributed
#
# Demo takes a while to compute
#############################################################
import sys
import numpy as np
import nifty.cl as ift
# Fix the random seed so repeated runs generate the same mock data.
ift.random.push_sseq_from_seed(27)
# Set up MPI if mpi4py is available; otherwise fall back to serial execution.
# `master` is True on rank 0 (and always True when running serially).
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    # for models with latent vectors > 2 GiB use `pkl5` communicators:
    #from mpi4py.util import pkl5
    #comm = pkl5.Intracomm(comm)
    master = comm.Get_rank() == 0
except ImportError:
    comm = None
    master = True
def random_los(n_los):
    # Uniformly random start and end points in the unit square, returned as
    # ([x-coordinates], [y-coordinates]) lists.
    start_coords = ift.random.current_rng().random((n_los, 2))
    end_coords = ift.random.current_rng().random((n_los, 2))
    return list(start_coords.T), list(end_coords.T)
def radial_los(n_los):
    # Random start points in the unit square; every line of sight ends in the
    # center (0.5, 0.5). The rng draw for the end points is kept so the
    # generator state advances exactly as before.
    start_coords = ift.random.current_rng().random((n_los, 2))
    end_coords = 0.5 + 0*ift.random.current_rng().random((n_los, 2))
    return list(start_coords.T), list(end_coords.T)
def main():
    """Non-linear tomography demo: build model, simulate data, run inference, plot."""
    # Choose between random line-of-sight response (mode=0) and radial lines
    # of sight (mode=1)
    if len(sys.argv) == 2:
        mode = int(sys.argv[1])
    else:
        mode = 0
    # Filename template; the remaining slot is filled per output figure.
    filename = "getting_started_3_mode_{}_".format(mode) + "{}.png"
    # Two-dimensional regular grid with 128x128 pixels.
    position_space = ift.RGSpace([128, 128])
    # For a detailed showcase of the effects the parameters
    # of the CorrelatedField model have on the generated fields,
    # see 'getting_started_4_CorrelatedFields.ipynb'.
    args = {
        'offset_mean': 0,
        'offset_std': (1e-3, 1e-6),
        # Amplitude of field fluctuations
        'fluctuations': (1., 0.8),  # 1.0, 1e-2
        # Exponent of power law power spectrum component
        'loglogavgslope': (-3., 1),  # -6.0, 1
        # Amplitude of integrated Wiener process power spectrum component
        'flexibility': (2, 1.),  # 1.0, 0.5
        # How ragged the integrated Wiener process component is
        'asperity': (0.5, 0.4)  # 0.1, 0.5
    }
    correlated_field = ift.SimpleCorrelatedField(position_space, **args)
    pspec = correlated_field.power_spectrum
    # Apply a nonlinearity mapping the Gaussian field pointwise into (0, 1).
    signal = ift.sigmoid(correlated_field)
    # Build the line-of-sight response and define signal response
    LOS_starts, LOS_ends = random_los(100) if mode == 0 else radial_los(100)
    R = ift.LOSResponse(position_space, starts=LOS_starts, ends=LOS_ends)
    signal_response = R(signal)
    # Specify noise: scalar noise variance on the data space.
    data_space = R.target
    noise = .001
    N = ift.ScalingOperator(data_space, noise, np.float64)
    # Generate mock signal and data
    mock_position = ift.from_random(signal_response.domain, 'normal')
    data = signal_response(mock_position) + N.draw_sample()
    # Plot setup: ground truth, data backprojection, and prior power spectrum.
    plot = ift.Plot()
    plot.add(signal(mock_position), title='Ground Truth', vmin=0, vmax=1)
    plot.add(R.adjoint_times(data), title='Data')
    plot.add([pspec.force(mock_position)], title='Power Spectrum')
    plot.output(ny=1, nx=3, xsize=24, ysize=6, name=filename.format("setup"))
    # Minimization parameters
    ic_sampling = ift.AbsDeltaEnergyController(name="Sampling (linear)",
                                               deltaE=0.05, iteration_limit=100)
    ic_newton = ift.AbsDeltaEnergyController(name='Newton', deltaE=0.5,
                                             convergence_level=2, iteration_limit=35)
    ic_sampling_nl = ift.AbsDeltaEnergyController(name='Sampling (nonlin)',
                                                  deltaE=0.5, iteration_limit=15,
                                                  convergence_level=2)
    minimizer = ift.NewtonCG(ic_newton)
    minimizer_sampling = ift.NewtonCG(ic_sampling_nl)
    # Set up likelihood energy
    likelihood_energy = (ift.GaussianEnergy(data, inverse_covariance=N.inverse) @
                         signal_response)
    # Minimize KL
    n_iterations = 6
    # Fewer samples during the early iterations, more near convergence.
    n_samples = lambda iiter: 10 if iiter < 5 else 20
    # TODO Write callback for nice plots
    samples = ift.optimize_kl(likelihood_energy, n_iterations, n_samples, minimizer, ic_sampling,
                              nonlinear_sampling_minimizer=minimizer_sampling,
                              export_operator_outputs={"signal": signal},
                              output_directory="getting_started_3_results", comm=comm)
    # NOTE(review): `if True:` is a manual demo toggle — set it to False to
    # keep the in-memory samples instead of reloading them from disk.
    if True:
        # Load result from disk. May be useful for long inference runs, where
        # inference and posterior analysis are split into two steps
        samples = ift.ResidualSampleList.load("getting_started_3_results/pickle/latest", comm=comm)
    # Plotting
    filename_res = filename.format("results")
    plot = ift.Plot()
    mean, var = samples.sample_stat(signal)
    plot.add(mean, title="Posterior Mean", vmin=0, vmax=1)
    plot.add(var.sqrt(), title="Posterior Standard Deviation", vmin=0)
    nsamples = samples.n_samples
    # Average in log space so the spectrum mean is geometric, not arithmetic.
    logspec = pspec.log()
    plot.add(list(samples.iterator(pspec)) +
             [pspec.force(mock_position), samples.average(logspec).exp()],
             title="Sampled Posterior Power Spectrum",
             linewidth=[1.]*nsamples + [3., 3.],
             label=[None]*nsamples + ['Ground truth', 'Posterior mean'])
    if master:
        # Only rank 0 writes the final figure when running under MPI.
        plot.output(ny=1, nx=3, xsize=24, ysize=6, name=filename_res)
        print("Saved results as '{}'.".format(filename_res))
if __name__ == '__main__':
    main()
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.7
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # Showcasing the Correlated Field model
#
# **Skip to `Parameter Showcases` for the meat/veggies ;)**
#
# The field model roughly works like this:
#
# `f = HT( A * zero_mode * xi ) + offset`
#
# `A` is a spectral power field which is constructed from power spectra that hold on subdomains of the target domain.
# It is scaled by a zero mode operator and then pointwise multiplied by a gaussian excitation field, yielding
# a representation of the field in harmonic space.
# It is then transformed into the target real space, and an offset is added.
#
# The power spectra `A` is constructed of are in turn constructed as the sum of a power law component
# and an integrated Wiener process whose amplitude and roughness can be set.
#
# ## Preliminaries
#
# ### Setup code
# +
# %matplotlib inline
import nifty.cl as ift
import matplotlib.pyplot as plt
import numpy as np
# Render figures at a higher resolution than the matplotlib default.
plt.rcParams['figure.dpi'] = 100
# One-dimensional regular-grid signal domain with 256 pixels.
n_pix = 256
x_space = ift.RGSpace(n_pix)
# Fix the random seed so the notebook output is reproducible.
ift.random.push_sseq_from_seed(1)
# +
# Plotting routine
def plot(fields, spectra, title=None):
    """Show field realizations next to their power spectra.

    Plotting preparation is normally handled by nifty.Plot; it is done
    manually here to be able to tweak details. Fields are assumed to have
    identical domains.
    """
    fig = plt.figure(tight_layout=True, figsize=(10, 3))
    if title is not None:
        fig.suptitle(title, fontsize=14)
    # Left panel: the field realizations in position space.
    ax_field = fig.add_subplot(1, 2, 1)
    ax_field.axhline(y=0., color='k', linestyle='--', alpha=0.25)
    for fld in fields:
        dom = fld.domain[0]
        xcoord = dom.distances[0] * np.arange(dom.shape[0])
        ax_field.plot(xcoord, fld.asnumpy())
    ax_field.set_xlim(xcoord[0], xcoord[-1])
    ax_field.set_ylim(-5., 5.)
    ax_field.set_xlabel('x')
    ax_field.set_ylabel('f(x)')
    ax_field.set_title('Field realizations')
    # Right panel: the corresponding power spectra on log-log axes.
    ax_spec = fig.add_subplot(1, 2, 2)
    for spec in spectra:
        kvals = spec.domain[0].k_lengths
        pvals = spec.asnumpy_rw()
        # Copy the first nonzero bin into the k=0 slot so the zero mode
        # does not distort the log-log plot.
        pvals[0] = pvals[1]
        ax_spec.plot(kvals, pvals)
    ax_spec.set_ylim(1e-6, 10.)
    ax_spec.set_xscale('log')
    ax_spec.set_yscale('log')
    ax_spec.set_xlabel('k')
    ax_spec.set_ylabel('p(k)')
    ax_spec.set_title('Power Spectrum')
    fig.align_labels()
    plt.show()
# Helper: draw main sample
main_sample = None
def init_model(m_pars, fl_pars, matern=False):
    """Build a correlated field model and draw the shared `main_sample`.

    Parameters
    ----------
    m_pars : dict
        `CorrelatedFieldMaker` parameters: 'prefix', 'offset_mean',
        'offset_std'.
    fl_pars : dict
        Keyword arguments for the fluctuation model.
    matern : bool
        If True, parametrize the spectrum with `add_fluctuations_matern`
        instead of `add_fluctuations`.
    """
    global main_sample
    cf = ift.CorrelatedFieldMaker(m_pars["prefix"])
    cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"])
    # Plain conditional instead of an expression-statement ternary: only the
    # side effect on `cf` matters, so an `if`/`else` statement is clearer.
    if matern:
        cf.add_fluctuations_matern(**fl_pars)
    else:
        cf.add_fluctuations(**fl_pars)
    field = cf.finalize(prior_info=0)
    main_sample = ift.from_random(field.domain)
    print("model domain keys:", field.domain.keys())
# Helper: field and spectrum from parameter dictionaries + plotting
def eval_model(m_pars, fl_pars, title=None, samples=None, matern=False):
    """Realize the model for the given parameters and plot field + spectrum.

    Parameters
    ----------
    m_pars : dict
        `CorrelatedFieldMaker` parameters ('prefix', 'offset_mean',
        'offset_std').
    fl_pars : dict
        Keyword arguments for the fluctuation model.
    title : str, optional
        Title passed through to `plot`.
    samples : list, optional
        Latent samples to realize; defaults to the module-level
        `main_sample`.
    matern : bool
        If True, use `add_fluctuations_matern` instead of
        `add_fluctuations`.
    """
    cf = ift.CorrelatedFieldMaker(m_pars["prefix"])
    cf.set_amplitude_total_offset(m_pars["offset_mean"], m_pars["offset_std"])
    # Plain `if`/`else` instead of an expression-statement ternary — only the
    # side effect on `cf` is used, never the expression's value.
    if matern:
        cf.add_fluctuations_matern(**fl_pars)
    else:
        cf.add_fluctuations(**fl_pars)
    field = cf.finalize(prior_info=0)
    spectrum = cf.amplitude
    if samples is None:
        samples = [main_sample]
    field_realizations = [field(s) for s in samples]
    spectrum_realizations = [spectrum.force(s) for s in samples]
    plot(field_realizations, spectrum_realizations, title)
def gen_samples(key_to_vary):
    """Return 8 variants of `main_sample` that differ only in `key_to_vary`.

    With `key_to_vary=None`, the unmodified `main_sample` is returned as a
    one-element list.
    """
    if key_to_vary is None:
        return [main_sample]
    base = main_sample.to_dict()
    # Remove the entry to vary and remember its domain for redrawing.
    vary_domain = base.pop(key_to_vary).domain
    out = []
    for _ in range(8):
        out.append(ift.MultiField.from_dict(
            {**base, key_to_vary: ift.from_random(vary_domain)}))
    return out
def vary_parameter(parameter_key, values, samples_vary_in=None, matern=False):
    """Evaluate and plot the model while sweeping one parameter.

    Parameters
    ----------
    parameter_key : str
        Name of the parameter to sweep. Looked up first among the
        `CorrelatedFieldMaker` parameters (`cf_make_pars`); otherwise it is
        treated as a fluctuation parameter (`cf_x_fluct_pars`).
    values : iterable
        Values to assign to `parameter_key`; one plot is produced per value.
    samples_vary_in : str, optional
        Latent-space key to redraw between samples (see `gen_samples`).
    matern : bool
        Forwarded to `eval_model`.
    """
    s = gen_samples(samples_vary_in)
    for v in values:
        # Membership test directly on the dict — `.keys()` is redundant.
        if parameter_key in cf_make_pars:
            m_pars = {**cf_make_pars, parameter_key: v}
            eval_model(m_pars, cf_x_fluct_pars, f"{parameter_key} = {v}", s, matern)
        else:
            fl_pars = {**cf_x_fluct_pars, parameter_key: v}
            eval_model(cf_make_pars, fl_pars, f"{parameter_key} = {v}", s, matern)
# -
# ### Before the Action: The Moment-Matched Log-Normal Distribution
# Many properties of the correlated field are modelled as being lognormally distributed.
#
# The distribution models are parametrized via their means and standard-deviations (first and second position in tuple).
#
# To get a feeling of how the ratio of the `mean` and `stddev` parameters influences the distribution shape,
# here are a few example histograms: (observe the x-axis!)
# +
# Histograms of moment-matched log-normal samples for three mean/sigma ratios.
fig = plt.figure(figsize=(13, 3.5))
mean = 1.0
sigmas = [1.0, 0.5, 0.1]
for i in range(3):
    op = ift.LognormalTransform(mean=mean, sigma=sigmas[i],
                                key='foo', N_copies=0)
    # Push 10000 standard-normal latent samples through the transform.
    # (The comprehension's `i` has its own scope and does not clobber
    # the outer loop index in Python 3.)
    op_samples = np.array(
        [op(s).asnumpy() for s in [ift.from_random(op.domain) for i in range(10000)]])
    ax = fig.add_subplot(1, 3, i + 1)
    ax.hist(op_samples, bins=50)
    ax.set_title(f"mean = {mean}, sigma = {sigmas[i]}")
    ax.set_xlabel('x')
# Free the large sample array before moving on.
del op_samples
plt.show()
# -
# ## The Neutral Field
#
# To demonstrate the effect of all parameters, first a 'neutral' set of parameters
# is defined which then are varied one by one, showing the effect of the variation
# on the generated field realizations and the underlying power spectrum from which
# they were drawn.
#
# As a neutral field, a model with a white power spectrum and vanishing spectral power was chosen.
# +
# Neutral model parameters yielding a quasi-constant field
cf_make_pars = {
    'offset_mean': 0.,
    'offset_std': (1e-3, 1e-16),
    'prefix': ''
}
# All fluctuation amplitudes are tiny with vanishing stddevs, so the
# resulting power spectrum is effectively white with negligible power.
cf_x_fluct_pars = {
    'target_subdomain': x_space,
    'fluctuations': (1e-3, 1e-16),
    'flexibility': (1e-3, 1e-16),
    'asperity': (1e-3, 1e-16),
    'loglogavgslope': (0., 1e-16)
}
init_model(cf_make_pars, cf_x_fluct_pars)
# -
# Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field")
#
# ### The `fluctuations` parameters of `add_fluctuations()`
#
# determine the **amplitude of variations along the field dimension**
# for which `add_fluctuations` is called.
#
# `fluctuations[0]` sets the _average_ amplitude of the field's fluctuations along the given dimension,\
# `fluctuations[1]` sets the width and shape of the amplitude distribution.
#
# The amplitude is modelled as being log-normally distributed,
# see `The Moment-Matched Log-Normal Distribution` above for details.
#
# #### `fluctuations` mean:
vary_parameter('fluctuations', [(0.05, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi')
# #### `fluctuations` std:
vary_parameter('fluctuations', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='fluctuations')
# Fix the fluctuations amplitude for the following showcases.
cf_x_fluct_pars['fluctuations'] = (1., 1e-16)
# ### The `loglogavgslope` parameters of `add_fluctuations()`
#
# determine **the slope of the loglog-linear (power law) component of the power spectrum**.
#
# The slope is modelled to be normally distributed.
#
# #### `loglogavgslope` mean:
vary_parameter('loglogavgslope', [(-6., 1e-16), (-2., 1e-16), (2., 1e-16)], samples_vary_in='xi')
# #### `loglogavgslope` std:
vary_parameter('loglogavgslope', [(-2., 0.02), (-2., 0.2), (-2., 2.0)], samples_vary_in='loglogavgslope')
# Fix the power-law slope for the following showcases.
cf_x_fluct_pars['loglogavgslope'] = (-2., 1e-16)
# ### The `flexibility` parameters of `add_fluctuations()`
#
# determine **the amplitude of the integrated Wiener process component of the power spectrum**
# (how strong the power spectrum varies besides the power-law).
#
# `flexibility[0]` sets the _average_ amplitude of the integrated Wiener process component,\
# `flexibility[1]` sets how much the amplitude can vary.\
# These two parameters feed into a moment-matched log-normal distribution model,
# see above for a demo of its behavior.
#
# #### `flexibility` mean:
vary_parameter('flexibility', [(0.4, 1e-16), (4.0, 1e-16), (12.0, 1e-16)], samples_vary_in='spectrum')
# #### `flexibility` std:
vary_parameter('flexibility', [(4., 0.02), (4., 0.2), (4., 2.)], samples_vary_in='flexibility')
# Fix the flexibility for the following showcases.
cf_x_fluct_pars['flexibility'] = (4., 1e-16)
# ### The `asperity` parameters of `add_fluctuations()`
#
# `asperity` determines **how rough the integrated Wiener process component of the power spectrum is**.
#
# `asperity[0]` sets the average roughness, `asperity[1]` sets how much the roughness can vary.\
# These two parameters feed into a moment-matched log-normal distribution model,
# see above for a demo of its behavior.
#
# #### `asperity` mean:
vary_parameter('asperity', [(0.001, 1e-16), (1.0, 1e-16), (5., 1e-16)], samples_vary_in='spectrum')
# #### `asperity` std:
vary_parameter('asperity', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='asperity')
# Fix the asperity for the following showcases.
cf_x_fluct_pars['asperity'] = (1., 1e-16)
# ### The `offset_mean` parameter of `CorrelatedFieldMaker()`
#
# The `offset_mean` parameter defines a global additive offset on the field realizations.
#
# If the field is used for a lognormal model `f = field.exp()`, this acts as a global signal magnitude offset.
# Reset model to neutral
cf_x_fluct_pars['fluctuations'] = (1e-3, 1e-16)
cf_x_fluct_pars['flexibility'] = (1e-3, 1e-16)
cf_x_fluct_pars['asperity'] = (1e-3, 1e-16)
cf_x_fluct_pars['loglogavgslope'] = (1e-3, 1e-16)
# Sweep the global additive offset of the field realizations.
vary_parameter('offset_mean', [3., 0., -2.])
# ### The `offset_std` parameters of `CorrelatedFieldMaker()`
#
# Variations of the global offset of the field are modelled as being log-normally distributed.
# See `The Moment-Matched Log-Normal Distribution` above for details.
#
# The `offset_std[0]` parameter sets how much NIFTy will vary the offset *on average*.\
# The `offset_std[1]` parameter defines the width and shape of the offset variation distribution.
#
# #### `offset_std` mean:
vary_parameter('offset_std', [(1e-16, 1e-16), (0.5, 1e-16), (2., 1e-16)], samples_vary_in='xi')
# #### `offset_std` std:
vary_parameter('offset_std', [(1., 0.01), (1., 0.1), (1., 1.)], samples_vary_in='zeromode')
# ## Matern fluctuation kernels
#
# The correlated fields model also supports parametrizing the power spectra of field dimensions
# using Matern kernels. In the following, the effects of their parameters are demonstrated.
#
# Contrary to the field fluctuations parametrization showed above, the Matern kernel
# parameters show strong interactions. For example, the field amplitude does not only depend on the
# amplitude scaling parameter `scale`, but on the combination of all three parameters `scale`,
# `cutoff` and `loglogslope`.
# Neutral model parameters yielding a quasi-constant field
cf_make_pars = {
    'offset_mean': 0.,
    'offset_std': (1e-3, 1e-16),
    'prefix': ''
}
# Matern-kernel parametrization: scale, cutoff and loglogslope replace the
# fluctuations/flexibility/asperity/loglogavgslope parameters used above.
cf_x_fluct_pars = {
    'target_subdomain': x_space,
    'scale': (1e-2, 1e-16),
    'cutoff': (1., 1e-16),
    'loglogslope': (-2.0, 1e-16)
}
init_model(cf_make_pars, cf_x_fluct_pars, matern=True)
# Show neutral field
eval_model(cf_make_pars, cf_x_fluct_pars, "Neutral Field", matern=True)
#
# ### The `scale` parameters of `add_fluctuations_matern()`
#
# determine the **overall amplitude scaling factor of fluctuations in the target subdomain**
# for which `add_fluctuations_matern` is called.
#
# **It does not set the absolute amplitude**, which depends on all other parameters, too.
#
# `scale[0]` set the _average_ amplitude scaling factor of the fields' fluctuations along the given dimension,\
# `scale[1]` sets the width and shape of the scaling factor distribution.
#
# The scaling factor is modelled as being log-normally distributed,
# see `The Moment-Matched Log-Normal Distribution` above for details.
#
# #### `scale` mean:
vary_parameter('scale', [(0.01, 1e-16), (0.1, 1e-16), (1.0, 1e-16)], samples_vary_in='xi', matern=True)
# #### `scale` std:
vary_parameter('scale', [(0.5, 0.01), (0.5, 0.1), (0.5, 0.5)], samples_vary_in='scale', matern=True)
# Fix the amplitude scale for the following showcases.
cf_x_fluct_pars['scale'] = (0.5, 1e-16)
# ### The `loglogslope` parameters of `add_fluctuations_matern()`
#
# determine **the slope of the loglog-linear (power law) component of the power spectrum**.
#
# `loglogslope[0]` set the _average_ power law exponent of the fields' power spectrum along the given dimension,\
# `loglogslope[1]` sets the width and shape of the exponent distribution.
#
# The `loglogslope` is modelled to be normally distributed.
#
# #### `loglogslope` mean:
vary_parameter('loglogslope', [(-4.0, 1e-16), (-2.0, 1e-16), (-1.0, 1e-16)], samples_vary_in='xi', matern=True)
# As one can see, the field amplitude also depends on the `loglogslope` parameter.
#
# #### `loglogslope` std:
vary_parameter('loglogslope', [(-3., 0.01), (-3., 0.5), (-3., 1.0)], samples_vary_in='loglogslope', matern=True)
# ### The `cutoff` parameters of `add_fluctuations_matern()`
#
# determines **at what wavevector length the power spectrum should transition from constant power
# to following the powerlaw set by `loglogslope`**.
#
# `cutoff[0]` set the _average_ wavevector length at which the power spectrum transition occurs,\
# `cutoff[1]` sets the width and shape of the transition wavevector length distribution.
#
# The cutoff is modelled as being log-normally distributed,
# see `The Moment-Matched Log-Normal Distribution` above for details.
#
# #### `cutoff` mean:
# Steepen the power law so the cutoff's effect is clearly visible.
cf_x_fluct_pars['loglogslope'] = (-8.0, 1e-16)
vary_parameter('cutoff', [(1.0, 1e-16), (3.16, 1e-16), (10.0, 1e-16)], samples_vary_in='xi', matern=True)
# #### `cutoff` std:
vary_parameter('cutoff', [(10., 1.0), (10., 3.16), (10., 10.)], samples_vary_in='cutoff', matern=True)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2022 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
############################################################
# Non-linear tomography
#
# The signal is a sigmoid-normal distributed field.
# The data is the field integrated along lines of sight that are
# randomly (set mode=0) or radially (mode=1) distributed
#
# Demo takes a while to compute
#############################################################
import sys
import numpy as np
import nifty.cl as ift
# Run under MPI when mpi4py is available; otherwise fall back to serial mode.
try:
    from mpi4py import MPI
    comm = MPI.COMM_WORLD
    # for models with latent vectors > 2 GiB use `pkl5` communicators:
    #from mpi4py.util import pkl5
    #comm = pkl5.Intracomm(comm)
    # Only rank 0 should write plots and console output.
    master = comm.Get_rank() == 0
except ImportError:
    comm = None
    master = True
class SingleDomain(ift.LinearOperator):
    """Linear operator that re-attaches a field's data to a different domain.

    The underlying numpy data is passed through unchanged; only the domain
    bookkeeping differs between input and output.
    """

    def __init__(self, domain, target):
        self._capability = self.TIMES | self.ADJOINT_TIMES
        self._domain = ift.makeDomain(domain)
        self._target = ift.makeDomain(target)

    def apply(self, x, mode):
        self._check_input(x, mode)
        # Wrap the raw data in a field living on the mode-dependent domain.
        destination = self._tgt(mode)
        return ift.makeField(destination, x.asnumpy())
def random_los(n_los):
    """Return start and end coordinates of `n_los` random lines of sight.

    Both endpoints are drawn uniformly from the unit square; the result is
    a pair of per-axis coordinate lists as expected by `ift.LOSResponse`.
    """
    starts = [axis for axis in ift.random.current_rng().random((n_los, 2)).T]
    ends = [axis for axis in ift.random.current_rng().random((n_los, 2)).T]
    return starts, ends
def radial_los(n_los):
    """Return `n_los` lines of sight that all terminate at the domain centre.

    Start points are uniform in the unit square; end points are pinned to
    (0.5, 0.5). The end-point draw is kept (multiplied by zero) so the RNG
    stream advances exactly as in `random_los`.
    """
    starts = [axis for axis in ift.random.current_rng().random((n_los, 2)).T]
    ends = [axis for axis in 0.5 + 0*ift.random.current_rng().random((n_los, 2)).T]
    return starts, ends
def main():
    """Tomography demo with a factorized (per-axis) correlated field model."""
    # Choose between random line-of-sight response (mode=0) and radial lines
    # of sight (mode=1)
    if len(sys.argv) == 2:
        mode = int(sys.argv[1])
    else:
        mode = 0
    # Prepare the filename template for storing results.
    filename = "getting_started_mf_mode_{}_".format(mode) + "{}.png"
    # Set up signal domain
    npix1, npix2 = 128, 128
    position_space = ift.RGSpace([npix1, npix2])
    sp1 = ift.RGSpace(npix1)
    sp2 = ift.RGSpace(npix2)
    # Set up signal model: one fluctuation model per sub-axis; positional
    # arguments are (target, fluctuations, flexibility, asperity,
    # loglogavgslope, prefix).
    cfmaker = ift.CorrelatedFieldMaker('')
    cfmaker.add_fluctuations(sp1, (0.1, 1e-2), (2, .2), (.01, .5), (-4, 2.),
                             'amp1')
    cfmaker.add_fluctuations(sp2, (0.1, 1e-2), (2, .2), (.01, .5), (-3, 1),
                             'amp2')
    cfmaker.set_amplitude_total_offset(0., (1e-2, 1e-6))
    correlated_field = cfmaker.finalize()
    normalized_amp = cfmaker.get_normalized_amplitudes()
    # Squared normalized amplitudes give the per-axis power spectra.
    pspec1 = normalized_amp[0]**2
    pspec2 = normalized_amp[1]**2
    # Reinterpret the product target of the correlated field as a plain 2D space.
    DC = SingleDomain(correlated_field.target, position_space)
    # Apply a nonlinearity
    signal = DC @ ift.sigmoid(correlated_field)
    # Build the line-of-sight response and define signal response
    LOS_starts, LOS_ends = random_los(100) if mode == 0 else radial_los(100)
    R = ift.LOSResponse(position_space, starts=LOS_starts, ends=LOS_ends)
    signal_response = R(signal)
    # Specify noise
    data_space = R.target
    noise = .001
    N = ift.ScalingOperator(data_space, noise, float)
    # Generate mock signal and data
    mock_position = ift.from_random(signal_response.domain, 'normal')
    data = signal_response(mock_position) + N.draw_sample()
    # Setup figure: ground truth, data backprojection, and both prior spectra.
    plot = ift.Plot()
    plot.add(signal(mock_position), title='Ground Truth')
    plot.add(R.adjoint_times(data), title='Data')
    plot.add([pspec1.force(mock_position)], title='Power Spectrum 1')
    plot.add([pspec2.force(mock_position)], title='Power Spectrum 2')
    plot.output(ny=2, nx=2, xsize=10, ysize=10, name=filename.format("setup"))
    # Minimization parameters
    ic_sampling = ift.AbsDeltaEnergyController(name='Sampling', deltaE=0.01, iteration_limit=100)
    ic_newton = ift.AbsDeltaEnergyController(name='Newton', deltaE=0.01, iteration_limit=35)
    minimizer = ift.NewtonCG(ic_newton, enable_logging=True)
    # Set up likelihood energy and information Hamiltonian
    likelihood_energy = ift.GaussianEnergy(data, inverse_covariance=N.inverse) @ signal_response
    n_samples = 20
    n_iterations = 5
    samples = ift.optimize_kl(likelihood_energy, n_iterations, n_samples,
                              minimizer, ic_sampling, comm=comm,
                              output_directory="getting_started_5_results",
                              export_operator_outputs={"signal": signal, "power spectrum 1": pspec1,
                                                       "power spectrum 2": pspec2})
    # Plotting
    filename_res = filename.format("results")
    plot = ift.Plot()
    mean, var = samples.sample_stat(signal)
    plot.add(mean, title="Posterior Mean")
    plot.add(ift.sqrt(var), title="Posterior Standard Deviation")
    n_samples = samples.n_samples
    # Spectrum means are averaged in log space (geometric mean).
    plot.add(list(samples.iterator(pspec1)) + [samples.average(pspec1.log()).exp(),
             pspec1.force(mock_position)],
             title="Sampled Posterior Power Spectrum 1",
             linewidth=[1.]*n_samples + [3., 3.])
    plot.add(list(samples.iterator(pspec2)) + [samples.average(pspec2.log()).exp(),
             pspec2.force(mock_position)],
             title="Sampled Posterior Power Spectrum 2",
             linewidth=[1.]*n_samples + [3., 3.])
    if master:
        # Only rank 0 writes the final figure when running under MPI.
        plot.output(ny=2, nx=2, xsize=15, ysize=15, name=filename_res)
        print("Saved results as '{}'.".format(filename_res))
if __name__ == '__main__':
    main()
[optimization]
# output directory could also be specified here (see following line)
# output directory = getting_started_7_results
save strategy = latest
[optimization.1]
base = base.optimization
total iterations = 4
likelihood energy = *lh0
transitions = None
n samples = 2*3,5
kl minimizer = *mini0, *mini1
fresh stochasticity = True
[optimization.02]
base = base.optimization
total iterations = 2
transitions = *trans01,None
likelihood energy = *lh1
n samples = 5
kl minimizer = *mini1
fresh stochasticity = False
[lh0]
sky = *sky0
noise var = float :: 0.01
[lh1]
sky = *sky1
noise var = float :: 0.01
[sky0]
npix = int :: 128
vol = float :: 2
offset mean = float :: 0
offset std mean = float :: 1e-3
offset std std = float :: 1e-6
fluctuations mean = float :: 1.
fluctuations std = float :: 0.8
loglogavgslope mean = float :: -3.
loglogavgslope std = float :: 1
flexibility mean = float :: 2
flexibility std = float :: 1.
asperity mean = float :: 0.5
asperity std = float :: 0.4
[sky1]
base = sky0
npix = int :: 1000
[trans01]
sky before = *sky0
sky after = *sky1
[mini0]
custom function = nifty.cl.VL_BFGS
controller = *icmini
[mini1]
custom function = nifty.cl.NewtonCG
controller = *icmini
[icsamp]
custom function = nifty.cl.AbsDeltaEnergyController
name = Sampling (linear)
iteration limit = int :: 100
deltaE = float :: 0.05
[icmini]
custom function = nifty.cl.AbsDeltaEnergyController
name = KL
iteration limit = int :: 10
deltaE = float :: 0.5
convergence level = int :: 2
[base.optimization]
point estimates = None
constants = None
nonlinear sampling minimizer = None
sampling iteration controller = *icsamp