Commit a1502898 authored by M Selig

Merge pull request #4 from mselig/develop

version update.
parents 8384f0ca de59bf62
......@@ -15,4 +15,6 @@ build
demos/*
!demos/demo_faraday.py
!demos/demo_faraday_map.npy
!demos/demo_excaliwir.py
\ No newline at end of file
!demos/demo_excaliwir.py
!demos/demo_wf1.py
!demos/demo_wf2.py
\ No newline at end of file
......@@ -63,6 +63,7 @@ apply to fields.
vector
* ``response_operator`` - exemplary responses that include a convolution,
masking and projection
* ``propagator_operator`` - information propagator in Wiener filter theory
* (and more)
* (and more)
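For orientation, a minimal sketch of how the operators listed above interact, using the
call signatures from ``demos/demo_wf1.py`` shipped with this version::

    x_space = rg_space([256, 256])                                    # signal space
    k_space = x_space.get_codomain()                                  # conjugate space
    S = power_operator(k_space, spec=lambda k: 42 / (k + 1) ** 3)     # signal covariance
    R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None)  # response
    N = diagonal_operator(R.target, diag=100, bare=True)              # noise covariance on data space
    D = propagator_operator(S=S, N=N, R=R)                            # D^{-1} = S^{-1} + R^dagger N^{-1} R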
......@@ -96,7 +97,7 @@ Requirements
Download
........
The latest release is tagged **v0.4.2** and is available as a source package
The latest release is tagged **v0.6.0** and is available as a source package
at `<https://github.com/mselig/nifty/tags>`_. The current version can be
obtained by cloning the repository::
......@@ -122,7 +123,7 @@ Please, acknowledge the use of NIFTY in your publication(s) by using a phrase
such as the following:
*"Some of the results in this publication have been derived using the NIFTY
[Selig et al., 2013] package."*
package [Selig et al., 2013]."*
References
..........
......
......@@ -23,6 +23,7 @@ from __future__ import division
from nifty_core import *
from nifty_cmaps import *
from nifty_power import *
from nifty_tools import *
......
......@@ -33,8 +33,6 @@
"""
from __future__ import division
from nifty import *
from nifty.nifty_cmaps import *
from nifty.nifty_power import *
from scipy.sparse.linalg import LinearOperator as lo
from scipy.sparse.linalg import cg
......
......@@ -39,7 +39,6 @@
"""
from __future__ import division
from nifty import *
from nifty.nifty_cmaps import *
about.warnings.off()
......
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / demo
.. /______/
NIFTY demo applying a Wiener filter using conjugate gradient.
"""
from __future__ import division
from nifty import * # version 0.6.0
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., 100
N = diagonal_operator(d_space, diag=100, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
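# the Wiener filter estimate is m = D(j), where D = (S^{-1} + R^dagger N^{-1} R)^{-1}
# is defined only through its inverse; applying D therefore invokes a conjugate
# gradient, which is why a tolerance is passed below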
m = D(j, tol=1E-4, note=True) # reconstruct map
s.plot(title="signal") # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.val.min(), vmax=s.val.max()) # plot data
m.plot(title="reconstructed map", vmin=s.val.min(), vmax=s.val.max()) # plot map
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / demo
.. /______/
NIFTY demo applying a Wiener filter using steepest descent.
"""
from __future__ import division
from nifty import * # version 0.6.0
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., 100
N = diagonal_operator(d_space, diag=100, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
def eggs(x):
"""
Calculation of the information Hamiltonian and its gradient.
"""
DIx = D.inverse_times(x)
H = 0.5 * DIx.dot(x) - j.dot(x) # compute information Hamiltonian
g = DIx - j # compute its gradient
return H, g
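# minimizing the information Hamiltonian H(x) = 1/2 x^dagger D^{-1} x - j^dagger x
# is equivalent to solving D^{-1} m = j, i.e. to the Wiener filter solution of
# demo_wf1; the steepest descent below iterates on the (H, gradient) pair from eggs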
m = field(x_space, target=k_space) # reconstruct map
m,convergence = steepest_descent(eggs=eggs, note=True)(m, tol=1E-4, clevel=3)
s.plot(title="signal") # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.val.min(), vmax=s.val.max()) # plot data
m.plot(title="reconstructed map", vmin=s.val.min(), vmax=s.val.max()) # plot map
......@@ -28,11 +28,11 @@
.. /__/ /__/ /__/ /__/ \___/ \___ / power
.. /______/
NIFTy offers a number of automatized routines for handling
NIFTY offers a number of automatized routines for handling
power spectra. It is possible to draw a field from a random distribution
with a certain autocorrelation or, equivalently, with a certain
power spectrum in its conjugate space (see :py:func:`field.random`). In
NIFTy, it is usually assumed that such a field follows statistical
NIFTY, it is usually assumed that such a field follows statistical
homogeneity and isotropy. Fields which are only statistically homogeneous
can also be created using the diagonal operator routine.
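A minimal sketch of one way to do this, using the ``power_operator`` route also
taken in the Wiener filter demos (an equivalent direct interface is provided by
:py:func:`field.random`)::

    x_space = rg_space([256, 256])                                  # regular grid signal space
    k_space = x_space.get_codomain()                                # conjugate (harmonic) space
    S = power_operator(k_space, spec=lambda k: 42 / (k + 1) ** 3)   # prescribed power spectrum
    s = S.get_random_field(domain=x_space)                          # field with that power spectrum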
......@@ -43,7 +43,7 @@
from __future__ import division
from scipy.interpolate import interp1d as ip ## conflicts with sphinx's autodoc
#import numpy as np
from nifty.nifty_core import *
from nifty_core import *
import smoothing as gs
......@@ -291,7 +291,7 @@ def _calc_inverse(tk,var,kindex,rho,b1,Amem): ## > computes the inverse Hessian
## inversion
return np.linalg.inv(T2+np.diag(b2,k=0)),b2,Amem
def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None,rho=None,q=1E-42,alpha=1,perception=(1,0),smoothness=True,var=10,bare=True,**kwargs):
def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None,rho=None,q=1E-42,alpha=1,perception=(1,0),smoothness=True,var=10,force=False,bare=True,**kwargs):
"""
Infers the power spectrum.
......@@ -338,6 +338,9 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
(default: True).
var : {scalar, list, array}, *optional*
Variance of the assumed spectral smoothness prior (default: 10).
force : bool, *optional*, *experimental*
Indicates whether smoothness is to be enforced or not
(default: False).
bare : bool, *optional*
Indicates whether the power spectrum entries returned are "bare"
or not (mandatory for the correct incorporation of volume weights)
......@@ -401,7 +404,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
derived, and the implications of a certain choice of the perception
tuple (delta,epsilon) are discussed.
The further incorporation of a smoothness prior as detailed in [#]_,
where the underlying formula(s), Eq.(27), of this implementation are
where the underlying formula(s), Eq.(26), of this implementation are
derived and discussed in terms of their applicability.
References
......@@ -505,6 +508,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
numerator = weight_power(domain,numerator,power=-1,pindex=pindex,pundex=pundex) ## bare(!)
## smoothness prior
permill = 0
divergence = 1
while(divergence):
pk = numerator/denominator1 ## bare(!)
......@@ -524,7 +528,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
absdelta = np.abs(delta).max()
tk += min(1,0.1/absdelta)*delta # adaptive step width
pk *= np.exp(min(1,0.1/absdelta)*delta) # adaptive step width
var_ /= 1.1 # lowering the variance when converged
var_ /= 1.1+permill # lowering the variance when converged
if(var_<var):
if(breakinfo): # making sure there's one iteration with the correct variance
break
......@@ -538,6 +542,14 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
break
else:
divergence += 1
if(force):
permill = 0.001
elif(force)and(var_/var_OLD>1.001):
permill = 0
pot = int(np.log10(var_))
var = int(1+var_*10**-pot)*10**pot
about.warnings.cprint("WARNING: smoothness variance increased ( var = "+str(var)+" ).")
break
else:
var_OLD = var_
if(breakinfo):
......
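In context, the ``force`` flag introduced above might be used along the following
lines (a hedged sketch only: ``m`` and ``D`` stand for the reconstructed map and
information propagator from the Wiener filter demos, and the return value is
presumably the inferred power spectrum)::

    pk = infer_power(m, domain=k_space, D=D, smoothness=True, var=10, force=True)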
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / tools
.. /______/
This module extends NIFTY with a nifty set of tools including further
operators, namely the :py:class:`invertible_operator` and the
:py:class:`propagator_operator`, and minimization schemes, namely
:py:class:`steepest_descent` and :py:class:`conjugate_gradient`. Those
tools are supposed to support the user in solving information field
theoretical problems (almost) without numerical pain.
"""
from __future__ import division
#import numpy as np
from nifty_core import *
##-----------------------------------------------------------------------------
class invertible_operator(operator):
"""
.. __ __ __ __ __
.. /__/ / /_ /__/ / / / /
.. __ __ ___ __ __ _______ _____ / _/ __ / /___ / / _______
.. / / / _ || |/ / / __ / / __/ / / / / / _ | / / / __ /
.. / / / / / / | / / /____/ / / / /_ / / / /_/ / / /_ / /____/
.. /__/ /__/ /__/ |____/ \______/ /__/ \___/ /__/ \______/ \___/ \______/ operator class
NIFTY subclass for invertible, self-adjoint (linear) operators
The invertible operator class is an abstract class for self-adjoint or
symmetric (linear) operators from which other more specific operator
subclasses can be derived. Such operators inherit an automated inversion
routine, namely conjugate gradient.
Parameters
----------
domain : space
The space wherein valid arguments live.
uni : bool, *optional*
Indicates whether the operator is unitary or not.
(default: False)
imp : bool, *optional*
Indicates whether the incorporation of volume weights in
multiplications is already implemented in the `multiply`
instance methods or not (default: False).
para : {single object, tuple/list of objects}, *optional*
This is a freeform tuple/list of parameters that derivatives of
the operator class can use (default: None).
See Also
--------
operator
Notes
-----
This class is not meant to be instantiated. Operator classes derived
from this one only need to define either a `_multiply` or an
`_inverse_multiply` instance method; the missing one is then performed
by the inherited conjugate gradient inversion.
Attributes
----------
domain : space
The space wherein valid arguments live.
sym : bool
Indicates whether the operator is self-adjoint or not.
uni : bool
Indicates whether the operator is unitary or not.
imp : bool
Indicates whether the incorporation of volume weights in
multiplications is already implemented in the `multiply`
instance methods or not.
target : space
The space wherein the operator output lives.
para : {single object, list of objects}
This is a freeform tuple/list of parameters that derivatives of
the operator class can use. Not used in the base operators.
"""
def __init__(self,domain,uni=False,imp=False,para=None):
"""
Sets the standard operator properties.
Parameters
----------
domain : space
The space wherein valid arguments live.
uni : bool, *optional*
Indicates whether the operator is unitary or not.
(default: False)
imp : bool, *optional*
Indicates whether the incorporation of volume weights in
multiplications is already implemented in the `multiply`
instance methods or not (default: False).
para : {single object, tuple/list of objects}, *optional*
This is a freeform tuple/list of parameters that derivatives of
the operator class can use (default: None).
"""
if(not isinstance(domain,space)):
raise TypeError(about._errors.cstring("ERROR: invalid input."))
self.domain = domain
self.sym = True
self.uni = bool(uni)
if(self.domain.discrete):
self.imp = True
else:
self.imp = bool(imp)
self.target = self.domain
if(para is not None):
self.para = para
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def _multiply(self,x,force=False,W=None,spam=None,reset=None,note=False,x0=None,tol=1E-4,clevel=1,limii=None,**kwargs):
"""
Applies the invertible operator to a given field by invoking a
conjugate gradient.
Parameters
----------
x : field
Valid input field.
force : bool
Indicates whether returning a field instead of ``None`` is
enforced in case the conjugate gradient fails.
Returns
-------
OIIx : field
Mapped field with suitable domain.
See Also
--------
conjugate_gradient
Other Parameters
----------------
W : {operator, function}, *optional*
Operator `W` that is a preconditioner on `A` and is applicable to a
field (default: None).
spam : function, *optional*
Callback function which is given the current `x` and iteration
counter each iteration (default: None).
reset : integer, *optional*
Number of iterations after which to restart; i.e., forget previous
conjugated directions (default: sqrt(b.dim())).
note : bool, *optional*
Indicates whether notes are printed or not (default: False).
x0 : field, *optional*
Starting guess for the minimization.
tol : scalar, *optional*
Tolerance specifying convergence; measured by current relative
residual (default: 1E-4).
clevel : integer, *optional*
Number of times the tolerance should be undershot before
exiting (default: 1).
limii : integer, *optional*
Maximum number of iterations performed (default: 10 * b.dim()).
"""
x_,convergence = conjugate_gradient(self.inverse_times,x,W=W,spam=spam,reset=reset,note=note)(x0=x0,tol=tol,clevel=clevel,limii=limii)
## check convergence
if(not convergence):
if(not force)or(x_ is None):
return None
about.warnings.cprint("WARNING: conjugate gradient failed.")
## weight if ...
if(not self.imp): ## continuous domain/target
x_.weight(power=-1,overwrite=True)
return x_
def _inverse_multiply(self,x,force=False,W=None,spam=None,reset=None,note=False,x0=None,tol=1E-4,clevel=1,limii=None,**kwargs):
"""
Applies the inverse of the invertible operator to a given field by
invoking a conjugate gradient.
Parameters
----------
x : field
Valid input field.
force : bool
Indicates whether returning a field instead of ``None`` is
enforced in case the conjugate gradient fails.
Returns
-------
OIx : field
Mapped field with suitable domain.
See Also
--------
conjugate_gradient
Other Parameters
----------------
W : {operator, function}, *optional*
Operator `W` that is a preconditioner on `A` and is applicable to a
field (default: None).
spam : function, *optional*
Callback function which is given the current `x` and iteration
counter each iteration (default: None).
reset : integer, *optional*
Number of iterations after which to restart; i.e., forget previous
conjugated directions (default: sqrt(b.dim())).
note : bool, *optional*
Indicates whether notes are printed or not (default: False).
x0 : field, *optional*
Starting guess for the minimization.
tol : scalar, *optional*
Tolerance specifying convergence; measured by current relative
residual (default: 1E-4).
clevel : integer, *optional*
Number of times the tolerance should be undershot before
exiting (default: 1).
limii : integer, *optional*
Maximum number of iterations performed (default: 10 * b.dim()).
"""
x_,convergence = conjugate_gradient(self.times,x,W=W,spam=spam,reset=reset,note=note)(x0=x0,tol=tol,clevel=clevel,limii=limii)
## check convergence
if(not convergence):
if(not force)or(x_ is None):
return None
about.warnings.cprint("WARNING: conjugate gradient failed.")
## weight if ...
if(not self.imp): ## continuous domain/target
x_.weight(power=1,overwrite=True)
return x_
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def __repr__(self):
return "<nifty.invertible_operator>"
##-----------------------------------------------------------------------------
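## Illustrative sketch (not part of the library): a subclass of `invertible_operator`
## only needs to implement `_multiply`; the inherited `_inverse_multiply` then
## performs `inverse_times` by conjugate gradient. The class name and the scaling
## it applies are hypothetical.
class _example_scaling_operator(invertible_operator):
    def __init__(self,domain,factor=2):
        ## reuse the standard property setup and keep the factor in `para`
        invertible_operator.__init__(self,domain,uni=False,imp=True,para=factor)
    def _multiply(self,x,**kwargs):
        ## direct application: scale the field (assumes fields support scalar multiplication)
        return x*self.para
## example usage (hedged):
##     op = _example_scaling_operator(rg_space([256]),factor=2)
##     y = op.times(f)           ## direct application
##     f_ = op.inverse_times(y)  ## solved iteratively by conjugate gradient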
##-----------------------------------------------------------------------------
class propagator_operator(operator):
"""
.. __
.. / /_
.. _______ _____ ______ ______ ____ __ ____ __ ____ __ / _/ ______ _____
.. / _ / / __/ / _ | / _ | / _ / / _ / / _ / / / / _ | / __/
.. / /_/ / / / / /_/ / / /_/ / / /_/ / / /_/ / / /_/ / / /_ / /_/ / / /
.. / ____/ /__/ \______/ / ____/ \______| \___ / \______| \___/ \______/ /__/ operator class
.. /__/ /__/ /______/
NIFTY subclass for propagator operators (of a certain family)
The propagator operators :math:`D` implemented here have an inverse
formulation like :math:`(S^{-1} + M)`, :math:`(S^{-1} + N^{-1})`, or
:math:`(S^{-1} + R^\dagger N^{-1} R)` as appearing in Wiener filter
theory.
Parameters
----------
S : operator
Covariance of the signal prior.
M : operator
Likelihood contribution.
R : operator
Response operator translating signal to (noiseless) data.
N : operator
Covariance of the noise prior or the likelihood, respectively.
See Also
--------
conjugate_gradient
Notes
-----
The propagator puzzles the operators `S` and `M`, or `R` and `N`, or
only `N` together in the predefined form; a domain is set
automatically. The application of the inverse is done by invoking a
conjugate gradient.
Note that changes to `S`, `M`, `R` or `N` auto-update the propagator.
Examples
--------
>>> f = field(rg_space(4), val=[2, 4, 6, 8])
>>> S = power_operator(f.target, spec=1)
>>> N = diagonal_operator(f.domain, diag=1)
>>> D = propagator_operator(S=S, N=N) # D^{-1} = S^{-1} + N^{-1}
>>> D(f).val
array([ 1., 2., 3., 4.])
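A variant including an explicit response can be set up analogously (a hedged
sketch; the numerical outcome depends on the response defaults):
>>> R = response_operator(f.domain, sigma=0.0, mask=1.0) # identity-like response (assumed)
>>> D = propagator_operator(S=S, N=N, R=R) # D^{-1} = S^{-1} + R^dagger N^{-1} R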
Attributes
----------
domain : space
A space wherein valid arguments live.
codomain : space
An alternative space wherein valid arguments live; commonly the
codomain of the `domain` attribute.
sym : bool
Indicates that the operator is self-adjoint.
uni : bool
Indicates that the operator is not unitary.
imp : bool
Indicates that volume weights are implemented in the `multiply`
instance methods.
target : space
The space wherein the operator output lives.
_A1 : {operator, function}
Application of :math:`S^{-1}` to a field.
_A2 : {operator, function}
Application of all operations not included in `A1` to a field.
RN : {2-tuple of operators}, *optional*
Contains `R` and `N` if given.
"""
def __init__(self,S=None,M=None,R=None,N=None):
"""
Sets the standard operator properties and `codomain`, `_A1`, `_A2`,
and `RN` if required.
Parameters
----------
S : operator
Covariance of the signal prior.
M : operator
Likelihood contribution.
R : operator
Response operator translating signal to (noiseless) data.
N : operator
Covariance of the noise prior or the likelihood, respectively.
"""
## check signal prior covariance
if(S is None):
raise Exception(about._errors.cstring("ERROR: insufficient input."))
elif(not isinstance(S,operator)):
raise ValueError(about._errors.cstring("ERROR: invalid input."))
space1 = S.domain
## check likelihood (pseudo) covariance
if(M is None):
if(N is None):
raise Exception(about._errors.cstring("ERROR: insufficient input."))