Commit a1502898 authored by M Selig

Merge pull request #4 from mselig/develop

version update.
parents 8384f0ca de59bf62
......@@ -15,4 +15,6 @@ build
demos/*
!demos/demo_faraday.py
!demos/demo_faraday_map.npy
!demos/demo_excaliwir.py
\ No newline at end of file
!demos/demo_excaliwir.py
!demos/demo_wf1.py
!demos/demo_wf2.py
\ No newline at end of file
......@@ -63,6 +63,7 @@ apply to fields.
vector
* ``response_operator`` - exemplary responses that include a convolution,
masking and projection
* ``propagator_operator`` - information propagator in Wiener filter theory
* (and more)
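For orientation, here is a minimal sketch (mirroring the Wiener filter demos added in this commit) of how several of these operators are combined on a regular grid::

    from nifty import *                                                 # version 0.6.0

    x_space = rg_space([256, 256])                                      # signal space
    k_space = x_space.get_codomain()                                    # conjugate space
    S = power_operator(k_space, spec=lambda k: 42 / (k + 1) ** 3)       # signal covariance
    R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None)    # response
    N = diagonal_operator(R.target, diag=100, bare=True)                # noise covariance on the data space
    D = propagator_operator(S=S, N=N, R=R)                              # information propagator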
......@@ -96,7 +97,7 @@ Requirements
Download
........
The latest release is tagged **v0.4.2** and is available as a source package
The latest release is tagged **v0.6.0** and is available as a source package
at `<https://github.com/mselig/nifty/tags>`_. The current version can be
obtained by cloning the repository::
......@@ -122,7 +123,7 @@ Please, acknowledge the use of NIFTY in your publication(s) by using a phrase
such as the following:
*"Some of the results in this publication have been derived using the NIFTY
[Selig et al., 2013] package."*
package [Selig et al., 2013]."*
References
..........
......
......@@ -23,6 +23,7 @@ from __future__ import division
from nifty_core import *
from nifty_cmaps import *
from nifty_power import *
from nifty_tools import *
......
......@@ -33,8 +33,6 @@
"""
from __future__ import division
from nifty import *
from nifty.nifty_cmaps import *
from nifty.nifty_power import *
from scipy.sparse.linalg import LinearOperator as lo
from scipy.sparse.linalg import cg
......
......@@ -39,7 +39,6 @@
"""
from __future__ import division
from nifty import *
from nifty.nifty_cmaps import *
about.warnings.off()
......
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / demo
.. /______/
NIFTY demo applying a Wiener filter using conjugate gradient.
"""
from __future__ import division
from nifty import * # version 0.6.0
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., 100
N = diagonal_operator(d_space, diag=100, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
m = D(j, tol=1E-4, note=True) # reconstruct map
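# note: j = R^dagger N^{-1} d and D = (S^{-1} + R^dagger N^{-1} R)^{-1},
# so m = D(j) is the Wiener filter (maximum a posteriori) estimate of s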
s.plot(title="signal") # plot signal
d_ = field(x_space, val=d.val, target=k_space) # embed data values in the signal space for plotting
d_.plot(title="data", vmin=s.val.min(), vmax=s.val.max()) # plot data
m.plot(title="reconstructed map", vmin=s.val.min(), vmax=s.val.max()) # plot map
## NIFTY (Numerical Information Field Theory) has been developed at the
## Max-Planck-Institute for Astrophysics.
##
## Copyright (C) 2013 Max-Planck-Society
##
## Author: Marco Selig
## Project homepage: <http://www.mpa-garching.mpg.de/ift/nifty/>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
## See the GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
.. __ ____ __
.. /__/ / _/ / /_
.. __ ___ __ / /_ / _/ __ __
.. / _ | / / / _/ / / / / / /
.. / / / / / / / / / /_ / /_/ /
.. /__/ /__/ /__/ /__/ \___/ \___ / demo
.. /______/
NIFTY demo applying a Wiener filter using steepest descent.
"""
from __future__ import division
from nifty import * # version 0.6.0
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., 100
N = diagonal_operator(d_space, diag=100, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
def eggs(x):
    """
    Calculation of the information Hamiltonian and its gradient.
    """
    DIx = D.inverse_times(x)
    H = 0.5 * DIx.dot(x) - j.dot(x)  # compute information Hamiltonian
    g = DIx - j  # compute its gradient
    return H, g
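# note: eggs(x) evaluates the information Hamiltonian
#     H(x) = 1/2 * x.dot(D^{-1}(x)) - j.dot(x)    (up to x-independent terms)
# and its gradient D^{-1}(x) - j, which steepest_descent minimizes below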
m = field(x_space, target=k_space) # starting field for the iteration
m, convergence = steepest_descent(eggs=eggs, note=True)(m, tol=1E-4, clevel=3) # reconstruct map
s.plot(title="signal") # plot signal
d_ = field(x_space, val=d.val, target=k_space) # embed data values in the signal space for plotting
d_.plot(title="data", vmin=s.val.min(), vmax=s.val.max()) # plot data
m.plot(title="reconstructed map", vmin=s.val.min(), vmax=s.val.max()) # plot map
......@@ -28,11 +28,11 @@
.. /__/ /__/ /__/ /__/ \___/ \___ / power
.. /______/
NIFTy offers a number of automatized routines for handling
NIFTY offers a number of automatized routines for handling
power spectra. It is possible to draw a field from a random distribution
with a certain autocorrelation or, equivalently, with a certain
power spectrum in its conjugate space (see :py:func:`field.random`). In
NIFTy, it is usually assumed that such a field follows statistical
NIFTY, it is usually assumed that such a field follows statistical
homogeneity and isotropy. Fields which are only statistically homogeneous
can also be created using the diagonal operator routine.
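For illustration, a minimal sketch (using the same calls as this commit's demos) of drawing a statistically homogeneous and isotropic random field with a prescribed power spectrum::

    from nifty import *                                                 # version 0.6.0

    x_space = rg_space([256, 256])                                      # signal space
    k_space = x_space.get_codomain()                                    # conjugate space
    S = power_operator(k_space, spec=lambda k: 42 / (k + 1) ** 3)       # spectrum as covariance
    s = S.get_random_field(domain=x_space)                              # field with that spectrum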
......@@ -43,7 +43,7 @@
from __future__ import division
from scipy.interpolate import interp1d as ip ## conflicts with sphinx's autodoc
#import numpy as np
from nifty.nifty_core import *
from nifty_core import *
import smoothing as gs
......@@ -291,7 +291,7 @@ def _calc_inverse(tk,var,kindex,rho,b1,Amem): ## > computes the inverse Hessian
## inversion
return np.linalg.inv(T2+np.diag(b2,k=0)),b2,Amem
def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None,rho=None,q=1E-42,alpha=1,perception=(1,0),smoothness=True,var=10,bare=True,**kwargs):
def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None,rho=None,q=1E-42,alpha=1,perception=(1,0),smoothness=True,var=10,force=False,bare=True,**kwargs):
"""
Infers the power spectrum.
......@@ -338,6 +338,9 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
(default: True).
var : {scalar, list, array}, *optional*
Variance of the assumed spectral smoothness prior (default: 10).
force : bool, *optional*, *experimental*
Indicates whether smoothness is to be enforced or not
(default: False).
bare : bool, *optional*
Indicates whether the power spectrum entries returned are "bare"
or not (mandatory for the correct incorporation of volume weights)
......@@ -401,7 +404,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
derived, and the implications of a certain choice of the perception
tuple (delta,epsilon) are discussed.
The further incorporation of a smoothness prior as detailed in [#]_,
where the underlying formula(s), Eq.(27), of this implementation are
where the underlying formula(s), Eq.(26), of this implementation are
derived and discussed in terms of their applicability.
References
......@@ -505,6 +508,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
numerator = weight_power(domain,numerator,power=-1,pindex=pindex,pundex=pundex) ## bare(!)
## smoothness prior
permill = 0
divergence = 1
while(divergence):
pk = numerator/denominator1 ## bare(!)
......@@ -524,7 +528,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
absdelta = np.abs(delta).max()
tk += min(1,0.1/absdelta)*delta # adaptive step width
pk *= np.exp(min(1,0.1/absdelta)*delta) # adaptive step width
var_ /= 1.1 # lowering the variance when converged
var_ /= 1.1+permill # lowering the variance when converged
if(var_<var):
if(breakinfo): # making sure there's one iteration with the correct variance
break
......@@ -538,6 +542,14 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
break
else:
divergence += 1
if(force):
permill = 0.001
elif(force)and(var_/var_OLD>1.001):
permill = 0
pot = int(np.log10(var_))
var = int(1+var_*10**-pot)*10**pot
about.warnings.cprint("WARNING: smoothness variance increased ( var = "+str(var)+" ).")
break
else:
var_OLD = var_
if(breakinfo):
......
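A hedged usage sketch of the extended infer_power signature above; the map m and propagator D are as in the Wiener filter demos, the remaining argument values are purely illustrative, and the return value is assumed to be the inferred (bare) power spectrum::

    pk = infer_power(m, domain=k_space, D=D,          # assumed return: inferred power spectrum
                     q=1E-42, alpha=1, perception=(1, 0),
                     smoothness=True, var=10,
                     force=True,                      # new, experimental: keep enforcing smoothness
                     bare=True)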
......@@ -65,7 +65,7 @@ def draw_vector_nd(axes,dgrid,ps,symtype=0,fourier=False,zerocentered=False,kpac
"""
if(kpack is None):
kdict = np.fft.fftshift(nkdict(axes,dgrid,fourier))
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier))
klength = nklength(kdict)
else:
kdict = kpack[1][np.fft.ifftshift(kpack[0],axes=shiftaxes(zerocentered,st_to_zero_mode=False))]
......@@ -164,7 +164,7 @@ def draw_vector_nd(axes,dgrid,ps,symtype=0,fourier=False,zerocentered=False,kpac
# foufield = field
# fieldabs = np.abs(foufield)**2
#
# kdict = nkdict(axes,dgrid,fourier)
# kdict = nkdict_fast(axes,dgrid,fourier)
# klength = nklength(kdict)
#
# ## power spectrum
......@@ -228,7 +228,7 @@ def calc_ps_fast(field,axes,dgrid,zerocentered=False,fourier=False,pindex=None,k
if(rho is None):
if(pindex is None):
## kdict
kdict = nkdict(axes,dgrid,fourier)
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
......@@ -253,7 +253,7 @@ def calc_ps_fast(field,axes,dgrid,zerocentered=False,fourier=False,pindex=None,k
rho[pindex[ii]] += 1
elif(pindex is None):
## kdict
kdict = nkdict(axes,dgrid,fourier)
kdict = nkdict_fast(axes,dgrid,fourier)
## klength
if(kindex is None):
klength = nklength(kdict)
......@@ -317,9 +317,9 @@ def get_power_index(axes,dgrid,zerocentered,irred=False,fourier=True):
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict(axes,dgrid,fourier)
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
if(irred):
......@@ -372,9 +372,9 @@ def get_power_indices(axes,dgrid,zerocentered,fourier=True):
## kdict, klength
if(np.any(zerocentered==False)):
kdict = np.fft.fftshift(nkdict(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
kdict = np.fft.fftshift(nkdict_fast(axes,dgrid,fourier),axes=shiftaxes(zerocentered,st_to_zero_mode=True))
else:
kdict = nkdict(axes,dgrid,fourier)
kdict = nkdict_fast(axes,dgrid,fourier)
klength = nklength(kdict)
## output
ind = np.empty(axes,dtype=np.int)
......@@ -587,13 +587,11 @@ def shiftaxes(zerocentered,st_to_zero_mode=False):
def nkdict(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
......@@ -605,6 +603,25 @@ def nkdict(axes,dgrid,fourier=True):
return kdict
def nkdict_fast(axes,dgrid,fourier=True):
"""
Calculates an n-dimensional array with its entries being the lengths of
the k-vectors from the zero point of the Fourier grid.
"""
if(fourier):
dk = dgrid
else:
dk = np.array([1/dgrid[i]/axes[i] for i in range(len(axes))])
temp_vecs = np.array(np.where(np.ones(axes)),dtype='float').reshape(np.append(len(axes),axes))
temp_vecs = np.rollaxis(temp_vecs,0,len(temp_vecs.shape))
temp_vecs -= axes//2
temp_vecs *= dk
temp_vecs *= temp_vecs
return np.sqrt(np.sum((temp_vecs),axis=-1))
def nklength(kdict):
return np.sort(list(set(kdict.flatten())))
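A self-contained numpy sketch (illustrative only, not repository code) of the centered k-length grid that nkdict_fast builds in vectorized form, cross-checked against a naive per-pixel loop::

    import numpy as np

    def nkdict_loop(axes, dk):
        # naive reference: distance of every grid index from the central (zero) mode
        kdict = np.empty(axes)
        for idx in np.ndindex(*axes):
            kdict[idx] = np.sqrt(np.sum(((np.array(idx) - np.array(axes) // 2) * dk) ** 2))
        return kdict

    axes, dk = (4, 6), np.array([1.0, 0.5])        # grid shape and Fourier-space spacing
    vecs = np.array(np.where(np.ones(axes)), dtype=float).reshape(np.append(len(axes), axes))
    vecs = np.rollaxis(vecs, 0, len(vecs.shape))   # index vectors along the last axis
    vecs = (vecs - np.array(axes) // 2) * dk       # center on the zero mode and scale
    kdict = np.sqrt(np.sum(vecs ** 2, axis=-1))    # vectorized k-lengths
    assert np.allclose(kdict, nkdict_loop(axes, dk))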
......
......@@ -23,7 +23,7 @@ from distutils.core import setup
import os
setup(name="nifty",
version="0.4.2",
version="0.6.0",
description="Numerical Information Field Theory",
author="Marco Selig",
author_email="mselig@mpa-garching.mpg.de",
......