Commit 99af81a0 authored by ultimanet's avatar ultimanet
Browse files

Started a clean documentation of the d2o package.

Wrapped the demo code in `if __name__ == "__main__":` blocks in order to make autodoc possible.
parent 61ca188f
......@@ -2,71 +2,75 @@
from nifty import *
about.warnings.off()
shape = (256, 256)
x_space = rg_space(shape)
k_space = x_space.get_codomain()
power = lambda k: 42/((1+k*shape[0])**3)
S = power_operator(k_space, codomain=x_space, spec=power)
s = S.get_random_field(domain=x_space)
#n_points = 360.
#starts = [[(np.cos(i/n_points*np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#starts = list(np.array(starts).T)
#
#ends = [[(np.cos(i/n_points*np.pi + np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi + np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#ends = list(np.array(ends).T)
def make_los(n=10, angle=0, d=1):
    """Generate start/end coordinates for n parallel lines of sight.

    The lines run horizontally from x = -0.2*d to x = 1.2*d, stacked
    vertically starting at y = -0.2*d in steps of 1.2*d/n, and are then
    rotated by `angle` around the center (0.5*d, 0.5*d) of the
    [0, d] x [0, d] square.

    Parameters
    ----------
    n : int
        Number of lines of sight.
    angle : float
        Rotation angle in radians.
    d : float
        Edge length of the square the lines are laid out for.

    Returns
    -------
    (starts, ends) : tuple of numpy.ndarray
        Two arrays of shape (n, 2) holding the rotated start and end
        points of each line.
    """
    # `range` instead of the Python-2-only `xrange` (works identically on
    # both Python 2 and 3); float(i) guards the y-spacing against any
    # integer-division surprises.
    starts = np.array([[-0.2 * d, (-0.2 + 1.2 * float(i) / n) * d]
                       for i in range(n)])
    ends = np.array([[1.2 * d, (-0.2 + 1.2 * float(i) / n) * d]
                     for i in range(n)])
    # Standard 2D rotation matrix; rotate around the square's center by
    # shifting to the origin, rotating, and shifting back.
    rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
                           [np.sin(angle), np.cos(angle)]])
    starts = rot_matrix.dot(starts.T - 0.5 * d).T + 0.5 * d
    ends = rot_matrix.dot(ends.T - 0.5 * d).T + 0.5 * d
    return (starts, ends)
temp_coords = (np.empty((0, 2)), np.empty((0, 2)))
n = 250
m = 250
for alpha in [np.pi/n*j for j in xrange(n)]:
temp = make_los(n=m, angle=alpha)
temp_coords = np.concatenate([temp_coords, temp], axis=1)
starts = list(temp_coords[0].T)
ends = list(temp_coords[1].T)
R = los_response(x_space, starts=starts, ends=ends,
sigmas_up=0.1, sigmas_low=0.1)
d_space = R.target
N = diagonal_operator(d_space, diag=s.var()/100000, bare=True)
n = N.get_random_field(domain=d_space)
d = R(s) + n
j = R.adjoint_times(N.inverse_times(d))
D = propagator_operator(S=S, N=N, R=R)
m = D(j, W=S, tol=1E-14, limii=100, note=True)
s.plot(title="signal", save='1_plot_s.png')
s.plot(save='plot_s_power.png', power=True, other=power)
j.plot(save='plot_j.png')
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save='plot_d.png')
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save='1_plot_m.png')
m.plot(title="reconstructed map", save='2_plot_m.png')
m.plot(save='plot_m_power.png', power=True, other=power)
\ No newline at end of file
if __name__ == "__main__":
about.warnings.off()
shape = (256, 256)
x_space = rg_space(shape)
k_space = x_space.get_codomain()
power = lambda k: 42/((1+k*shape[0])**3)
S = power_operator(k_space, codomain=x_space, spec=power)
s = S.get_random_field(domain=x_space)
#n_points = 360.
#starts = [[(np.cos(i/n_points*np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#starts = list(np.array(starts).T)
#
#ends = [[(np.cos(i/n_points*np.pi + np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi + np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#ends = list(np.array(ends).T)
def make_los(n=10, angle=0, d=1):
    # Build n horizontal lines of sight: each runs from x = -0.2*d to
    # x = 1.2*d at a y-offset starting at -0.2*d in steps of 1.2*d/n,
    # then rotate the whole bundle by `angle` (radians) around the
    # center (0.5*d, 0.5*d) of the [0, d] x [0, d] square.
    # Returns (starts_list, ends_list) as two (n, 2) numpy arrays.
    starts_list = []
    ends_list = []
    for i in xrange(n):
        starts_list += [[(-0.2)*d, (-0.2 + 1.2*i/n)*d]]
        ends_list += [[(1.2)*d, (-0.2 + 1.2*i/n)*d]]
    starts_list = np.array(starts_list)
    ends_list = np.array(ends_list)
    # standard 2D rotation matrix for `angle`
    rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
                           [np.sin(angle), np.cos(angle)]])
    # shift to the square's center, rotate, shift back
    starts_list = rot_matrix.dot(starts_list.T-0.5*d).T+0.5*d
    ends_list = rot_matrix.dot(ends_list.T-0.5*d).T+0.5*d
    return (starts_list, ends_list)
temp_coords = (np.empty((0, 2)), np.empty((0, 2)))
n = 250
m = 250
for alpha in [np.pi/n*j for j in xrange(n)]:
temp = make_los(n=m, angle=alpha)
temp_coords = np.concatenate([temp_coords, temp], axis=1)
starts = list(temp_coords[0].T)
ends = list(temp_coords[1].T)
R = los_response(x_space, starts=starts, ends=ends,
sigmas_up=0.1, sigmas_low=0.1)
d_space = R.target
N = diagonal_operator(d_space, diag=s.var()/100000, bare=True)
n = N.get_random_field(domain=d_space)
d = R(s) + n
j = R.adjoint_times(N.inverse_times(d))
D = propagator_operator(S=S, N=N, R=R)
m = D(j, W=S, tol=1E-14, limii=100, note=True)
s.plot(title="signal", save='1_plot_s.png')
s.plot(save='plot_s_power.png', power=True, other=power)
j.plot(save='plot_j.png')
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save='plot_d.png')
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save='1_plot_m.png')
m.plot(title="reconstructed map", save='2_plot_m.png')
m.plot(save='plot_m_power.png', power=True, other=power)
\ No newline at end of file
......@@ -41,75 +41,77 @@ nifty = imp.load_module('nifty', None,
'/home/steininger/Downloads/nifty', ('','',5))
from nifty import * # version 0.8.0
about.warnings.off()
# some signal space; e.g., a two-dimensional regular grid
shape = [1024]
x_space = rg_space(shape)
#y_space = point_space(1280*1280)
#x_space = hp_space(32)
#x_space = gl_space(800)
if __name__ == "__main__":
about.warnings.off()
k_space = x_space.get_codomain() # get conjugate space
# some signal space; e.g., a two-dimensional regular grid
shape = [1024]
x_space = rg_space(shape)
#y_space = point_space(1280*1280)
#x_space = hp_space(32)
#x_space = gl_space(800)
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 4)
k_space = x_space.get_codomain() # get conjugate space
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
#my_mask = x_space.cast(1)
#stretch = 0.6
#my_mask[shape[0]/2*stretch:shape[0]/2/stretch, shape[1]/2*stretch:shape[1]/2/stretch] = 0
my_mask = 1
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 4)
R = response_operator(x_space, sigma=0.01, mask=my_mask, assign=None) # define response
R = response_operator(x_space, assign=None) #
#R = identity_operator(x_space)
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
#my_mask = x_space.cast(1)
#stretch = 0.6
#my_mask[shape[0]/2*stretch:shape[0]/2/stretch, shape[1]/2*stretch:shape[1]/2/stretch] = 0
my_mask = 1
d_space = R.target # get data space
R = response_operator(x_space, sigma=0.01, mask=my_mask, assign=None) # define response
R = response_operator(x_space, assign=None) #
#R = identity_operator(x_space)
# some noise variance; e.g., signal-to-noise ratio of 1
N = diagonal_operator(d_space, diag=s.var(), bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d_space = R.target # get data space
# some noise variance; e.g., signal-to-noise ratio of 1
N = diagonal_operator(d_space, diag=s.var(), bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
d = R(s) + n # compute data
#m = D(j, W=S, tol=1E-8, limii=100, note=True)
#m = D(j, tol=1E-8, limii=20, note=True, force=True)
ident = identity(x_space)
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
#xi = field(x_space, random='gau', target=k_space)
#m = D(j, W=S, tol=1E-8, limii=100, note=True)
#m = D(j, tol=1E-8, limii=20, note=True, force=True)
ident = identity(x_space)
#xi = field(x_space, random='gau', target=k_space)
m = D(j, W=S, tol=1E-8, limii=100, note=True)
m = D(j, W=S, tol=1E-8, limii=100, note=True)
#temp_result = (D.inverse_times(m)-xi)
#temp_result = (D.inverse_times(m)-xi)
n_power = x_space.enforce_power(s.var()/np.prod(shape))
s_power = S.get_power()
s.plot(title="signal", save = 'plot_s.png')
s.plot(title="signal power", power=True, other=power,
mono=False, save = 'power_plot_s.png', nbin=1000, log=True,
vmax = 100, vmin=10e-7)
n_power = x_space.enforce_power(s.var()/np.prod(shape))
s_power = S.get_power()
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png')
s.plot(title="signal", save = 'plot_s.png')
s.plot(title="signal power", power=True, other=power,
mono=False, save = 'power_plot_s.png', nbin=1000, log=True,
vmax = 100, vmin=10e-7)
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png')
n_ = field(x_space, val=n.val, target=k_space)
n_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_n.png')
n_ = field(x_space, val=n.val, target=k_space)
n_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_n.png')
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png')
m.plot(title="reconstructed power", power=True, other=(n_power, s_power),
save = 'power_plot_m.png', vmin=0.001, vmax=10, mono=False)
#
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png')
m.plot(title="reconstructed power", power=True, other=(n_power, s_power),
save = 'power_plot_m.png', vmin=0.001, vmax=10, mono=False)
#
......@@ -52,65 +52,66 @@ from __future__ import division
from nifty import * # version 0.8.0
from nifty.operators.nifty_minimization import steepest_descent_new
if __name__ == "__main__":
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([256, 256]) # define signal space
k_space = x_space.get_codomain() # get conjugate space
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space, codomain=k_space) # generate signal
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space, codomain=k_space) # generate signal
R = response_operator(x_space, codomain=k_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
R = response_operator(x_space, codomain=k_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., signal-to-noise ratio of 1
N = diagonal_operator(d_space, diag=s.var(), bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
# some noise variance; e.g., signal-to-noise ratio of 1
N = diagonal_operator(d_space, diag=s.var(), bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
def energy(x):
DIx = D.inverse_times(x)
H = 0.5 * DIx.dot(x) - j.dot(x)
return H
def energy(x):
DIx = D.inverse_times(x)
H = 0.5 * DIx.dot(x) - j.dot(x)
return H
def gradient(x):
DIx = D.inverse_times(x)
g = DIx - j
return g
def gradient(x):
DIx = D.inverse_times(x)
g = DIx - j
return g
def eggs(x):
"""
Calculation of the information Hamiltonian and its gradient.
def eggs(x):
"""
Calculation of the information Hamiltonian and its gradient.
"""
# DIx = D.inverse_times(x)
# H = 0.5 * DIx.dot(x) - j.dot(x) # compute information Hamiltonian
# g = DIx - j # compute its gradient
# return H, g
return energy(x), gradient(x)
"""
# DIx = D.inverse_times(x)
# H = 0.5 * DIx.dot(x) - j.dot(x) # compute information Hamiltonian
# g = DIx - j # compute its gradient
# return H, g
return energy(x), gradient(x)
m = field(x_space, codomain=k_space) # reconstruct map
m = field(x_space, codomain=k_space) # reconstruct map
#with PyCallGraph(output=graphviz, config=config):
m, convergence = steepest_descent(eggs=eggs, note=True)(m, tol=1E-3, clevel=3)
#with PyCallGraph(output=graphviz, config=config):
m, convergence = steepest_descent(eggs=eggs, note=True)(m, tol=1E-3, clevel=3)
m = field(x_space, codomain=k_space)
m, convergence = steepest_descent_new(energy, gradient, note=True)(m, tol=1E-3, clevel=3)
#s.plot(title="signal") # plot signal
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max()) # plot data
#m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max()) # plot map
m = field(x_space, codomain=k_space)
m, convergence = steepest_descent_new(energy, gradient, note=True)(m, tol=1E-3, clevel=3)
#s.plot(title="signal") # plot signal
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max()) # plot data
#m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max()) # plot map
......@@ -35,49 +35,52 @@ from __future__ import division
from nifty import * # version 0.7.0
# some signal space; e.g., a one-dimensional regular grid
x_space = rg_space([128,]) # define signal space
if __name__ == "__main__":
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
# some signal space; e.g., a one-dimensional regular grid
x_space = rg_space([128,]) # define signal space
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
s -= s.val.mean()
k_space = x_space.get_codomain() # get conjugate space
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 3)
# some noise variance
delta = 1000 * (np.arange(1, x_space.dim() + 1) / x_space.dim()) ** 5
N = diagonal_operator(d_space, diag=delta, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
S = power_operator(k_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
s -= s.val.mean()
d = R(s) + n # compute data
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
d_space = R.target # get data space
j = R.adjoint_times(N.inverse_times(d)) # define information source
# some noise variance
delta = 1000 * (np.arange(1, x_space.dim() + 1) / x_space.dim()) ** 5
N = diagonal_operator(d_space, diag=delta, bare=True) # define noise covariance
n = N.get_random_field(domain=d_space) # generate noise
d = R(s) + n # compute data
class M_operator(operator):
j = R.adjoint_times(N.inverse_times(d)) # define information source
def _multiply(self, x):
N, R = self.para
return R.adjoint_times(N.inverse_times(R.times(x)))
class M_operator(operator):
C = explicify(S, newdomain=x_space, newtarget=x_space) # explicify S
M = M_operator(x_space, sym=True, uni=False, imp=True, para=(N, R))
M = explicify(M) # explicify M
D = (C.inverse() + M).inverse() # define information propagator
def _multiply(self, x):
N, R = self.para
return R.adjoint_times(N.inverse_times(R.times(x)))
m = D(j) # reconstruct map
vminmax = {"vmin":1.5 * s.val.min(), "vmax":1.5 * s.val.max()}
s.plot(title="signal", **vminmax) # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", **vminmax) # plot data
m.plot(title="reconstructed map", error=D.diag(bare=True), **vminmax) # plot map
D.plot(title="information propagator", bare=True) # plot information propagator
C = explicify(S, newdomain=x_space, newtarget=x_space) # explicify S
M = M_operator(x_space, sym=True, uni=False, imp=True, para=(N, R))
M = explicify(M) # explicify M
D = (C.inverse() + M).inverse() # define information propagator
m = D(j) # reconstruct map
vminmax = {"vmin":1.5 * s.val.min(), "vmax":1.5 * s.val.max()}
s.plot(title="signal", **vminmax) # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", **vminmax) # plot data
m.plot(title="reconstructed map", error=D.diag(bare=True), **vminmax) # plot map
D.plot(title="information propagator", bare=True) # plot information propagator
......@@ -48,47 +48,129 @@ else:
class distributed_data_object(object):
"""
NIFTY class for distributed data
"""A multidimensional array with modular MPI-based distribution schemes.
The purpose of a distributed_data_object (d2o) is to provide the user
with a numpy.ndarray like interface while storing the data on an arbitrary
number of MPI nodes. The logic of a certain distribution strategy is
implemented by an associated distributor.
Parameters
----------
global_data : array-like, at least 1-dimensional
Used with global-type distribution strategies in order to fill the
d2o with data during initialization.
global_shape : tuple of ints
Used with global-type distribution strategies. If no global_data is
supplied, it will be used.
dtype : {np.dtype, type}
Used as the d2o's datatype. Overwrites the data-type of any init data.
local_data : array-like, at least 1-dimensional
Used with local-type distribution strategies in order to fill the
d2o with data during initialization.
local_shape : tuple of ints
Used with local-type distribution strategies. If no local_data is
supplied, local_shape will be used.
distribution_strategy : {'fftw', 'equal', 'not', 'freeform'}, optional
Specifies which distributor will be created and used.
'fftw' uses the distribution strategy of pyfftw,
    'equal' tries to distribute the data as uniformly as possible
'not' does not distribute the data at all
'freeform' distribute the data according to the given local data/shape
hermitian : boolean
Specifies if the given init-data is hermitian or not. The
self.hermitian attribute will be set accordingly.
alias : String
Used in order to initialize the d2o from a hdf5 file.
path : String
Used in order to initialize the d2o from a hdf5 file. If no path is
given, '$working_directory/alias' is used.
comm : mpi4py.MPI.Intracomm
The MPI communicator on which the d2o lives.
copy : boolean
    If True it is guaranteed that the input data will be copied. If False,
    copying is avoided whenever possible.
*args
Although not directly used during the init process, further parameters
are stored in the self.init_args attribute.
**kwargs
Additional keyword arguments are passed to the distributor_factory and
furthermore get stored in the self.init_kwargs attribute.
skip_parsing : boolean (optional keyword argument)
If true, the distribution_factory will skip all sanity checks and
completions of the given (keyword-)arguments. It just uses what it
gets. Hence the user is fully responsible for supplying complete and
consistent parameters. This can be used in order to speed up the init
process. Also see notes section.
Attributes
----------
data : numpy.ndarray
The numpy.ndarray in which the individual node's data is stored.
dtype : type
Data type of the data object.
distribution_strategy : string
Name of the used distribution_strategy.
distributor : distributor
The distributor object which takes care of all distribution and
consolidation of the data.
shape : tuple of int
The global shape of the data.
local_shape : tuple of int
The nodes individual local shape of the stored data.
comm : mpi4py.MPI.Intracomm
The MPI communicator on which the d2o lives.
hermitian : boolean
    Specifies whether the d2o's data definitely possesses hermitian
symmetry.
index : int
The d2o's registration index it got from the d2o_librarian.
init_args : list
Any additional initialization arguments are stored here.
init_kwargs : dict
Any additional initialization keyword arguments are stored here.
Raises
------
ValueError
Raised if
* the supplied distribution strategy is not known,
* comm is None,
* different distribution strategies where given on the
individual nodes,
* different dtypes where given on the individual nodes,
* neither a non-0-dimensional global_data nor global_shape nor
hdf5 file supplied,
* global_shape == (),
* different global_shapes where given on the individual nodes,
* neither non-0-dimensional local_data nor local_shape nor
global d2o supplied,
* local_shape == ()
* the first entry of local_shape is not the same on all nodes,
Notes
-----
The index is the d2o's globally unique identifier. One may use it in order
to assemble the corresponding local d2o objects on different nodes if
only one local object on a specific node is given.
In order to speed up the init process the distributor_factory checks
if the global_configuration object gc yields gc['d2o_init_checks'] == True.
If yes, all expensive checks are skipped; namely those which need
MPI communication. Use this in order to get a fast init speed without
losing d2o's init parsing logic.
Examples
--------
>>> a = np.arange(16, dtype=np.float).reshape((4,4))
>>> obj = distributed_data_object(a, dtype=np.complex)
>>> obj
<distributed_data_object>
array([[ 0.+0.j, 1.+0.j, 2.+0.j, 3.+0.j],
[ 4.+0.j, 5.+0.j, 6.+0.j, 7.+0.j],
[ 8.+0.j, 9.+0.j, 10.+0.j, 11.+0.j],
[ 12.+0.j, 13.+0.j, 14.+0.j, 15.+0.j]])
Parameters
----------
global_data : {tuple, list, numpy.ndarray} *at least 1-dimensional*
Initial data which will be casted to a numpy.ndarray and then
stored according to the distribution strategy. The global_data's
shape overwrites global_shape.
global_shape : tuple of ints, *optional*