Commit b2d496b2 authored by Ultima

Started implementing operator tests.

Removed nrow, ncol, ndim from operator class.
Made a tomography demo.
Fixed hashing of spaces and fft cache.
Fixed two bugs in the los_response line_integrator.
parent e5f8538c
......@@ -32,6 +32,13 @@
"""
from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import imp
#nifty = imp.load_module('nifty', None,
# '/home/steininger/Downloads/nifty', ('','',5))
from nifty import *
......@@ -39,7 +46,7 @@ from nifty import *
class problem(object):
def __init__(self, x_space, s2n=2, **kwargs):
def __init__(self, x_space, s2n=0.5, **kwargs):
"""
Sets up a Wiener filter problem.
......@@ -51,14 +58,16 @@ class problem(object):
Signal-to-noise ratio (default: 2).
"""
self.store = []
## set signal space
self.z = x_space
## set conjugate space
self.k = self.z.get_codomain()
#self.k.power_indices.set_default()
#self.k.set_power_indices(**kwargs)
## set some power spectrum
self.power = (lambda k: 42 / (k + 1) ** 3)
self.power = (lambda k: 42 / (k + 1) ** 5)
## define signal covariance
self.S = power_operator(self.k, spec=self.power, bare=True)
......@@ -151,7 +160,10 @@ class problem(object):
## pre-compute denominator
denominator = self.k.power_indices["rho"] + 2 * (alpha - 1 + abs(epsilon))
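## rho counts the independent modes per spectral band; alpha and epsilon are
## (presumably) the inverse-gamma prior and perception parameters of the
## critical-filter power spectrum update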
self.save_signal_and_data()
## iterate
i = 0
iterating = True
while(iterating):
......@@ -159,7 +171,7 @@ class problem(object):
self.m = self.D(self.j, W=self.S, tol=1E-3, note=True)
if(self.m is None):
break
print 'Reconstructed m'
# print 'Reconstructed m'
## reconstruct power spectrum
tr_B1 = self.Sk.pseudo_tr(self.m) ## == Sk(m).pseudo_dot(m)
print 'Calculated trace B1'
......@@ -173,17 +185,49 @@ class problem(object):
print ('denominator', denominator)
print ('power', power)
print 'Calculated power'
power = np.clip(power, 0.1, np.max(power))
power = np.clip(power, 0.00000001, np.max(power))
self.store += [{'tr_B1': tr_B1,
'tr_B2': tr_B2,
'num': numerator,
'denom': denominator}]
## check convergence
dtau = log(power / self.S.get_power(), base=self.S.get_power())
print ('dtau', np.max(np.abs(dtau)))
iterating = (np.max(np.abs(dtau)) > 2E-2)
print max(np.abs(dtau))
# print max(np.abs(dtau))
self.save_map(i)
i += 1
## update signal covariance
self.S.set_power(power, bare=False) ## auto-updates D
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def save_signal_and_data(self):
self.s.plot(title="signal", save="img/signal.png")
try:
d_ = field(self.z, val=self.d.val, target=self.k)
d_.plot(title="data", vmin=self.s.min(), vmax=self.s.max(),
save="img/data.png")
except:
pass
def save_map(self, index=0):
# save map
if(self.m is None):
pass
else:
self.m.plot(title="reconstructed map",
vmin=self.s.min(), vmax=self.s.max(),
save="img/map_"+str(index)+".png")
self.m.plot(power=True, mono=False,
other=(self.power, self.S.get_power()),
nbin=None, binbounds=None, log=False,
save='img/map_power_'+str(index)+".png")
def plot(self):
"""
Produces plots.
......@@ -209,7 +253,7 @@ class problem(object):
##-----------------------------------------------------------------------------
#
if(__name__=="__main__"):
x = rg_space((128,))
x = rg_space((128,128), zerocenter=True)
p = problem(x, log = False)
about.warnings.off()
## pl.close("all")
......
# -*- coding: utf-8 -*-
from nifty import *
about.warnings.off()
shape = (256, 256)
x_space = rg_space(shape)
k_space = x_space.get_codomain()
power = lambda k: 42/((1+k*shape[0])**2)
S = power_operator(k_space, codomain=x_space, spec=power)
s = S.get_random_field(domain=x_space)
def make_los(n=10, angle=0, d=1):
starts_list = []
ends_list = []
for i in xrange(n):
starts_list += [[(-0.2)*d, (-0.2 + 1.2*i/n)*d]]
ends_list += [[(1.2)*d, (-0.2 + 1.2*i/n)*d]]
starts_list = np.array(starts_list)
ends_list = np.array(ends_list)
rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
[np.sin(angle), np.cos(angle)]])
starts_list = rot_matrix.dot(starts_list.T-0.5*d).T+0.5*d
ends_list = rot_matrix.dot(ends_list.T-0.5*d).T+0.5*d
return (starts_list, ends_list)
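# rough usage sketch: ten horizontal lines of sight, and the same fan rotated
# by 45 degrees about the box centre (0.5*d, 0.5*d)
#   starts, ends = make_los(n=10, angle=0)
#   starts_rot, ends_rot = make_los(n=10, angle=np.pi/4)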
temp_coords = (np.empty((0,2)), np.empty((0,2)))
n = 256
m = 256
for alpha in [np.pi/n*j for j in xrange(n)]:
temp = make_los(n=m, angle=alpha)
temp_coords = np.concatenate([temp_coords, temp], axis=1)
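# with n = m = 256 this accumulates 256 angles times 256 parallel lines,
# i.e. 65536 lines of sight in total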
starts = list(temp_coords[0].T)
ends = list(temp_coords[1].T)
#n_points = 360.
#starts = [[(np.cos(i/n_points*np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#starts = list(np.array(starts).T)
#
#ends = [[(np.cos(i/n_points*np.pi + np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi + np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#ends = list(np.array(ends).T)
R = los_response(x_space, starts=starts, ends=ends, sigmas_up=0.1, sigmas_low=0.1)
d_space = R.target
N = diagonal_operator(d_space, diag=s.var(), bare=True)
n = N.get_random_field(domain=d_space)
d = R(s) + n
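# Wiener filter: j = R^dagger N^-1 d is the information source and
# D = (S^-1 + R^dagger N^-1 R)^-1 the information propagator, so m = D(j)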
j = R.adjoint_times(N.inverse_times(d))
D = propagator_operator(S=S, N=N, R=R)
m = D(j, W=S, tol=1E-14, limii=50, note=True)
s.plot(title="signal", save='1_plot_s.png')
s.plot(save='plot_s_power.png', power=True, other=power)
j.plot(save='plot_j.png')
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save='plot_d.png')
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save='1_plot_m.png')
m.plot(title="reconstructed map", save='2_plot_m.png')
m.plot(save='plot_m_power.png', power=True, other=power)
\ No newline at end of file
......@@ -35,6 +35,7 @@ from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import gc
import imp
#nifty = imp.load_module('nifty', None,
# '/home/steininger/Downloads/nifty', ('','',5))
......@@ -43,7 +44,7 @@ from nifty import * # version 0.8.0
about.warnings.off()
# some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([4280, 1280]) # define signal space
x_space = rg_space([128, 128]) # define signal space
#y_space = point_space(1280*1280)
#x_space = hp_space(32)
......@@ -53,12 +54,15 @@ x_space = rg_space([4280, 1280]) # define sign
k_space = x_space.get_codomain() # get conjugate space
# some power spectrum
power = (lambda k: 42 / (k + 1) ** 4)
power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal
#my_mask = x_space.cast(1)
#my_mask[400:900,400:900] = 0
my_mask = 1
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response
R = response_operator(x_space, sigma=0.01, mask=my_mask, assign=None) # define response
d_space = R.target # get data space
# some noise variance; e.g., signal-to-noise ratio of 1
......@@ -71,10 +75,18 @@ d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator
m = D(j, W=S, tol=1E-8, limii=10, note=True) # reconstruct map
#m = D(j, W=S, tol=1E-8, limii=100, note=True)
#m = D(j, tol=1E-8, limii=20, note=True, force=True)
ident = identity(x_space)
#s.plot(title="signal", save = 'plot_s.png') # plot signal
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png') # plot data
#m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png') # plot map
xi = field(x_space, random='gau', target=k_space)
m = D(xi, W=ident, tol=1E-8, limii=10, note=True, force=True)
temp_result = (D.inverse_times(m)-xi)
print (temp_result.dot(temp_result))
print (temp_result.val)
s.plot(title="signal", save = 'plot_s.png') # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png') # plot data
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png') # plot map
#
......@@ -193,6 +193,14 @@ class lm_space(point_space):
self.paradict['lmax'] = x[0]
self.paradict['mmax'] = x[1]
def __hash__(self):
result_hash = 0
for (key, item) in vars(self).items():
if key in ['power_indices']:
continue
result_hash ^= item.__hash__() * hash(key)
return result_hash
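# power_indices is derived data and is skipped, so equal lm_spaces hash
# equally; the fft-cache fix mentioned in the commit message presumably
# relies on this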
def _identifier(self):
# Extract the identifying parts from the vars(self) dict.
temp = [(ii[0],
......@@ -605,7 +613,7 @@ class lm_space(point_space):
x = self.cast(x)
# check sigma
if sigma == 0:
return x
return self.unary_operation(x, op='copy')
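# op='copy' returns a new field, so smoothing with sigma == 0 no longer
# aliases the input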
elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.")
sigma = np.sqrt(2) * np.pi / (self.paradict['lmax'] + 1)
......@@ -1158,11 +1166,11 @@ class gl_space(point_space):
nlon = self.paradict['nlon']
lmax = nlat - 1
if self.dtype == np.dtype('float32'):
x = gl.synfast_f(arg['syn'],
x = gl.synfast_f(arg['spec'],
nlat=nlat, nlon=nlon,
lmax=lmax, mmax=lmax, alm=False)
else:
x = gl.synfast(arg['syn'],
x = gl.synfast(arg['spec'],
nlat=nlat, nlon=nlon,
lmax=lmax, mmax=lmax, alm=False)
# weight if discrete
......@@ -1296,7 +1304,7 @@ class gl_space(point_space):
x = self.cast(x)
# check sigma
if sigma == 0:
return x
return self.unary_operation(x, op='copy')
elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.")
sigma = np.sqrt(2) * np.pi / self.paradict['nlat']
......@@ -1873,7 +1881,7 @@ class hp_space(point_space):
x = self.cast(x)
# check sigma
if sigma == 0:
return x
return self.unary_operation(x, op='copy')
elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.")
# sqrt(2)*pi/(lmax+1)
......
......@@ -241,6 +241,9 @@ class space(object):
def para(self, x):
self.paradict['default'] = x
def __hash__(self):
return hash(())
def _identifier(self):
"""
_identifier returns an object which contains all information needed
......@@ -835,6 +838,13 @@ class point_space(space):
def para(self, x):
self.paradict['num'] = x[0]
def __hash__(self):
# Extract the identifying parts from the vars(self) dict.
result_hash = 0
for (key, item) in vars(self).items():
result_hash ^= item.__hash__() * hash(key)
return result_hash
def _identifier(self):
# Extract the identifying parts from the vars(self) dict.
temp = [(ii[0],
......@@ -2476,7 +2486,7 @@ class field(object):
return return_field
def smooth(self, sigma=0, overwrite=False, **kwargs):
def smooth(self, sigma=0, inplace=False, **kwargs):
"""
Smoothes the field by convolution with a Gaussian kernel.
......@@ -2501,7 +2511,7 @@ class field(object):
Otherwise, nothing is returned.
"""
if overwrite:
if inplace:
new_field = self
else:
new_field = self.copy_empty()
......
......@@ -37,6 +37,16 @@ class space_paradict(object):
def __getitem__(self, key):
return self.parameters.__getitem__(key)
def __hash__(self):
result_hash = 0
for (key, item) in self.parameters.items():
try:
temp_hash = hash(item)
except TypeError:
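# unhashable parameter values (e.g. lists) are hashed via a tuple copy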
temp_hash = hash(tuple(item))
result_hash ^= temp_hash * hash(key)
return result_hash
class point_space_paradict(space_paradict):
......
......@@ -104,9 +104,10 @@ class power_indices(object):
if temp_config_dict is not None:
return self._cast_config_helper(**temp_config_dict)
else:
temp_log = kwargs.get("log", None)
temp_nbin = kwargs.get("nbin", None)
temp_binbounds = kwargs.get("binbounds", None)
defaults = self.default_parameters
temp_log = kwargs.get("log", defaults['log'])
temp_nbin = kwargs.get("nbin", defaults['nbin'])
temp_binbounds = kwargs.get("binbounds", defaults['binbounds'])
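# missing kwargs now fall back to the instance's stored default parameters
# instead of a hard-coded None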
return self._cast_config_helper(log=temp_log,
nbin=temp_nbin,
......
......@@ -135,9 +135,9 @@ class random(object):
pindex = kwargs.get('pindex', None)
kindex = kwargs.get('kindex', None)
size = kwargs.get('size', None)
log = kwargs.get('log', False)
nbin = kwargs.get('nbin', None)
binbounds = kwargs.get('binbounds', None)
log = kwargs.get('log', 'default')
nbin = kwargs.get('nbin', 'default')
binbounds = kwargs.get('binbounds', 'default')
spec = kwargs.get('spec', 1)
codomain = kwargs.get('codomain', None)
......
......@@ -27,7 +27,9 @@ from nifty_operators import operator,\
vecvec_operator,\
response_operator,\
invertible_operator,\
propagator_operator
propagator_operator,\
identity,\
identity_operator
from nifty_probing import prober,\
......@@ -36,5 +38,7 @@ from nifty_probing import prober,\
diagonal_prober,\
inverse_diagonal_prober
from nifty_los import los_response
from nifty_minimization import conjugate_gradient,\
steepest_descent
\ No newline at end of file
......@@ -182,8 +182,8 @@ cdef class line_integrator(object):
cdef FLOAT_t weight
# estimate the maximum number of cells that could be hit
# the current estimator is: norm of the vector times number of dims
num_estimate = INT(ceil(list_norm(list_sub(end, start))))*len(start)
# the current estimator is: norm of the vector times number of dims + 1
num_estimate = INT(ceil(list_norm(list_sub(end, start))))*len(start)+1
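# e.g. in 2d a line from (0.2, 0.3) to (3.2, 4.3) has norm 5.0, so
# ceil(5.0)*2 + 1 = 11 cells are reserved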
cdef np.ndarray[INT_t, ndim=2] index_list = np.empty((num_estimate,
len(start)),
......@@ -203,12 +203,18 @@ cdef class line_integrator(object):
index_list[i, j] = floor_current_position[j]
else:
index_list[i, j] = floor_next_position[j]
weight_list[i] = weight
if next_position == end:
break
current_position = next_position
i += 1
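# if the last visited index equals self.shape[j] it lies one cell past the
# grid edge and is dropped from the result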
for j in xrange(len(start)):
if index_list[i, j] == self.shape[j]:
return (list(index_list[:i].T), weight_list[:i])
return (list(index_list[:i+1].T), weight_list[:i+1])
......@@ -279,7 +285,6 @@ cpdef list multi_integrator(list starts,
end[j] = ends[j][i]
sigma_low[j] = sigmas_low[j][i]
sigma_up[j] = sigmas_up[j][i]
indices_and_weights_list += single_integrator(i,
start,
end,
......
......@@ -13,9 +13,9 @@ from nifty.nifty_core import point_space,\
field
from nifty.rg import rg_space
from nifty.operators import operator
MPI = gdi['MPI']
class los_response(operator):
def __init__(self, domain, starts, ends, sigmas_low=None, sigmas_up=None,
......@@ -155,7 +155,7 @@ class los_response(operator):
"numpy ndarray."))
return parsed_sig
def convert_physical_to_indices(self, physical_positions):
def _convert_physical_to_indices(self, physical_positions):
pixel_coordinates = [None]*len(physical_positions)
local_zero_point = self._get_local_zero_point()
......@@ -168,12 +168,12 @@ class los_response(operator):
return pixel_coordinates
def _convert_physical_to_pixel_lengths(self, lengths, starts, ends):
directions = np.array(ends) - np.array(starts)
distances = np.array(self.domain.distances)[:, None]
rescalers = (np.linalg.norm(directions / distances, axis=0) /
np.linalg.norm(directions, axis=0))
return lengths * rescalers
# def _convert_physical_to_pixel_lengths(self, lengths, starts, ends):
# directions = np.array(ends) - np.array(starts)
# distances = np.array(self.domain.distances)[:, None]
# rescalers = (np.linalg.norm(directions / distances, axis=0) /
# np.linalg.norm(directions, axis=0))
# return lengths * rescalers
def _convert_sigmas_to_physical_coordinates(self, starts, ends,
sigmas_low, sigmas_up):
......@@ -222,8 +222,8 @@ class los_response(operator):
def _compute_weights_and_indices(self):
# compute the local pixel coordinates for the starts and ends
localized_pixel_starts = self.convert_physical_to_indices(self.starts)
localized_pixel_ends = self.convert_physical_to_indices(self.ends)
localized_pixel_starts = self._convert_physical_to_indices(self.starts)
localized_pixel_ends = self._convert_physical_to_indices(self.ends)
# Convert the sigmas from physical distances to pixel coordinates
# Therefore transform the distances to physical coordinates...
......@@ -233,9 +233,9 @@ class los_response(operator):
self.sigmas_low,
self.sigmas_up)
# ...and then transform them to pixel coordinates
localized_pixel_sigmas_low = self.convert_physical_to_indices(
localized_pixel_sigmas_low = self._convert_physical_to_indices(
sigmas_low_coords)
localized_pixel_sigmas_up = self.convert_physical_to_indices(
localized_pixel_sigmas_up = self._convert_physical_to_indices(
sigmas_up_coords)
# get the shape of the local data slice
......
......@@ -31,7 +31,7 @@ from nifty_probing import trace_prober,\
diagonal_prober,\
inverse_diagonal_prober
import nifty.nifty_utilities as utilities
import nifty.nifty_simple_math
import nifty.nifty_simple_math as nifty_simple_math
# =============================================================================
......@@ -108,8 +108,7 @@ class operator(object):
"""
def __init__(self, domain, codomain=None, sym=False, uni=False,
imp=False, target=None, cotarget=None, bare=False,
para=None):
imp=False, target=None, cotarget=None, bare=False):
"""
Sets the attributes for an operator class instance.
......@@ -184,8 +183,6 @@ class operator(object):
else:
self.imp = bool(imp)
self.para = para
def set_val(self, new_val):
"""
Resets the field values.
......@@ -196,77 +193,12 @@ class operator(object):
New field values either as a constant or an arbitrary array.
"""
self.val = self.domain.cast(new_val)
self.val = new_val
return self.val
def get_val(self):
return self.val
def nrow(self):
"""
Computes the number of rows.
Returns
-------
nrow : int
number of rows (equal to the dimension of the codomain)
"""
return self.target.get_dim(split=False)
def ncol(self):
"""
Computes the number of columns
Returns
-------
ncol : int
number of columns (equal to the dimension of the domain)
"""
return self.domain.get_dim(split=False)
def dim(self, axis=None):
"""
Computes the dimension of the space
Parameters
----------
axis : int, *optional*
Axis along which the dimension is to be calculated.
(default: None)
Returns
-------
dim : {int, ndarray}
The dimension(s) of the operator.
"""
if axis is None:
return np.array([self.nrow(), self.ncol()])
elif axis == 0:
return self.nrow()
elif axis == 1:
return self.ncol()
else:
raise ValueError(about._errors.cstring(
"ERROR: invalid input axis."))
def set_para(self, newpara):
"""
Sets the parameters and creates the `para` property if it does
not exist
Parameters
----------
newpara : {object, list of objects}
A single parameter or a list of parameters.
Returns
-------
None
"""
self.para = newpara
def _multiply(self, x, **kwargs):