Commit b2d496b2 authored by Ultima's avatar Ultima

Started implementing operator tests.

Removed nrow, ncol, ndim from operator class.
Made a tomography demo.
Fixed hashing of spaces and fft cache.
Fixed two bugs in the los_response line_integrator.
parent e5f8538c
...@@ -32,6 +32,13 @@ ...@@ -32,6 +32,13 @@
""" """
from __future__ import division from __future__ import division
import matplotlib as mpl
mpl.use('Agg')
import imp
#nifty = imp.load_module('nifty', None,
# '/home/steininger/Downloads/nifty', ('','',5))
from nifty import * from nifty import *
...@@ -39,7 +46,7 @@ from nifty import * ...@@ -39,7 +46,7 @@ from nifty import *
class problem(object): class problem(object):
def __init__(self, x_space, s2n=2, **kwargs): def __init__(self, x_space, s2n=0.5, **kwargs):
""" """
Sets up a Wiener filter problem. Sets up a Wiener filter problem.
...@@ -51,14 +58,16 @@ class problem(object): ...@@ -51,14 +58,16 @@ class problem(object):
Signal-to-noise ratio (default: 2). Signal-to-noise ratio (default: 2).
""" """
self.store = []
## set signal space ## set signal space
self.z = x_space self.z = x_space
## set conjugate space ## set conjugate space
self.k = self.z.get_codomain() self.k = self.z.get_codomain()
#self.k.power_indices.set_default()
#self.k.set_power_indices(**kwargs) #self.k.set_power_indices(**kwargs)
## set some power spectrum ## set some power spectrum
self.power = (lambda k: 42 / (k + 1) ** 3) self.power = (lambda k: 42 / (k + 1) ** 5)
## define signal covariance ## define signal covariance
self.S = power_operator(self.k, spec=self.power, bare=True) self.S = power_operator(self.k, spec=self.power, bare=True)
...@@ -151,7 +160,10 @@ class problem(object): ...@@ -151,7 +160,10 @@ class problem(object):
## pre-compute denominator ## pre-compute denominator
denominator = self.k.power_indices["rho"] + 2 * (alpha - 1 + abs(epsilon)) denominator = self.k.power_indices["rho"] + 2 * (alpha - 1 + abs(epsilon))
self.save_signal_and_data()
## iterate ## iterate
i = 0
iterating = True iterating = True
while(iterating): while(iterating):
...@@ -159,7 +171,7 @@ class problem(object): ...@@ -159,7 +171,7 @@ class problem(object):
self.m = self.D(self.j, W=self.S, tol=1E-3, note=True) self.m = self.D(self.j, W=self.S, tol=1E-3, note=True)
if(self.m is None): if(self.m is None):
break break
print 'Reconstructed m' #print'Reconstructed m'
## reconstruct power spectrum ## reconstruct power spectrum
tr_B1 = self.Sk.pseudo_tr(self.m) ## == Sk(m).pseudo_dot(m) tr_B1 = self.Sk.pseudo_tr(self.m) ## == Sk(m).pseudo_dot(m)
print 'Calculated trace B1' print 'Calculated trace B1'
...@@ -173,17 +185,49 @@ class problem(object): ...@@ -173,17 +185,49 @@ class problem(object):
print ('denominator', denominator) print ('denominator', denominator)
print ('power', power) print ('power', power)
print 'Calculated power' print 'Calculated power'
power = np.clip(power, 0.1, np.max(power))
power = np.clip(power, 0.00000001, np.max(power))
self.store += [{'tr_B1': tr_B1,
'tr_B2': tr_B2,
'num': numerator,
'denom': denominator}]
## check convergence ## check convergence
dtau = log(power / self.S.get_power(), base=self.S.get_power()) dtau = log(power / self.S.get_power(), base=self.S.get_power())
print ('dtau', np.max(np.abs(dtau)))
iterating = (np.max(np.abs(dtau)) > 2E-2) iterating = (np.max(np.abs(dtau)) > 2E-2)
print max(np.abs(dtau)) #printmax(np.abs(dtau))
self.save_map(i)
i += 1
## update signal covariance ## update signal covariance
self.S.set_power(power, bare=False) ## auto-updates D self.S.set_power(power, bare=False) ## auto-updates D
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def save_signal_and_data(self):
    """Save plots of the true signal and (if available) the data to img/.

    The data plot is best-effort: constructing the data field on the
    signal space may fail (e.g. incompatible spaces), in which case the
    data plot is skipped silently.
    """
    self.s.plot(title="signal", save="img/signal.png")
    try:
        # Cast the data values onto the signal space so they can be
        # plotted with the same color scale as the signal.
        d_ = field(self.z, val=self.d.val, target=self.k)
        d_.plot(title="data", vmin=self.s.min(), vmax=self.s.max(),
                save="img/data.png")
    except Exception:
        # Best-effort plotting only; narrowed from a bare `except:` so
        # SystemExit/KeyboardInterrupt are no longer swallowed.
        pass
def save_map(self, index=0):
    """Write the current reconstruction to img/ as a map plot and a
    power-spectrum plot; does nothing when no map has been computed yet.
    """
    if self.m is None:
        return
    map_file = "img/map_" + str(index) + ".png"
    power_file = 'img/map_power_' + str(index) + ".png"
    # Map plot shares the signal's color scale for direct comparison.
    self.m.plot(title="reconstructed map",
                vmin=self.s.min(), vmax=self.s.max(),
                save=map_file)
    # Power-spectrum plot, overlaid with the true and current model spectra.
    self.m.plot(power=True, mono=False,
                other=(self.power, self.S.get_power()),
                nbin=None, binbounds=None, log=False,
                save=power_file)
def plot(self): def plot(self):
""" """
Produces plots. Produces plots.
...@@ -209,7 +253,7 @@ class problem(object): ...@@ -209,7 +253,7 @@ class problem(object):
##----------------------------------------------------------------------------- ##-----------------------------------------------------------------------------
# #
if(__name__=="__main__"): if(__name__=="__main__"):
x = rg_space((128,)) x = rg_space((128,128), zerocenter=True)
p = problem(x, log = False) p = problem(x, log = False)
about.warnings.off() about.warnings.off()
## pl.close("all") ## pl.close("all")
......
# -*- coding: utf-8 -*-
# Tomography demo (see commit message): reconstruct a Gaussian random
# field from noisy line-of-sight integrals.
from nifty import *
# Suppress nifty's warning messages.
about.warnings.off()
# Signal space: a 256x256 regular grid, and its conjugate (harmonic) space.
shape = (256, 256)
x_space = rg_space(shape)
k_space = x_space.get_codomain()
# Falling power spectrum; k is scaled by the grid side length so the
# spectrum decays over the resolved modes.
power = lambda k: 42/((1+k*shape[0])**2)
# Signal covariance S and one random signal realization drawn from it.
S = power_operator(k_space, codomain=x_space, spec=power)
s = S.get_random_field(domain=x_space)
def make_los(n=10, angle=0, d=1):
    """Build `n` parallel lines of sight crossing a box of size `d`.

    The lines run horizontally from x = -0.2*d to x = 1.2*d at equally
    spaced heights (extending 20% beyond the box so every line fully
    crosses it), and are then rotated by `angle` radians about the box
    center (0.5*d, 0.5*d).

    Parameters
    ----------
    n : int
        Number of lines of sight (default: 10).
    angle : float
        Rotation angle in radians (default: 0).
    d : float
        Box size (default: 1).

    Returns
    -------
    (starts, ends) : tuple of two (n, 2) numpy arrays
        Start and end coordinates of each line.
    """
    # Vectorized endpoint construction (replaces a Python-2-only
    # `xrange` append loop). Note: `1.2 * np.arange(n) / n` multiplies
    # first, so the division is float division even under Python 2.
    heights = (-0.2 + 1.2 * np.arange(n) / n) * d
    starts_list = np.column_stack((np.full(n, -0.2 * d), heights))
    ends_list = np.column_stack((np.full(n, 1.2 * d), heights))
    # Rotate both endpoint sets about the box center.
    rot_matrix = np.array([[np.cos(angle), -np.sin(angle)],
                           [np.sin(angle), np.cos(angle)]])
    center = 0.5 * d
    starts_list = rot_matrix.dot(starts_list.T - center).T + center
    ends_list = rot_matrix.dot(ends_list.T - center).T + center
    return (starts_list, ends_list)
# Collect line-of-sight endpoints for n view angles with m lines each,
# then concatenate ONCE at the end. The original re-concatenated the
# growing array inside the loop (quadratic cost) and used the
# Python-2-only `xrange`.
n = 256
m = 256
all_starts = []
all_ends = []
for j in range(n):
    alpha = np.pi / n * j
    temp_starts, temp_ends = make_los(n=m, angle=alpha)
    all_starts.append(temp_starts)
    all_ends.append(temp_ends)
# Same layout as before: temp_coords[0] is all starts, temp_coords[1]
# all ends, each of shape (n*m, 2).
temp_coords = np.array([np.concatenate(all_starts, axis=0),
                        np.concatenate(all_ends, axis=0)])
# los_response expects coordinate lists per axis: [x_coords, y_coords].
starts = list(temp_coords[0].T)
ends = list(temp_coords[1].T)
#n_points = 360.
#starts = [[(np.cos(i/n_points*np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#starts = list(np.array(starts).T)
#
#ends = [[(np.cos(i/n_points*np.pi + np.pi)+1)*shape[0]/2.,
# (np.sin(i/n_points*np.pi + np.pi)+1)*shape[0]/2.] for i in xrange(int(n_points))]
#ends = list(np.array(ends).T)
# Line-of-sight response operator integrating the signal along each ray.
# sigmas_up/sigmas_low=0.1: presumably Gaussian endpoint uncertainties —
# TODO confirm against the los_response implementation.
R = los_response(x_space, starts=starts, ends=ends, sigmas_up=0.1, sigmas_low=0.1)
d_space = R.target
# Noise covariance: diagonal with variance equal to the signal variance
# (signal-to-noise ratio of 1); draw one noise realization.
N = diagonal_operator(d_space, diag=s.var(), bare=True)
n = N.get_random_field(domain=d_space)
# Synthetic data and the information source j = R^dagger N^-1 d.
d = R(s) + n
j = R.adjoint_times(N.inverse_times(d))
# Information propagator (Wiener filter covariance) and reconstruction
# m = D(j), computed iteratively (at most limii=50 iterations).
D = propagator_operator(S=S, N=N, R=R)
m = D(j, W=S, tol=1E-14, limii=50, note=True)
# Plots: signal, its power spectrum, the information source, and the
# reconstructed map (once with the signal's color scale, once auto-scaled).
s.plot(title="signal", save='1_plot_s.png')
s.plot(save='plot_s_power.png', power=True, other=power)
j.plot(save='plot_j.png')
#d_ = field(x_space, val=d.val, target=k_space)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save='plot_d.png')
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save='1_plot_m.png')
m.plot(title="reconstructed map", save='2_plot_m.png')
m.plot(save='plot_m_power.png', power=True, other=power)
\ No newline at end of file
...@@ -35,6 +35,7 @@ from __future__ import division ...@@ -35,6 +35,7 @@ from __future__ import division
import matplotlib as mpl import matplotlib as mpl
mpl.use('Agg') mpl.use('Agg')
import gc
import imp import imp
#nifty = imp.load_module('nifty', None, #nifty = imp.load_module('nifty', None,
# '/home/steininger/Downloads/nifty', ('','',5)) # '/home/steininger/Downloads/nifty', ('','',5))
...@@ -43,7 +44,7 @@ from nifty import * # version 0.8.0 ...@@ -43,7 +44,7 @@ from nifty import * # version 0.8.0
about.warnings.off() about.warnings.off()
# some signal space; e.g., a two-dimensional regular grid # some signal space; e.g., a two-dimensional regular grid
x_space = rg_space([4280, 1280]) # define signal space x_space = rg_space([128, 128]) # define signal space
#y_space = point_space(1280*1280) #y_space = point_space(1280*1280)
#x_space = hp_space(32) #x_space = hp_space(32)
...@@ -53,12 +54,15 @@ x_space = rg_space([4280, 1280]) # define sign ...@@ -53,12 +54,15 @@ x_space = rg_space([4280, 1280]) # define sign
k_space = x_space.get_codomain() # get conjugate space k_space = x_space.get_codomain() # get conjugate space
# some power spectrum # some power spectrum
power = (lambda k: 42 / (k + 1) ** 4) power = (lambda k: 42 / (k + 1) ** 3)
S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance S = power_operator(k_space, codomain=x_space, spec=power) # define signal covariance
s = S.get_random_field(domain=x_space) # generate signal s = S.get_random_field(domain=x_space) # generate signal
#my_mask = x_space.cast(1)
#my_mask[400:900,400:900] = 0
my_mask = 1
R = response_operator(x_space, sigma=0.0, mask=1.0, assign=None) # define response R = response_operator(x_space, sigma=0.01, mask=my_mask, assign=None) # define response
d_space = R.target # get data space d_space = R.target # get data space
# some noise variance; e.g., signal-to-noise ratio of 1 # some noise variance; e.g., signal-to-noise ratio of 1
...@@ -71,10 +75,18 @@ d = R(s) + n # compute data ...@@ -71,10 +75,18 @@ d = R(s) + n # compute data
j = R.adjoint_times(N.inverse_times(d)) # define information source j = R.adjoint_times(N.inverse_times(d)) # define information source
D = propagator_operator(S=S, N=N, R=R) # define information propagator D = propagator_operator(S=S, N=N, R=R) # define information propagator
m = D(j, W=S, tol=1E-8, limii=10, note=True) # reconstruct map #m = D(j, W=S, tol=1E-8, limii=100, note=True)
#m = D(j, tol=1E-8, limii=20, note=True, force=True)
ident = identity(x_space)
#s.plot(title="signal", save = 'plot_s.png') # plot signal xi = field(x_space, random='gau', target=k_space)
#d_ = field(x_space, val=d.val, target=k_space) m = D(xi, W=ident, tol=1E-8, limii=10, note=True, force=True)
#d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png') # plot data temp_result = (D.inverse_times(m)-xi)
#m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png') # plot map print (temp_result.dot(temp_result))
print (temp_result.val)
s.plot(title="signal", save = 'plot_s.png') # plot signal
d_ = field(x_space, val=d.val, target=k_space)
d_.plot(title="data", vmin=s.min(), vmax=s.max(), save = 'plot_d.png') # plot data
m.plot(title="reconstructed map", vmin=s.min(), vmax=s.max(), save = 'plot_m.png') # plot map
# #
...@@ -193,6 +193,14 @@ class lm_space(point_space): ...@@ -193,6 +193,14 @@ class lm_space(point_space):
self.paradict['lmax'] = x[0] self.paradict['lmax'] = x[0]
self.paradict['mmax'] = x[1] self.paradict['mmax'] = x[1]
def __hash__(self):
    """Hash combining all instance attributes except `power_indices`."""
    result_hash = 0
    for (key, item) in vars(self).items():
        # power_indices is excluded — presumably derived/cached data
        # that should not affect the space's identity; TODO confirm.
        if key in ['power_indices']:
            continue
        # XOR-combine each attribute's hash, multiplied by the hash of
        # its name so the (name, value) pairing matters.
        result_hash ^= item.__hash__() * hash(key)
    return result_hash
def _identifier(self): def _identifier(self):
# Extract the identifying parts from the vars(self) dict. # Extract the identifying parts from the vars(self) dict.
temp = [(ii[0], temp = [(ii[0],
...@@ -605,7 +613,7 @@ class lm_space(point_space): ...@@ -605,7 +613,7 @@ class lm_space(point_space):
x = self.cast(x) x = self.cast(x)
# check sigma # check sigma
if sigma == 0: if sigma == 0:
return x return self.unary_operation(x, op='copy')
elif sigma == -1: elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.") about.infos.cprint("INFO: invalid sigma reset.")
sigma = np.sqrt(2) * np.pi / (self.paradict['lmax'] + 1) sigma = np.sqrt(2) * np.pi / (self.paradict['lmax'] + 1)
...@@ -1158,11 +1166,11 @@ class gl_space(point_space): ...@@ -1158,11 +1166,11 @@ class gl_space(point_space):
nlon = self.paradict['nlon'] nlon = self.paradict['nlon']
lmax = nlat - 1 lmax = nlat - 1
if self.dtype == np.dtype('float32'): if self.dtype == np.dtype('float32'):
x = gl.synfast_f(arg['syn'], x = gl.synfast_f(arg['spec'],
nlat=nlat, nlon=nlon, nlat=nlat, nlon=nlon,
lmax=lmax, mmax=lmax, alm=False) lmax=lmax, mmax=lmax, alm=False)
else: else:
x = gl.synfast(arg['syn'], x = gl.synfast(arg['spec'],
nlat=nlat, nlon=nlon, nlat=nlat, nlon=nlon,
lmax=lmax, mmax=lmax, alm=False) lmax=lmax, mmax=lmax, alm=False)
# weight if discrete # weight if discrete
...@@ -1296,7 +1304,7 @@ class gl_space(point_space): ...@@ -1296,7 +1304,7 @@ class gl_space(point_space):
x = self.cast(x) x = self.cast(x)
# check sigma # check sigma
if sigma == 0: if sigma == 0:
return x return self.unary_operation(x, op='copy')
elif sigma == -1: elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.") about.infos.cprint("INFO: invalid sigma reset.")
sigma = np.sqrt(2) * np.pi / self.paradict['nlat'] sigma = np.sqrt(2) * np.pi / self.paradict['nlat']
...@@ -1873,7 +1881,7 @@ class hp_space(point_space): ...@@ -1873,7 +1881,7 @@ class hp_space(point_space):
x = self.cast(x) x = self.cast(x)
# check sigma # check sigma
if sigma == 0: if sigma == 0:
return x return self.unary_operation(x, op='copy')
elif sigma == -1: elif sigma == -1:
about.infos.cprint("INFO: invalid sigma reset.") about.infos.cprint("INFO: invalid sigma reset.")
# sqrt(2)*pi/(lmax+1) # sqrt(2)*pi/(lmax+1)
...@@ -1881,8 +1889,8 @@ class hp_space(point_space): ...@@ -1881,8 +1889,8 @@ class hp_space(point_space):
elif sigma < 0: elif sigma < 0:
raise ValueError(about._errors.cstring("ERROR: invalid sigma.")) raise ValueError(about._errors.cstring("ERROR: invalid sigma."))
# smooth # smooth
lmax = 3*nside-1 lmax = 3*nside-1
mmax = lmax mmax = lmax
return hp.smoothing(x, fwhm=0.0, sigma=sigma, invert=False, pol=True, return hp.smoothing(x, fwhm=0.0, sigma=sigma, invert=False, pol=True,
iter=niter, lmax=lmax, mmax=mmax, iter=niter, lmax=lmax, mmax=mmax,
use_weights=False, datapath=None) use_weights=False, datapath=None)
......
...@@ -241,6 +241,9 @@ class space(object): ...@@ -241,6 +241,9 @@ class space(object):
def para(self, x): def para(self, x):
self.paradict['default'] = x self.paradict['default'] = x
def __hash__(self):
    # Base-class fallback: every bare `space` hashes to the same
    # constant (hash of the empty tuple); subclasses override this
    # with attribute-based hashing.
    return hash(())
def _identifier(self): def _identifier(self):
""" """
_identifier returns an object which contains all information needed _identifier returns an object which contains all information needed
...@@ -835,6 +838,13 @@ class point_space(space): ...@@ -835,6 +838,13 @@ class point_space(space):
def para(self, x): def para(self, x):
self.paradict['num'] = x[0] self.paradict['num'] = x[0]
def __hash__(self):
    """Hash combining every instance attribute with its name."""
    # Extract the identifying parts from the vars(self) dict.
    result_hash = 0
    for (key, item) in vars(self).items():
        # XOR-combine each attribute's hash, multiplied by the hash of
        # its name so swapping two attribute values changes the result.
        result_hash ^= item.__hash__() * hash(key)
    return result_hash
def _identifier(self): def _identifier(self):
# Extract the identifying parts from the vars(self) dict. # Extract the identifying parts from the vars(self) dict.
temp = [(ii[0], temp = [(ii[0],
...@@ -2476,7 +2486,7 @@ class field(object): ...@@ -2476,7 +2486,7 @@ class field(object):
return return_field return return_field
def smooth(self, sigma=0, overwrite=False, **kwargs): def smooth(self, sigma=0, inplace=False, **kwargs):
""" """
Smoothes the field by convolution with a Gaussian kernel. Smoothes the field by convolution with a Gaussian kernel.
...@@ -2501,7 +2511,7 @@ class field(object): ...@@ -2501,7 +2511,7 @@ class field(object):
Otherwise, nothing is returned. Otherwise, nothing is returned.
""" """
if overwrite: if inplace:
new_field = self new_field = self
else: else:
new_field = self.copy_empty() new_field = self.copy_empty()
......
...@@ -37,6 +37,16 @@ class space_paradict(object): ...@@ -37,6 +37,16 @@ class space_paradict(object):
def __getitem__(self, key): def __getitem__(self, key):
return self.parameters.__getitem__(key) return self.parameters.__getitem__(key)
def __hash__(self):
    """Hash combining every (key, value) pair of the parameter dict."""
    result_hash = 0
    for (key, item) in self.parameters.items():
        try:
            temp_hash = hash(item)
        except TypeError:
            # Unhashable values (e.g. lists/arrays) are converted to a
            # tuple first; assumes the value is iterable — TODO confirm
            # nested unhashable values cannot occur here.
            temp_hash = hash(tuple(item))
        # Tie each value's hash to its key name via multiplication.
        result_hash ^= temp_hash * hash(key)
    return result_hash
class point_space_paradict(space_paradict): class point_space_paradict(space_paradict):
......
...@@ -104,9 +104,10 @@ class power_indices(object): ...@@ -104,9 +104,10 @@ class power_indices(object):
if temp_config_dict is not None: if temp_config_dict is not None:
return self._cast_config_helper(**temp_config_dict) return self._cast_config_helper(**temp_config_dict)
else: else:
temp_log = kwargs.get("log", None) defaults = self.default_parameters
temp_nbin = kwargs.get("nbin", None) temp_log = kwargs.get("log", defaults['log'])
temp_binbounds = kwargs.get("binbounds", None) temp_nbin = kwargs.get("nbin", defaults['nbin'])
temp_binbounds = kwargs.get("binbounds", defaults['binbounds'])
return self._cast_config_helper(log=temp_log, return self._cast_config_helper(log=temp_log,
nbin=temp_nbin, nbin=temp_nbin,
......
...@@ -135,9 +135,9 @@ class random(object): ...@@ -135,9 +135,9 @@ class random(object):
pindex = kwargs.get('pindex', None) pindex = kwargs.get('pindex', None)
kindex = kwargs.get('kindex', None) kindex = kwargs.get('kindex', None)
size = kwargs.get('size', None) size = kwargs.get('size', None)
log = kwargs.get('log', False) log = kwargs.get('log', 'default')
nbin = kwargs.get('nbin', None) nbin = kwargs.get('nbin', 'default')
binbounds = kwargs.get('binbounds', None) binbounds = kwargs.get('binbounds', 'default')
spec = kwargs.get('spec', 1) spec = kwargs.get('spec', 1)
codomain = kwargs.get('codomain', None) codomain = kwargs.get('codomain', None)
......
...@@ -27,7 +27,9 @@ from nifty_operators import operator,\ ...@@ -27,7 +27,9 @@ from nifty_operators import operator,\
vecvec_operator,\ vecvec_operator,\
response_operator,\ response_operator,\
invertible_operator,\ invertible_operator,\
propagator_operator propagator_operator,\
identity,\
identity_operator
from nifty_probing import prober,\ from nifty_probing import prober,\
...@@ -36,5 +38,7 @@ from nifty_probing import prober,\ ...@@ -36,5 +38,7 @@ from nifty_probing import prober,\
diagonal_prober,\ diagonal_prober,\
inverse_diagonal_prober inverse_diagonal_prober
from nifty_los import los_response
from nifty_minimization import conjugate_gradient,\ from nifty_minimization import conjugate_gradient,\
steepest_descent steepest_descent
\ No newline at end of file
...@@ -182,8 +182,8 @@ cdef class line_integrator(object): ...@@ -182,8 +182,8 @@ cdef class line_integrator(object):
cdef FLOAT_t weight cdef FLOAT_t weight
# estimate the maximum number of cells that could be hit # estimate the maximum number of cells that could be hit
# the current estimator is: norm of the vector times number of dims # the current estimator is: norm of the vector times number of dims + 1
num_estimate = INT(ceil(list_norm(list_sub(end, start))))*len(start) num_estimate = INT(ceil(list_norm(list_sub(end, start))))*len(start)+1
cdef np.ndarray[INT_t, ndim=2] index_list = np.empty((num_estimate, cdef np.ndarray[INT_t, ndim=2] index_list = np.empty((num_estimate,
len(start)), len(start)),
...@@ -203,12 +203,18 @@ cdef class line_integrator(object): ...@@ -203,12 +203,18 @@ cdef class line_integrator(object):
index_list[i, j] = floor_current_position[j] index_list[i, j] = floor_current_position[j]
else: else:
index_list[i, j] = floor_next_position[j] index_list[i, j] = floor_next_position[j]
weight_list[i] = weight weight_list[i] = weight
if next_position == end: if next_position == end:
break break
current_position = next_position current_position = next_position
i += 1 i += 1
for j in xrange(len(start)):
if index_list[i, j] == self.shape[j]:
return (list(index_list[:i].T), weight_list[:i])
return (list(index_list[:i+1].T), weight_list[:i+1]) return (list(index_list[:i+1].T), weight_list[:i+1])
...@@ -279,7 +285,6 @@ cpdef list multi_integrator(list starts, ...@@ -279,7 +285,6 @@ cpdef list multi_integrator(list starts,
end[j] = ends[j][i] end[j] = ends[j][i]