Commit fe8f6318 authored by Theo Steininger

Merge branch 'demo_tweaks' into 'nightly'

Demo tweaks

See merge request !203
parents 4dd9b524 0b313930
@@ -95,7 +95,7 @@ if __name__ == "__main__":
     N = ift.DiagonalOperator(s_space, ndiag)
     n = ift.Field.from_random(domain=s_space,
                               random_type='normal',
-                              std=ift.sqrt(noise),
+                              std=np.sqrt(noise),
                               mean=0)

     # Create mock data
@@ -128,7 +128,7 @@ if __name__ == "__main__":
     flat_power = ift.Field(p_space, val=1e-8)
     m0 = flat_power.power_synthesize(real_signal=True)
-    t0 = ift.Field(p_space, val=ift.log(1./(1+p_space.kindex)**2))
+    t0 = ift.Field(p_space, val=np.log(1./(1+p_space.kindex)**2))

     for i in range(500):
         S0 = ift.create_power_operator(h_space, power_spectrum=ift.exp(t0),
...
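The std=ift.sqrt(noise) -> std=np.sqrt(noise) change matches the reworked arithmetic helpers further down in this merge request: ift.sqrt and its siblings now accept only Field objects, so plain scalars go through NumPy. A minimal sketch of the resulting convention (the HPSpace domain is just an illustrative choice):

    import numpy as np
    import nifty as ift

    noise = 0.1                                  # plain Python scalar -> use np.sqrt
    sigma = np.sqrt(noise)

    f = ift.Field(ift.HPSpace(16), val=noise)    # a Field -> use the ift wrapper
    g = ift.sqrt(f)                              # returns a new Field on the same domain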
 # -*- coding: utf-8 -*-
-from nifty import *
+import nifty as ift
+import numpy as np

 if __name__ == "__main__":
-    nifty_configuration['default_distribution_strategy'] = 'fftw'
+    ift.nifty_configuration['default_distribution_strategy'] = 'not'

     # Setting up parameters |\label{code:wf_parameters}|
     correlation_length = 1.  # Typical distance over which the field is correlated
@@ -20,54 +21,53 @@ if __name__ == "__main__":
     L = 2.  # Total side-length of the domain
     N_pixels = 128  # Grid resolution (pixels per axis)

     #signal_space = RGSpace([N_pixels, N_pixels], distances=L/N_pixels)
-    signal_space = HPSpace(16)
-    harmonic_space = FFTOperator.get_default_codomain(signal_space)
-    fft = FFTOperator(harmonic_space, target=signal_space, target_dtype=np.float)
-    power_space = PowerSpace(harmonic_space)
+    signal_space = ift.HPSpace(16)
+    harmonic_space = ift.FFTOperator.get_default_codomain(signal_space)
+    fft = ift.FFTOperator(harmonic_space, target=signal_space, target_dtype=np.float)
+    power_space = ift.PowerSpace(harmonic_space)

     # Creating the mock signal |\label{code:wf_mock_signal}|
-    S = create_power_operator(harmonic_space, power_spectrum=power_spectrum)
-    mock_power = Field(power_space, val=power_spectrum)
+    S = ift.create_power_operator(harmonic_space, power_spectrum=power_spectrum)
+    mock_power = ift.Field(power_space, val=power_spectrum)
     mock_signal = fft(mock_power.power_synthesize(real_signal=True))

     # Setting up an exemplary response
-    mask = Field(signal_space, val=1.)
+    mask = ift.Field(signal_space, val=1.)
     N10 = int(N_pixels/10)
     #mask.val[N10*5:N10*9, N10*5:N10*9] = 0.
-    R = ResponseOperator(signal_space, sigma=(response_sigma,), exposure=(mask,)) #|\label{code:wf_response}|
+    R = ift.ResponseOperator(signal_space, sigma=(response_sigma,), exposure=(mask,)) #|\label{code:wf_response}|
     data_domain = R.target[0]
-    R_harmonic = ComposedOperator([fft, R], default_spaces=[0, 0])
+    R_harmonic = ift.ComposedOperator([fft, R], default_spaces=[0, 0])

     # Setting up the noise covariance and drawing a random noise realization
-    ndiag = Field(data_domain, mock_signal.var()/signal_to_noise).weight(1)
-    N = DiagonalOperator(data_domain, ndiag)
-    noise = Field.from_random(domain=data_domain, random_type='normal',
-                              std=mock_signal.std()/np.sqrt(signal_to_noise), mean=0)
-    data = R(exp(mock_signal)) + noise #|\label{code:wf_mock_data}|
+    ndiag = ift.Field(data_domain, mock_signal.var()/signal_to_noise).weight(1)
+    N = ift.DiagonalOperator(data_domain, ndiag)
+    noise = ift.Field.from_random(domain=data_domain, random_type='normal',
+                                  std=mock_signal.std()/np.sqrt(signal_to_noise), mean=0)
+    data = R(ift.exp(mock_signal)) + noise #|\label{code:wf_mock_data}|

     # Wiener filter
-    m0 = Field(harmonic_space, val=0.j)
-    energy = library.LogNormalWienerFilterEnergy(m0, data, R_harmonic, N, S)
+    m0 = ift.Field(harmonic_space, val=0.j)
+    energy = ift.library.LogNormalWienerFilterEnergy(m0, data, R_harmonic, N, S)
-    minimizer1 = VL_BFGS(convergence_tolerance=1e-5,
-                         iteration_limit=3000,
-                         #callback=convergence_measure,
-                         max_history_length=20)
-    minimizer2 = RelaxedNewton(convergence_tolerance=1e-5,
-                               iteration_limit=10,
-                               #callback=convergence_measure
-                               )
-    minimizer3 = SteepestDescent(convergence_tolerance=1e-5, iteration_limit=1000)
+    IC1 = ift.GradientNormController(iteration_limit=30,
+                                     tol_abs_gradnorm=0.1)
+    minimizer1 = ift.VL_BFGS(IC1, max_history_length=20)
+    IC2 = ift.GradientNormController(iteration_limit=5,
+                                     tol_abs_gradnorm=0.1)
+    minimizer2 = ift.RelaxedNewton(IC2)
+    IC3 = ift.GradientNormController(iteration_limit=100,
+                                     tol_abs_gradnorm=0.1)
+    minimizer3 = ift.SteepestDescent(IC3)

     # me1 = minimizer1(energy)
-    # me2 = minimizer2(energy)
+    me2 = minimizer2(energy)
     # me3 = minimizer3(energy)

     # m1 = fft(me1[0].position)
-    # m2 = fft(me2[0].position)
+    m2 = fft(me2[0].position)
     # m3 = fft(me3[0].position)
     #
@@ -82,19 +82,19 @@ if __name__ == "__main__":
     #Plotting #|\label{code:wf_plotting}|
     #plotter = plotting.RG2DPlotter(color_map=plotting.colormaps.PlankCmap())
-    plotter = plotting.HealpixPlotter(color_map=plotting.colormaps.PlankCmap())
-    plotter.figure.xaxis = plotting.Axis(label='Pixel Index')
-    plotter.figure.yaxis = plotting.Axis(label='Pixel Index')
-    plotter.plot.zmax = 5; plotter.plot.zmin = -5
+    plotter = ift.plotting.HealpixPlotter(color_map=ift.plotting.colormaps.PlankCmap())
+    plotter.figure.xaxis = ift.plotting.Axis(label='Pixel Index')
+    plotter.figure.yaxis = ift.plotting.Axis(label='Pixel Index')
+    #plotter.plot.zmax = 5; plotter.plot.zmin = -5

     ## plotter(variance, path = 'variance.html')
-    # #plotter.plot.zmin = exp(mock_signal.min());
-    # plotter(mock_signal.real, path='mock_signal.html')
-    # plotter(Field(signal_space, val=np.log(data.val.get_full_data().real).reshape(signal_space.shape)),
-    #         path = 'log_of_data.html')
+    #plotter.plot.zmin = np.exp(mock_signal.min());
+    plotter(mock_signal.real, path='mock_signal.html')
+    plotter(ift.Field(signal_space, val=np.log(data.val.get_full_data().real).reshape(signal_space.shape)),
+            path = 'log_of_data.html')
     #
     # plotter(m1.real, path='m_LBFGS.html')
-    # plotter(m2.real, path='m_Newton.html')
+    plotter(m2.real, path='m_Newton.html')
     # plotter(m3.real, path='m_SteepestDescent.html')
     #
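The minimizer block above reflects the new convergence-control interface: the iteration limit and gradient-norm tolerance now live in a GradientNormController passed to the minimizer, instead of being constructor arguments of the minimizer itself. A minimal sketch of the pattern, using only calls that appear in this diff (the energy object is assumed to be built as in the demo):

    import nifty as ift

    controller = ift.GradientNormController(iteration_limit=30,
                                            tol_abs_gradnorm=0.1)
    minimizer = ift.VL_BFGS(controller, max_history_length=20)

    # with 'energy' constructed as above (LogNormalWienerFilterEnergy):
    # result = minimizer(energy)
    # m = fft(result[0].position)    # map the minimum back to signal space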
@@ -7,7 +7,7 @@ from nifty import plotting
 from keepers import Repository

 if __name__ == "__main__":
-    ift.nifty_configuration['default_distribution_strategy'] = 'fftw'
+    ift.nifty_configuration['default_distribution_strategy'] = 'not'

     signal_to_noise = 1.5  # The signal to noise ratio
@@ -110,7 +110,7 @@ if __name__ == "__main__":
     # Probing the variance
     class Proby(ift.DiagonalProberMixin, ift.Prober): pass
-    proby = Proby((signal_space_1, signal_space_2), probe_count=100)
+    proby = Proby((signal_space_1, signal_space_2), probe_count=1)
     proby(lambda z: fft(wiener_curvature.inverse_times(fft.inverse_times(z))))

     # sm = SmoothingOperator(signal_space, sigma=0.02)
     # variance = sm(proby.diagonal.weight(-1))
...
@@ -7,7 +7,7 @@ from keepers import Repository

 if __name__ == "__main__":
-    ift.nifty_configuration['default_distribution_strategy'] = 'fftw'
+    ift.nifty_configuration['default_distribution_strategy'] = 'not'

     # Setting up parameters |\label{code:wf_parameters}|
     correlation_length_scale = 1.  # Typical distance over which the field is correlated
@@ -55,7 +55,7 @@ if __name__ == "__main__":
     # Probing the uncertainty |\label{code:wf_uncertainty_probing}|
     class Proby(ift.DiagonalProberMixin, ift.Prober): pass
-    proby = Proby(signal_space, probe_count=800)
+    proby = Proby(signal_space, probe_count=1)
     proby(lambda z: fft(wiener_curvature.inverse_times(fft.inverse_times(z)))) #|\label{code:wf_variance_fft_wrap}|

     sm = ift.FFTSmoothingOperator(signal_space, sigma=0.03)
...
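Both demos estimate the pixel-wise posterior uncertainty by probing the diagonal of the inverse Wiener-filter curvature; lowering probe_count to 1 simply makes the demo faster at the cost of a noisier estimate. A sketch of the probing step, assuming signal_space, fft and wiener_curvature are defined as in the demo:

    import nifty as ift

    class Proby(ift.DiagonalProberMixin, ift.Prober):
        pass                                     # prober that collects a diagonal estimate

    proby = Proby(signal_space, probe_count=1)   # more probes -> smoother estimate
    # each probe applies the posterior covariance, wrapped by FFTs into signal space
    proby(lambda z: fft(wiener_curvature.inverse_times(fft.inverse_times(z))))

    sm = ift.FFTSmoothingOperator(signal_space, sigma=0.03)
    variance = sm(proby.diagonal.weight(-1))     # smoothed variance map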
@@ -8,7 +8,7 @@ from nifty.library import WienerFilterCurvature

 if __name__ == "__main__":
-    nifty_configuration['default_distribution_strategy'] = 'fftw'
+    nifty_configuration['default_distribution_strategy'] = 'not'
     nifty_configuration['harmonic_rg_base'] = 'real'

     # Setting up variable parameters
...
@@ -43,7 +43,7 @@ class AdjointFFTResponse(LinearOperator):

 if __name__ == "__main__":
-    nifty_configuration['default_distribution_strategy'] = 'fftw'
+    nifty_configuration['default_distribution_strategy'] = 'not'
     nifty_configuration['harmonic_rg_base'] = 'real'

     # Set up position space
...
@@ -27,81 +27,92 @@ __all__ = ['cos', 'sin', 'cosh', 'sinh', 'tan', 'tanh', 'arccos', 'arcsin',
            'conjugate', 'clipped_exp', 'limited_exp', 'limited_exp_deriv']


-def _math_helper(x, function):
-    if isinstance(x, Field):
-        result_val = x.val.apply_scalar_function(function)
-        result = x.copy_empty(dtype=result_val.dtype)
-        result.val = result_val
-    elif isinstance(x, distributed_data_object):
-        result = x.apply_scalar_function(function, inplace=False)
-    else:
-        result = function(np.asarray(x))
-    return result
+def _math_helper(x, function, out):
+    if not isinstance(x, Field):
+        raise TypeError("This function only accepts Field objects.")
+    if out is not None:
+        if not isinstance(out, Field) or x.domain!=out.domain:
+            raise ValueError("Bad 'out' argument")
+        function(x.val, out=out.val)
+        return out
+    else:
+        return Field(domain=x.domain, val=function(x.val))
-def cos(x):
-    return _math_helper(x, np.cos)
+def cos(x, out=None):
+    return _math_helper(x, np.cos, out)

-def sin(x):
-    return _math_helper(x, np.sin)
+def sin(x, out=None):
+    return _math_helper(x, np.sin, out)

-def cosh(x):
-    return _math_helper(x, np.cosh)
+def cosh(x, out=None):
+    return _math_helper(x, np.cosh, out)

-def sinh(x):
-    return _math_helper(x, np.sinh)
+def sinh(x, out=None):
+    return _math_helper(x, np.sinh, out)

-def tan(x):
-    return _math_helper(x, np.tan)
+def tan(x, out=None):
+    return _math_helper(x, np.tan, out)

-def tanh(x):
-    return _math_helper(x, np.tanh)
+def tanh(x, out=None):
+    return _math_helper(x, np.tanh, out)

-def arccos(x):
-    return _math_helper(x, np.arccos)
+def arccos(x, out=None):
+    return _math_helper(x, np.arccos, out)

-def arcsin(x):
-    return _math_helper(x, np.arcsin)
+def arcsin(x, out=None):
+    return _math_helper(x, np.arcsin, out)

-def arccosh(x):
-    return _math_helper(x, np.arccosh)
+def arccosh(x, out=None):
+    return _math_helper(x, np.arccosh, out)

-def arcsinh(x):
-    return _math_helper(x, np.arcsinh)
+def arcsinh(x, out=None):
+    return _math_helper(x, np.arcsinh, out)

-def arctan(x):
-    return _math_helper(x, np.arctan)
+def arctan(x, out=None):
+    return _math_helper(x, np.arctan, out)

-def arctanh(x):
-    return _math_helper(x, np.arctanh)
+def arctanh(x, out=None):
+    return _math_helper(x, np.arctanh, out)

-def sqrt(x):
-    return _math_helper(x, np.sqrt)
+def sqrt(x, out=None):
+    return _math_helper(x, np.sqrt, out)

-def exp(x):
-    return _math_helper(x, np.exp)
+def exp(x, out=None):
+    return _math_helper(x, np.exp, out)

+def log(x, out=None):
+    return _math_helper(x, np.log, out)

+def conjugate(x, out=None):
+    return _math_helper(x, np.conjugate, out)

+def conj(x, out=None):
+    return _math_helper(x, np.conj, out)

-def clipped_exp(x):
-    return _math_helper(x, lambda z: np.exp(np.minimum(200, z)))
+def clipped_exp(x, out=None):
+    return _math_helper(x, lambda z: np.exp(np.minimum(200, z)), out)

-def limited_exp(x):
-    return _math_helper(x, _limited_exp_helper)
+def limited_exp(x, out=None):
+    return _math_helper(x, _limited_exp_helper, out)


 def _limited_exp_helper(x):
@@ -114,8 +125,8 @@ def _limited_exp_helper(x):
     return result


-def limited_exp_deriv(x):
-    return _math_helper(x, _limited_exp_deriv_helper)
+def limited_exp_deriv(x, out=None):
+    return _math_helper(x, _limited_exp_deriv_helper, out)


 def _limited_exp_deriv_helper(x):
@@ -127,19 +138,3 @@ def _limited_exp_deriv_helper(x):
     result[mask] = np.exp(thr)
     result[~mask] = np.exp(x[~mask])
     return result
-
-
-def log(x, base=None):
-    result = _math_helper(x, np.log)
-    if base is not None:
-        result = result/log(base)
-    return result
-
-
-def conjugate(x):
-    return _math_helper(x, np.conjugate)
-
-
-def conj(x):
-    return _math_helper(x, np.conjugate)
@@ -1081,8 +1081,8 @@ class Field(Loggable, Versionable, object):
             The domain of x must contain `self.domain`

         spaces : tuple of ints
-            If the domain of `self` and `x` are not the same, `spaces` specfies
-            the mapping.
+            If the domain of `self` and `x` are not the same, `spaces`
+            specifies the mapping.

         Returns
         -------
...
@@ -39,7 +39,7 @@ class IterationController(
     The concrete convergence criteria can be chosen by inheriting from this
     class; the implementer has full flexibility to use whichever criteria are
-    appropriate for a particular problem - as ong as they can be computed from
+    appropriate for a particular problem - as long as they can be computed from
     the information passed to the controller during the iteration process.
     """
...
@@ -20,7 +20,6 @@ import numpy as np
 from ...field import Field
 from ...spaces.power_space import PowerSpace
 from ..endomorphic_operator import EndomorphicOperator
-from ... import sqrt
 from ... import nifty_utilities as utilities
@@ -109,7 +108,7 @@ class LaplaceOperator(EndomorphicOperator):
         ret[sl_l] = deriv
         ret[prefix + (-1,)] = 0.
         ret[sl_r] -= deriv
-        ret /= sqrt(dposc)
+        ret /= np.sqrt(dposc)
         ret[prefix + (slice(None, 2),)] = 0.
         ret[prefix + (-1,)] = 0.
         return Field(self.domain, val=ret).weight(power=-0.5, spaces=spaces)
@@ -131,7 +130,7 @@ class LaplaceOperator(EndomorphicOperator):
         dpos = self._dpos.reshape((1,)*axis + (nval-1,))
         dposc = self._dposc.reshape((1,)*axis + (nval,))
         y = x.copy().weight(power=0.5).val
-        y /= sqrt(dposc)
+        y /= np.sqrt(dposc)
         y[prefix + (slice(None, 2),)] = 0.
         y[prefix + (-1,)] = 0.
         deriv = (y[sl_r]-y[sl_l])/dpos  # defined between points
...