diff --git a/1_wiener_filter.py b/1_wiener_filter.py
index a7318de71887a7a1261accd2b5969531a6be846d..274689997791008b5ad0c06c1a331e6a81a73f7b 100644
--- a/1_wiener_filter.py
+++ b/1_wiener_filter.py
@@ -3,6 +3,4 @@ import numpy as np
 import nifty7 as ift
 from helpers import generate_wf_data, plot_WF
 
-np.random.seed(42)
-
 # Want to implement: m = Dj = (S^{-1} + R^T N^{-1} R)^{-1} R^T N^{-1} d
diff --git a/1_wiener_filter_solution.py b/1_wiener_filter_solution.py
index 5c5975a2b6a7030cc1409179dd7c3ec9d71ef621..e4c82bf660936b520c49ac293680ec8ecf9cb0ec 100644
--- a/1_wiener_filter_solution.py
+++ b/1_wiener_filter_solution.py
@@ -20,8 +20,6 @@ import numpy as np
 import nifty7 as ift
 from helpers import generate_wf_data, plot_WF
 
-np.random.seed(42)
-
 # Want to implement: m = Dj = (S^{-1} + R^T N^{-1} R)^{-1} R^T N^{-1} d
 
 position_space = ift.RGSpace(256)
diff --git a/2_critical_filter.py b/2_critical_filter.py
index 38009dd9513e700a895eee44d7d3f156faf41e2b..9e0961a5249ef8076f99bae5d772e98a37f66a49 100644
--- a/2_critical_filter.py
+++ b/2_critical_filter.py
@@ -19,8 +19,6 @@ import numpy as np
 
 import nifty7 as ift
 
-np.random.seed(42)
-
 position_space = ift.RGSpace(2*(256,))
 harmonic_space = position_space.get_default_codomain()
 HT = ift.HarmonicTransformOperator(harmonic_space, target=position_space)
diff --git a/2_critical_filter_solution.py b/2_critical_filter_solution.py
index 98b842146d79ca2f0ec50e916335428214a3332f..350f7cad2b24a142d83d589c3eb49fa24cbc933b 100644
--- a/2_critical_filter_solution.py
+++ b/2_critical_filter_solution.py
@@ -21,28 +21,26 @@ import nifty7 as ift
 from helpers import (checkerboard_response, generate_gaussian_data,
                      plot_prior_samples_2d, plot_reconstruction_2d)
 
-np.random.seed(42)
-
 position_space = ift.RGSpace(2*(256,))
-harmonic_space = position_space.get_default_codomain()
-HT = ift.HarmonicTransformOperator(harmonic_space, target=position_space)
-power_space = ift.PowerSpace(harmonic_space)
-
-# Set up generative model
-A = ift.SLAmplitude(
-    **{
-        'target': power_space,
-        'n_pix': 64,  # 64 spectral bins
-        # Smoothness of spectrum
-        'a': 10,  # relatively high variance of spectral curvature
-        'k0': .2,  # quefrency mode below which cepstrum flattens
-        # Power-law part of spectrum
-        'sm': -4,  # preferred power-law slope
-        'sv': .6,  # low variance of power-law slope
-        'im': -2,  # y-intercept mean, in-/decrease for more/less contrast
-        'iv': 2.  # y-intercept variance
-    })
-signal = ift.CorrelatedField(position_space, A)
+
+args = {
+    'offset_mean': 0,
+    'offset_std': (1e-3, 1e-6),
+
+    # Amplitude of field fluctuations
+    'fluctuations': (1., 0.8),  # 1.0, 1e-2
+
+    # Exponent of power law power spectrum component
+    'loglogavgslope': (-3., 1),  # -6.0, 1
+
+    # Amplitude of integrated Wiener process power spectrum component
+    'flexibility': (2, 1.),  # 1.0, 0.5
+
+    # How ragged the integrated Wiener process component is
+    'asperity': (0.5, 0.4)  # 0.1, 0.5
+}
+
+signal = ift.SimpleCorrelatedField(position_space, **args)
 
 R = checkerboard_response(position_space)
 data_space = R.target
@@ -52,15 +50,15 @@ signal_response = R @ signal
 N = ift.ScalingOperator(data_space, 0.1)
 data, ground_truth = generate_gaussian_data(signal_response, N)
 
-plot_prior_samples_2d(5, signal, R, signal, A, 'gauss', N=N)
+plot_prior_samples_2d(5, signal, R, signal, signal.amplitude, 'gauss', N=N)
+exit()
 
 likelihood = ift.GaussianEnergy(
     mean=data, inverse_covariance=N.inverse)(signal_response)
 
 # Solve inference problem
 ic_sampling = ift.GradientNormController(iteration_limit=100)
-ic_newton = ift.GradInfNormController(
-    name='Newton', tol=1e-6, iteration_limit=30)
+ic_newton = ift.GradInfNormController(name='Newton', tol=1e-6, iteration_limit=10)
 minimizer = ift.NewtonCG(ic_newton)
 H = ift.StandardHamiltonian(likelihood, ic_sampling)
 
@@ -68,12 +66,12 @@ initial_mean = ift.MultiField.full(H.domain, 0.)
 mean = initial_mean
 
 # Draw five samples and minimize KL, iterate 10 times
-for _ in range(10):
-    KL = ift.MetricGaussianKL(mean, H, 5)
+for _ in range(3):
+    KL = ift.MetricGaussianKL(mean, H, 5, True)
     KL, convergence = minimizer(KL)
     mean = KL.position
 
 # Draw posterior samples and plot
 N_posterior_samples = 30
-KL = ift.MetricGaussianKL(mean, H, N_posterior_samples)
-plot_reconstruction_2d(data, ground_truth, KL, signal, R, A, 'criticalfilter')
+KL = ift.MetricGaussianKL(mean, H, N_posterior_samples, True)
+plot_reconstruction_2d(data, ground_truth, KL, signal, R, signal.amplitude, 'criticalfilter')
diff --git a/3_more_examples.py b/3_more_examples.py
index de46c16679f7f55156e5cf526bad6283b7d2a18a..a7f3a7ad6ef7be0e0e83206157f6a57fb555755b 100644
--- a/3_more_examples.py
+++ b/3_more_examples.py
@@ -15,15 +15,13 @@
 #
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
 
-import numpy as np
-
 import helpers as h
 import nifty7 as ift
 
 seeds = [123, 42]
 name = ['bernoulli', 'poisson']
 for mode in [0, 1]:
-    np.random.seed(seeds[mode])
+    ift.random.push_sseq_from_seed(seeds[mode])
 
     position_space = ift.RGSpace([256, 256])
     harmonic_space = position_space.get_default_codomain()
diff --git a/helpers/generate_data.py b/helpers/generate_data.py
index 691854bfad4396bac926179b5defe88ab6468c77..aa955867f39d14b6518a014020cc868fb0d5fdfe 100644
--- a/helpers/generate_data.py
+++ b/helpers/generate_data.py
@@ -11,7 +11,7 @@
 # You should have received a copy of the GNU General Public License
 # along with this program. If not, see <http://www.gnu.org/licenses/>.
 #
-# Copyright(C) 2013-2019 Max-Planck-Society
+# Copyright(C) 2013-2021 Max-Planck-Society
 #
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
 
@@ -21,20 +21,20 @@ import nifty7 as ift
 
 
 def generate_gaussian_data(signal_response, noise_covariance):
-    ground_truth = ift.from_random('normal', signal_response.domain)
-    d = signal_response(ground_truth) + noise_covariance.draw_sample()
+    ground_truth = ift.from_random(signal_response.domain)
+    d = signal_response(ground_truth) + noise_covariance.draw_sample_with_dtype(np.float64)
     return d, ground_truth
 
 
 def generate_poisson_data(signal_response):
-    ground_truth = ift.from_random('normal', signal_response.domain)
+    ground_truth = ift.from_random(signal_response.domain)
     rate = signal_response(ground_truth).val
     d = np.random.poisson(rate)
     return ift.makeField(signal_response.target, d), ground_truth
 
 
 def generate_bernoulli_data(signal_response):
-    ground_truth = ift.from_random('normal', signal_response.domain)
+    ground_truth = ift.from_random(signal_response.domain)
     rate = signal_response(ground_truth).val
     d = np.random.binomial(1, rate)
     return ift.makeField(signal_response.target, d), ground_truth
diff --git a/helpers/plot.py b/helpers/plot.py
index 085e1b7c204ef3847b6a4c2fd91064d3b8506de2..0539d8a15fabe1e53e42b646d4c5d59629405a21 100644
--- a/helpers/plot.py
+++ b/helpers/plot.py
@@ -85,21 +85,19 @@ def power_plot(name, s, m, samples=None):
     plt.close('all')
 
 
-def plot_prior_samples_2d(n_samps,
-                          signal,
-                          R,
-                          correlated_field,
-                          A,
-                          likelihood,
+def plot_prior_samples_2d(n_samps, signal, R, correlated_field, A, likelihood,
                           N=None):
     samples, pspecmin, pspecmax = [], np.inf, 0
     pspec = A*A
     for _ in range(n_samps):
-        ss = ift.from_random('normal', signal.domain)
+        ss = ift.from_random(signal.domain)
         samples.append(ss)
         foo = pspec.force(ss).val
+        print(pspecmin, pspecmax)
         pspecmin = min([min(foo), pspecmin])
         pspecmax = max([max(foo), pspecmin])
+    pspecmin /= 10
+    pspecmax *= 10
 
     fig, ax = plt.subplots(nrows=n_samps, ncols=5, figsize=(2*5, 2*n_samps))
     for ii, sample in enumerate(samples):
@@ -108,24 +106,23 @@ def plot_prior_samples_2d(n_samps,
         sg = signal(sample)
         sr = (R.adjoint @ R @ signal)(sample)
         if likelihood == 'gauss':
-            data = signal_response(sample) + N.draw_sample()
+            data = signal_response(sample) + N.draw_sample_with_dtype(np.float64)
         elif likelihood == 'poisson':
             rate = signal_response(sample).val
-            data = ift.makeField(signal_response.target,
-                                 np.random.poisson(rate))
+            data = ift.makeField(signal_response.target, np.random.poisson(rate))
         elif likelihood == 'bernoulli':
             rate = signal_response(sample).val
             data = ift.makeField(signal_response.target,
-                                 np.random.binomial(1, rate))
+                                  np.random.binomial(1, rate))
         else:
             raise ValueError('likelihood type not implemented')
         data = R.adjoint(data + 0.)
 
         As = pspec.force(sample)
         ax[ii, 0].plot(As.domain[0].k_lengths, As.val)
-        ax[ii, 0].set_ylim(pspecmin, pspecmax)
         ax[ii, 0].set_yscale('log')
         ax[ii, 0].set_xscale('log')
+        ax[ii, 0].set_ylim(pspecmin, pspecmax)
         ax[ii, 0].get_xaxis().set_visible(False)
 
         ax[ii, 1].imshow(cf.val, aspect='auto')
diff --git a/teaser_critical_filter.py b/teaser_critical_filter.py
index 0f04903c7639d4ab158b8ee8ddb93d526fabd456..6cdad8cdd2ff570a88e2da4d064456d346989d88 100644
--- a/teaser_critical_filter.py
+++ b/teaser_critical_filter.py
@@ -20,8 +20,6 @@ import numpy as np
 import nifty7 as ift
 from helpers import plot_WF, power_plot, generate_mysterious_data
 
-np.random.seed(42)
-
 position_space = ift.RGSpace(256)
 harmonic_space = position_space.get_default_codomain()
 