Commit 1c5f5014 authored by Philipp Frank's avatar Philipp Frank
Browse files

new version

parent fbf41a69
......@@ -12,44 +12,188 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019-2020 Max-Planck-Society
# Author: Philipp Arras, Philipp Frank, Philipp Haim, Reimar Leike,
# Jakob Knollmueller
import numpy as np
import nifty6 as ift
import src as vlbi
# MPI information
comm, nranks, rank, master = ift.utilities.get_MPI_params()
ift.fft.set_nthreads(1)
# NOTE(review): npix, dt, fov, doms and npixt are each assigned twice in this
# block (here and again further down) -- this looks like two merged diff
# hunks; the later assignments (npix=256 etc.) are the ones that take effect.
npix = 128
dt = 6 # hours
fov = 256*vlbi.MUAS2RAD # RAD
# Fail loudly on any floating-point error; underflow only warns.
np.seterr(all='raise', under='warn')
if master:
print('Numpy mode:', np.geterr())
doms = ift.RGSpace(2*(npix,), 2*(fov/npix,))
npixt = 7*24//dt
# Temporal zero-padding factor used by the correlated-field movie model.
oversampling_factor = 2
nthreads = 1
ift.fft.set_nthreads(nthreads)
# Gridding accuracy passed to the radio response (NFFT).
eps = 1e-7
radius_fitting_init = {'m87': [5, 0, 1, 15], # cx, cy, step, dist
'crescent': [10, -25, 1, 15],
'ehtcrescent': [0, 0, 1, 15]}
radius_fitting_dtheta, radius_fitting_sample_r = 1, 0.5*vlbi.MUAS2RAD
# vlbi.set_SNR_cutoff(0.2)
npix = 256
dt = 6 # hours
# npix = 128
# dt = 24 # hours
# Observation start time (hours) and number of frames covering one week.
startt = 0
npixt = 7*24//dt
fov = 256*vlbi.MUAS2RAD
min_timestamps_per_bin = 12 # how many different time stamps should be at least in a time bin
npix = int(npix)
doms = ift.RGSpace(2*(npix,), 2*(fov/npix,))
# Zero-mode arguments unpacked into ift.CorrelatedFieldMaker.make below
# (presumably offset mean/std settings plus prefix -- confirm against nifty6).
zm = 0, 0.2, 0.1, ''
# Correlated-field hyperparameters for the spatial axes of the movie model.
spaceparams = {
'target_subdomain': doms,
'fluctuations_mean': 1.5,
'fluctuations_stddev': 1.,
'loglogavgslope_mean': -1.5,
'loglogavgslope_stddev': 0.5,
'flexibility_mean': 0.01,
'flexibility_stddev': 0.001,
'asperity_mean': 0.0001,
'asperity_stddev': 0.00001,
'prefix': 'space'
}
# Correlated-field hyperparameters for the temporal axis of the movie model.
timeparams = {
'fluctuations_mean': 0.2,
'fluctuations_stddev': 1.,
'loglogavgslope_mean': -4,
'loglogavgslope_stddev': 0.5,
'flexibility_mean': 0.01,
'flexibility_stddev': 0.001,
'asperity_mean': 0.001,
'asperity_stddev': 0.0001,
'prefix': 'time'
}
# Zero mode and spatial hyperparameters for the single-frame (static) model.
zm_single = 0, 1, 0.3, ''
spaceparams_single = {
'target_subdomain': doms,
'fluctuations_mean': 0.75,
'fluctuations_stddev': 0.3,
'flexibility_mean': 0.001,
'flexibility_stddev': 0.0001,
'asperity_mean': 0.0001,
'asperity_stddev': 0.00001,
'loglogavgslope_mean': -1.5,
'loglogavgslope_stddev': 0.5,
'prefix': 'space'
}
# Static single-frame sky model: flux-normalized exponentiated log-sky.
cfm_single = ift.CorrelatedFieldMaker.make(*zm_single)
cfm_single.add_fluctuations(**spaceparams_single)
logsky = cfm_single.finalize(prior_info=0)
sky_single = vlbi.normalize(doms) @ logsky.exp()
def smoothed_sky_model(sigma):
    """Build a flux-normalized single-frame sky model smoothed on scale *sigma* (muas)."""
    maker = ift.CorrelatedFieldMaker.make(*zm_single)
    maker.add_fluctuations(**spaceparams_single)
    log_model = maker.finalize(prior_info=0)
    smoother = ift.HarmonicSmoothingOperator(log_model.target, sigma*vlbi.MUAS2RAD)
    # Smooth in log-space, exponentiate, smooth the result again, then normalize.
    return vlbi.normalize(doms) @ smoother @ (smoother @ log_model).exp()
def excitation_smoother(sigma, sigma_old, xi):
    """Reweight harmonic excitations *xi* from smoothing scale *sigma_old* to *sigma* (muas)."""
    harmonic_dom = xi.domain[0]
    k_lengths = harmonic_dom.get_k_length_array()
    kern_old = harmonic_dom.get_fft_smoothing_kernel_function(sigma_old*vlbi.MUAS2RAD)
    kern_new = harmonic_dom.get_fft_smoothing_kernel_function(sigma*vlbi.MUAS2RAD)
    # Clip both kernels to keep the ratio finite when they underflow/overflow.
    ratio = kern_old(k_lengths).clip(1e-200, 1e300)/kern_new(k_lengths).clip(1e-200, 1e300)
    return ratio * xi
def smoothed_movie_model(sigma_x, sigma_t):
    """Build a flux-normalized two-band movie model smoothed on scale
    sigma_x (muas) in space and sigma_t (hours, in harmonic units of the
    time axis) in time.

    Returns an operator mapping latent excitations (keys 'xi_hi', 'xi_lo',
    plus the correlated-field hyperparameters) to a multi-field with keys
    'hi' and 'lo'.
    """
    # Zero-padding in time avoids periodic wrap-around of the correlated field.
    domt_zeropadded = ift.RGSpace(int(oversampling_factor * npixt), dt)
    cfm = ift.CorrelatedFieldMaker.make(*zm)
    cfm.add_fluctuations(domt_zeropadded, **timeparams)
    cfm.add_fluctuations(**spaceparams)
    logsky = cfm.finalize(prior_info=0)
    FA_lo = ift.FieldAdapter(logsky.target, 'lo')
    FA_hi = ift.FieldAdapter(logsky.target, 'hi')
    # Route two independent excitation fields through copies of the same model.
    xi_hi = ift.FieldAdapter(logsky.domain['xi'], 'xi_hi')
    id_hi = ift.FieldAdapter(logsky.domain['xi'], 'xi').adjoint @ xi_hi
    xi_lo = ift.FieldAdapter(logsky.domain['xi'], 'xi_lo')
    id_lo = ift.FieldAdapter(logsky.domain['xi'], 'xi').adjoint @ xi_lo
    # The two bands are parametrized as mean +/- a small difference component.
    expected_difference = 1e-2
    logsky_1 = (FA_hi.adjoint @ logsky).partial_insert(id_hi)
    logsky_2 = (FA_lo.adjoint
                @ logsky).partial_insert(id_lo).scale(expected_difference)
    ls_lo = (FA_hi + FA_lo)
    ls_hi = (FA_hi - FA_lo)
    t_SMO = ift.HarmonicSmoothingOperator(ls_lo.target, sigma_t, space=0)
    x_SMO = ift.HarmonicSmoothingOperator(ls_lo.target, sigma_x*vlbi.MUAS2RAD, space=1)
    SMO = t_SMO @ x_SMO
    # Smooth in log-space, exponentiate, smooth again (as in smoothed_sky_model).
    logsky_lo_smo = SMO @ ls_lo
    logsky_hi_smo = SMO @ ls_hi
    sky_lo_smo = SMO @ logsky_lo_smo.exp()
    sky_hi_smo = SMO @ logsky_hi_smo.exp()
    sky_lo = FA_lo.adjoint @ sky_lo_smo
    sky_hi = FA_hi.adjoint @ sky_hi_smo
    sky_mf = (sky_hi + sky_lo) @ (logsky_1 + logsky_2)
    # FIX: the original normalized over 'logsky_mf.target' -- a name that is
    # not defined in this function (NameError at call time unless a
    # module-level 'logsky_mf' happens to exist). The intended normalization
    # domain is that of the operator being normalized, 'sky_mf'.
    # (Unused locals 'domt'/'dom' from the original were removed.)
    return vlbi.normalize(sky_mf.target) @ sky_mf
def movie_excitation_smoother(sigma_x, sigma_t, sigma_x_old, sigma_t_old, xi_hi, xi_lo):
    """Reweight movie excitations from the old (space, time) smoothing scales to new ones."""
    def build_kernel(sx, st, domain):
        # Separable kernel: outer product of the per-axis FFT smoothing kernels.
        def axis_kernel(dom, sig):
            lengths = dom.get_k_length_array()
            return dom.get_fft_smoothing_kernel_function(sig)(lengths)
        vals_t = axis_kernel(domain[0], st).val
        vals_x = axis_kernel(domain[1], sx*vlbi.MUAS2RAD).val
        # np.outer flattens its inputs; restore the combined (time, space) shape.
        combined = np.outer(vals_t, vals_x).reshape(vals_t.shape + vals_x.shape)
        return ift.makeField(domain, combined)
    kern_old = build_kernel(sigma_x_old, sigma_t_old, xi_hi.domain)
    kern_new = build_kernel(sigma_x, sigma_t, xi_hi.domain)
    # Clip both kernels so the ratio stays finite when they underflow.
    weights = kern_old.clip(1e-200, 1e300)/kern_new.clip(1e-200, 1e300)
    return xi_hi * weights, xi_lo * weights
# Prior amplitude (power-spectrum) operator of the single-frame model.
pspec_single = cfm_single.amplitude
domt = ift.RGSpace(npixt, dt)
dom = ift.makeDomain((domt, doms))
domt_zeropadded = ift.RGSpace(int(oversampling_factor*npixt), dt)
# NOTE(review): this CorrelatedFieldMaker (and both add_fluctuations calls on
# it) is discarded -- 'cfm' is rebound below before finalize() is ever
# called; this looks like a stale hunk from the previous version.
cfm = ift.CorrelatedFieldMaker.make(0.2, 1e-1, '')
cfm.add_fluctuations(ift.RGSpace(int(2*npixt), dt),
fluctuations_mean=.2,
fluctuations_stddev=1.,
flexibility_mean=0.1,
flexibility_stddev=0.001,
asperity_mean=0.01,
asperity_stddev=0.001,
loglogavgslope_mean=-4,
loglogavgslope_stddev=0.5,
prefix='time')
cfm.add_fluctuations(doms,
fluctuations_mean=cfm.moment_slice_to_average(1.5),
fluctuations_stddev=1.,
flexibility_mean=0.3,
flexibility_stddev=0.001,
asperity_mean=0.01,
asperity_stddev=0.001,
loglogavgslope_mean=-1.5,
loglogavgslope_stddev=0.5,
prefix='space')
# Movie model actually used: (zero-padded time) x space correlated field.
cfm = ift.CorrelatedFieldMaker.make(*zm)
cfm.add_fluctuations(domt_zeropadded, **timeparams)
cfm.add_fluctuations(**spaceparams)
logsky = cfm.finalize(prior_info=0)
FA_lo = ift.FieldAdapter(logsky.target, 'lo')
......@@ -59,8 +203,9 @@ id_hi = ift.FieldAdapter(logsky.domain['xi'], 'xi').adjoint @ xi_hi
# Second independent excitation field routed through the same movie model.
xi_lo = ift.FieldAdapter(logsky.domain['xi'], 'xi_lo')
id_lo = ift.FieldAdapter(logsky.domain['xi'], 'xi').adjoint @ xi_lo
# Relative magnitude of the hi/lo frequency difference component.
expected_difference = 1e-2
logsky_1 = (FA_hi.adjoint @ logsky).partial_insert(id_hi)
# NOTE(review): the next two lines are the old (literal 0.01) and new
# (named constant) versions of the same assignment left side by side by the
# diff; they are numerically identical and the second binding wins.
logsky_2 = (FA_lo.adjoint @ logsky).partial_insert(id_lo).scale(0.01)
logsky_2 = (FA_lo.adjoint @ logsky).partial_insert(id_lo).scale(expected_difference)
# Mean ('lo' = hi+lo) and difference ('hi' = hi-lo) parametrization of the bands.
logsky_lo = FA_lo.adjoint @ (FA_hi + FA_lo)
logsky_hi = FA_hi.adjoint @ (FA_hi - FA_lo)
logsky_mf = (logsky_hi + logsky_lo) @ (logsky_1 + logsky_2)
......
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2020 Max-Planck-Society
# Author: Philipp Arras
import matplotlib.pyplot as plt
import numpy as np
import nifty6 as ift
import src as vlbi
from config import comm, rank, master
from config import oversampling_factor as ofac
from config import sky_movie_mf
from config import sky_movie_mf as full_sky
def main():
    """Draw prior samples, run a short pre-fit against a Gaussian mock sky,
    and persist the resulting position as the reconstruction's start point.

    Side effects (master task only): writes 'prior_samples.png',
    'movie_start.png', 'initial.h5' and 'initial_random_state.txt'.
    """
    # Prior plots (master only; fixed RNG context for reproducibility).
    if master:
        with ift.random.Context(31):
            p = ift.Plot()
            n = 5
            # FIX: the original wrote 'range(5)' next to 'n = 5' and carried a
            # FIXME asking whether it should be 'range(n)' -- it should, so the
            # plot grid ('ny=n' below) always matches the number of samples.
            for _ in range(n):
                pos = ift.from_random(sky_movie_mf.domain, 'normal')
                ss = sky_movie_mf(pos)['hi']
                mi, ma = 0, np.max(ss.val)
                # Plot every fourth time frame of the movie sample.
                for ii in range(0, ss.shape[0], 4):
                    extr = ift.DomainTupleFieldInserter(ss.domain, 0, (ii,)).adjoint
                    p.add(extr(ss), zmin=mi, zmax=ma)
            p.output(name='prior_samples.png', ny=n, xsize=28, ysize=9)
        # Sanity check: the model must evaluate without overflow/inf on
        # random latent positions.
        print('Start inf check')
        for ii in range(20):
            pp = ift.from_random(sky_movie_mf.domain, 'normal')  # * 2.5
            sky_movie_mf(pp)
    sky = full_sky
    # Mock data: a 30 muas Gaussian profile, constant over time and identical
    # in both frequency bands.
    fld = vlbi.gaussian_profile(sky.target['hi'][1], 30*vlbi.MUAS2RAD)
    fld = ift.ContractionOperator(sky.target['hi'], 0).adjoint(fld).val
    multi = ift.makeField(sky.target, {'lo': fld, 'hi': fld})
    output = vlbi.normalize(multi.domain)(multi)
    # Noise covariance: 1e-3 times the squared peak brightness.
    cov = 1e-3*max(vv.max() for vv in output.val.values())**2
    dtypes = {ff.dtype for ff in output.values()}
    if len(dtypes) != 1:
        raise ValueError('Only MultiFields with one dtype supported.')
    dtype = dtypes.pop()
    invcov = ift.ScalingOperator(output.domain, cov).inverse
    d = output + invcov.draw_sample_with_dtype(dtype, from_inverse=True)
    lh = ift.GaussianEnergy(d, invcov) @ sky
    H = ift.StandardHamiltonian(
        lh, ic_samp=ift.AbsDeltaEnergyController(0.5, iteration_limit=100, convergence_level=2, name=f'CG(task {rank})'))
    pos = 0.1*ift.from_random(sky.domain, 'normal')
    # Keep all power-spectrum hyperparameters fixed; fit only the excitations.
    cst = ('spaceasperity', 'spaceflexibility', 'spacefluctuations', 'spaceloglogavgslope', 'spacespectrum', 'timeasperity', 'timeflexibility', 'timefluctuations', 'timeloglogavgslope', 'timespectrum')
    minimizer = ift.NewtonCG(ift.GradientNormController(iteration_limit=10, name='findpos' if master else None))
    n = 2
    for ii in range(n):
        if master:
            ift.logger.info(f'Start iteration {ii+1}/{n}')
        kl = ift.MetricGaussianKL(pos, H, 2, comm=comm, mirror_samples=True, constants=cst, point_estimates=cst)
        kl, _ = minimizer(kl)
        pos = kl.position
        if master:
            movie = vlbi.freq_avg(vlbi.strip_oversampling(sky(pos), ofac)).val
            ma = np.max(movie)
            fig, axs = plt.subplots(5, 6, figsize=(12, 12))
            axs = axs.ravel()
            # FIX: use 'jj' so the frame loop does not clobber the outer
            # iteration counter 'ii'.
            for jj in range(movie.shape[0]):
                axx = axs[jj]
                axx.imshow(movie[jj].T, vmin=0, vmax=ma)
                axx.set_title(f'Frame {jj}')
            fig.savefig('movie_start.png')
            plt.close()
    if master:
        vlbi.save_hdf5('initial.h5', pos)
        # Persist the RNG state so the main reconstruction script can resume
        # the exact same random sequence.
        with open("initial_random_state.txt", "wb") as f:
            f.write(ift.random.getState())
if __name__ == '__main__':
# Change seed for the whole reconstruction here
# ift.random.push_sseq_from_seed(42)
# Entry point: compute and store the initial position for the reconstruction.
main()
......@@ -12,89 +12,198 @@
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019-2020 Max-Planck-Society
# Author: Philipp Arras, Philipp Frank, Philipp Haim, Reimar Leike,
# Jakob Knollmueller
# Author: Philipp Arras, Philipp Haim, Reimar Leike, Jakob Knollmueller
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import sys
import gc
from mpi4py import MPI
from functools import reduce
from operator import add
import numpy as np
from time import time
import nifty6 as ift
import src as vlbi
from config import doms, dt, npixt
from config import comm, nranks, rank, master
from config import doms, dt, eps, min_timestamps_per_bin, npixt, nthreads
from config import sky_movie_mf as sky
from config import startt
if __name__ == '__main__':
# Raise on any floating-point error and fix the legacy NumPy global seed.
# NOTE(review): only these two settings are guarded here; the actual entry
# point is invoked by a separate __main__ guard further down in this file.
np.seterr(all='raise')
np.random.seed(42)
def stat_plotting(pos, KL):
# Accumulate posterior statistics of the sky model evaluated at 'pos' plus
# each KL sample: per-band sample lists, the mean of the two bands, and a
# relative hi/lo difference (a spectral-index proxy).
# NOTE(review): this hunk appears truncated by the diff -- the accumulated
# statistics (sc, sc_mean, sc_spectral, skysamples) are never used or
# returned within the visible lines.
if master:
sc = ift.StatCalculator()
sc_mean = ift.StatCalculator()
sc_spectral = ift.StatCalculator()
skysamples = {'hi': [], 'lo': []}
# CAUTION: the loop and the if-clause cannot be interchanged!
# (presumably because iterating KL.samples involves MPI communication on
# every task -- confirm against MetricGaussianKL)
for ss in KL.samples:
if master:
samp = sky(pos+ss)
skysamples['hi'].append(samp['hi'])
skysamples['lo'].append(samp['lo'])
sc.add(samp)
sc_mean.add(0.5*(samp['hi']+samp['lo']).val)
sc_spectral.add(2 * (samp['lo']-samp['hi']).val / (samp['lo']+samp['hi']).val )
def optimization_heuristic(ii, lh_full, lh_amp, lh_ph, ind_full, ind_amp, ind_ph):
# Choose the minimizer, KL sample count, iteration budget and the active
# likelihood for global reconstruction iteration 'ii'.
cut = 2 if dt == 24 else 6
# Sample count and iteration budget grow as the reconstruction progresses.
N_samples = 10 * (1 + ii // 8)
N_iterations = 4 * (4 + ii // 4) if ii<50 else 20
# NOTE(review): this local 'eps' (energy tolerance) shadows the NFFT
# accuracy 'eps' imported from config -- consider renaming one of them.
eps = 0.1
clvl = 3 if ii < 20 else 2
ic_newton = ift.AbsDeltaEnergyController(eps, iteration_limit=N_iterations,
name=f'it_{ii}' if master else None,
convergence_level=clvl)
# Robust VL-BFGS for the first 50 iterations, NewtonCG afterwards.
if ii < 50:
minimizer = ift.VL_BFGS(ic_newton)
else:
minimizer = ift.NewtonCG(ic_newton)
# Early iterations alternate between the full, amplitude-only and
# phase-only closure likelihoods, restricted to early time bins (< cut).
if ii < 30:
lh = []
active_inds = []
if ii % 2 == 0 or ii < 10:
for key in ind_full.keys():
if int(key[0])<cut and key[1] == '_': #FIXME: dirty fix to select
# the data of the first two days
active_inds.append(key)
lh.append(ind_full[key])
elif ii % 4 == 1:
for key in ind_amp.keys():
if int(key[0])<cut and key[1] == '_': #FIXME
active_inds.append(key)
lh.append(ind_amp[key])
else:
for key in ind_ph.keys():
if int(key[0])<cut and key[1] == '_': #FIXME
active_inds.append(key)
lh.append(ind_ph[key])
# NOTE(review): reading sys.argv and branching on 'pre' here looks like a
# stale hunk of the previous script version merged into this function by
# the diff -- verify against the repository history.
pre = sys.argv[1]
if pre not in ['crescent', 'disk', 'blobs']:
conv = vlbi.DomainTuple2MultiField(sky.target, active_inds)
lh = reduce(add, lh) @ conv
else:
if ii % 2 == 0 or ii > 50:
lh = lh_full
elif ii % 4 == 1:
lh = lh_amp
else:
lh = lh_ph
return minimizer, N_samples, N_iterations, lh
def setup():
# Build the per-time-bin closure likelihoods, load the starting position
# (broadcast over MPI) and time one full likelihood call.
# Returns: (pos, lh_full, lh_amp, lh_ph, sky, ic, pre_output,
#           ind_full, ind_amp, ind_ph).
if len(sys.argv) != 3:
raise RuntimeError
# NOTE(review): 'pre' is not defined at this point; this line -- and several
# blocks below -- look like stale hunks of the previous script version left
# in place by the diff.
path = f'data/{pre}'
_, pre_data, fname_input = sys.argv
pre_output = pre_data
ndata = {}
lh_full = []
lh_amp = []
lh_ph = []
ind_full = {}
ind_amp = {}
ind_ph = {}
active_inds = []
# Build data model
lh, active_inds = [], []
for freq in vlbi.data.FREQS:
rawd = vlbi.combined_data(path, [freq], identify_short_baselines=['APAA', 'SMJC'])
for ii, dd in enumerate(vlbi.time_binning(rawd, dt, startt, npixt*dt)):
# Data are read on the master task only and broadcast over MPI.
rawd = vlbi.combined_data(f'data/{pre_data}', [freq], min_timestamps_per_bin) if master else None
if nranks > 1:
rawd = comm.bcast(rawd)
ndata[freq] = []
args = {'tmin': startt, 'tmax': npixt*dt, 'delta_t': dt}
for ii, dd in enumerate(vlbi.time_binning(rawd, **args)):
# Skip empty time bins but record a zero data count for them.
if len(dd) == 0:
ndata[freq] += [0, ]
continue
ind = str(ii) + "_" + freq
active_inds.append(ind)
# NOTE(review): the block from here through the old 30-iteration
# minimization loop appears to be the previous version's code interleaved
# with the new closure-likelihood construction that resumes further down.
v2ph, icovph = vlbi.Visibilities2ClosurePhases(dd)
v2ampl, icovampl = vlbi.Visibilities2ClosureAmplitudes(dd)
R = vlbi.RadioResponse(doms, dd['uv'], 1e-7).ducktape(ind)
vis = ift.makeField(R.target, dd['vis'])
ll_ph = ift.GaussianEnergy(v2ph(vis), icovph) @ v2ph
ll_ampl = ift.GaussianEnergy(v2ampl(vis), icovampl) @ v2ampl
lh.append((ll_ph + ll_ampl) @ R)
lh = reduce(add, lh) @ vlbi.DomainTuple2MultiField(sky.target, active_inds)
pb = vlbi.gaussian_profile(sky.target['hi'][1], 50*vlbi.MUAS2RAD)
pb = ift.ContractionOperator(sky.target['hi'], 0).adjoint(pb)
pb = ift.makeOp(ift.MultiField.from_dict({'hi': pb, 'lo': pb}))
# Plot prior samples
for ii in range(4):
pp = ift.from_random('normal', sky.domain)
vlbi.save_state(sky, pp, pre, f'prior{ii}', [])
# Minimization
pos = 0.1*ift.from_random('normal', sky.domain)
vlbi.save_state(sky, pos, pre, 'initial', [])
for ii in range(30):
if ii < 10:
N_samples = 1
N_iterations = 15
elif ii < 20:
N_samples = 2
N_iterations = 15
elif ii < 25:
N_samples = 3
N_iterations = 15
elif ii < 28:
N_samples = 10
N_iterations = 20
else:
N_samples = 20
N_iterations = 30
print(f'Iter: {ii}, N_samples: {N_samples}, N_iter: {N_iterations}')
cstkeys = set(pos.domain.keys()) - set(['xi_lo', 'xi_hi'])
cst = cstkeys if ii < 20 else []
H = ift.StandardHamiltonian(lh @ pb @ sky if ii < 15 else lh @ sky,
ift.GradientNormController(iteration_limit=40))
KL = ift.MetricGaussianKL(pos, H, N_samples, mirror_samples=True,
lh_sampling_dtype=np.complex128,
point_estimates=cst, constants=cst)
dct = {'deltaE': 0.1, 'iteration_limit': N_iterations,
'name': f'it_{ii}', 'convergence_level': 2}
minimizer = ift.NewtonCG(ift.AbsDeltaEnergyController(**dct))
# New-version per-bin closure likelihood construction resumes here.
vis2closph, evalsph, _ = vlbi.Visibilities2ClosurePhases(dd)
vis2closampl, evalsampl = vlbi.Visibilities2ClosureAmplitudes(dd)
nfft = vlbi.RadioResponse(doms, dd['uv'], eps, nthreads)
vis = ift.makeField(nfft.target, dd['vis'])
ndata[freq] += [vis2closph.target.size + vis2closampl.target.size,]
lhph = ift.GaussianEnergy(mean=vis2closph(vis)) @ vis2closph
lhamp = ift.GaussianEnergy(mean=vis2closampl(vis)) @ vis2closampl
llh_full = reduce(add, [lhamp, lhph]) @ nfft.ducktape(ind)
lh_full.append(llh_full)
llh_amp = reduce(add, [lhamp]) @ nfft.ducktape(ind)
lh_amp.append(llh_amp)
llh_ph = reduce(add, [lhph]) @ nfft.ducktape(ind)
lh_ph.append(llh_ph)
ind_ph[ind] = llh_ph
ind_amp[ind] = llh_amp
ind_full[ind] = llh_full
# Numerical consistency check of the combined likelihood's Jacobian.
foo = reduce(add, [lhph, lhamp]) @ nfft.ducktape(ind)
ift.extra.check_jacobian_consistency(foo, ift.from_random(foo.domain),
tol=1e-5, ntries=10)
conv = vlbi.DomainTuple2MultiField(sky.target, active_inds)
lh_full = reduce(add, lh_full) @ conv
lh_amp = reduce(add, lh_amp) @ conv
lh_ph = reduce(add, lh_ph) @ conv
# Load the starting position on master and broadcast it to all tasks.
pos = vlbi.load_hdf5(fname_input, sky.domain) if master else None
if nranks > 1:
pos = comm.bcast(pos)
ic = ift.AbsDeltaEnergyController(0.5, iteration_limit=200, name=f'Sampling(task {rank})', convergence_level=3)
# Time a single full-likelihood evaluation (master only, for logging).
if master:
t0 = time()
(lh_full @ sky)(pos)
print(f'Likelihood call: {1000*(time()-t0):.0f} ms')
return pos, lh_full, lh_amp, lh_ph, sky, ic, pre_output, ind_full, ind_amp, ind_ph
# Encapsulate everything in functions, to avoid as many (unintended) global variables as possible
def main():
# Run the 60-iteration KL minimization schedule, saving the state after
# every iteration and collecting garbage aggressively between steps.
# These following lines are to verify the scan averaging
with open("time_averaging.txt", 'w') as f:
# delete the file such that new lines can be appended
f.write("min max avg med\n")
pos, lh_full, lh_amp, lh_ph, sky, ic, pre_output, ind_full, ind_amp, ind_ph = setup()
for ii in range(60):
gc.collect()
minimizer, N_samples, N_iterations, lh = optimization_heuristic(
ii, lh_full, lh_amp, lh_ph,ind_full, ind_amp, ind_ph)
if master:
print(f'Iter: {ii}, N_samples: {N_samples}, N_iter: {N_iterations}')
ll = lh @ sky
H = ift.StandardHamiltonian(ll, ic)
KL = ift.MetricGaussianKL(pos, H, N_samples, comm=comm, mirror_samples=True)
# Free intermediate operators before the expensive minimization.
del ll, H
gc.collect()
KL, _ = minimizer(KL)
pos = KL.position
# NOTE(review): the next two save_state calls are the old and new versions
# of the same line left side by side by the diff; the first references an
# undefined name 'pre' and would raise NameError if executed.
vlbi.save_state(sky, pos, pre, ii, KL.samples)
vlbi.save_state(sky, KL.position, f'{pre_output}_', ii, samples=KL.samples, master=master)
del KL
gc.collect()
if __name__ == '__main__':
# Resume the RNG sequence persisted by the initial-guess script so the
# reconstruction continues the exact same random stream deterministically.
with open("initial_random_state.txt", "rb") as f:
ift.random.setState(f.read())
main()
from .closure import Visibilities2ClosureAmplitudes, Visibilities2ClosurePhases
from .closure import Visibilities2ClosureAmplitudes, Visibilities2ClosurePhases, set_SNR_cutoff
from .constants import *
from .data import combined_data, read_data, read_data_raw, time_binning
#from .fitting import radius_fitting
#from .plotting import *
#from .validation_models import *