Commit e2a561df authored by Philipp Arras

Initial commit

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019-2020 Max-Planck-Society
# Author: Philipp Arras, Philipp Frank, Philipp Haim, Reimar Leike,
# Jakob Knollmueller
import os
os.environ["MKL_NUM_THREADS"] = "1"
os.environ["OMP_NUM_THREADS"] = "1"
import sys
from functools import reduce
from operator import add
from time import time
import numpy as np
import nifty6 as ift
import util as vres
from config import doms, dt, eps, npixt, nthreads
from config import sky_movie_mf as sky
from config import startt
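# The main script below loads the visibility data for one of the data sets
# 'crescent', 'disk' or 'blobs' (selected on the command line), builds
# closure-phase and closure-amplitude Gaussian likelihoods for every
# frequency and time bin, combines them with the time-resolved sky model
# from config, and minimizes a sequence of MetricGaussianKL energies with an
# increasing number of samples, recording intermediate states via
# vres.save_state.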
if __name__ == '__main__':
np.seterr(all='raise')
np.random.seed(42)
pre = sys.argv[1]
if pre not in ['crescent', 'disk', 'blobs']:
raise RuntimeError
path = f'data/{pre}'
lh = []
active_inds = []
for freq in vres.data.FREQS:
rawd = vres.combined_data(path, [freq], identify_short_baselines=['APAA', 'SMJC'])
args = {'tmin': startt, 'tmax': npixt*dt, 'delta_t': dt}
for ii, dd in enumerate(vres.time_binning(rawd, **args)):
if len(dd) == 0:
continue
ind = str(ii) + "_" + freq
active_inds.append(ind)
vis2closph, invcov_ph = vres.Visibilities2ClosurePhases(dd)
vis2closampl, invcov_ampl = vres.Visibilities2ClosureAmplitudes(dd)
nfft = vres.RadioResponse(doms, dd['uv'], eps, nthreads)
vis = ift.makeField(nfft.target, dd['vis'])
ll = ift.GaussianEnergy(mean=vis2closph(vis), inverse_covariance=invcov_ph) @ vis2closph
ll = ll + ift.GaussianEnergy(mean=vis2closampl(vis), inverse_covariance=invcov_ampl) @ vis2closampl
lh.append(ll @ nfft.ducktape(ind))
conv = vres.DomainTuple2MultiField(sky.target, active_inds)
lh = reduce(add, lh) @ conv
for ii in range(4):
pp = ift.from_random('normal', sky.domain)
vres.save_state(sky, pp, pre, f'prior{ii}', samples=[0*pp, 0*pp])
pos = 0.1*ift.from_random('normal', sky.domain)
vres.save_state(sky, pos, pre, 'initial', samples=[0*pos, 0*pos])
ic = ift.GradientNormController(iteration_limit=40, name='Sampling')
pb = vres.gaussian_profile(sky.target['hi'][1], 50*vres.MUAS2RAD)
pb = ift.ContractionOperator(sky.target['hi'], 0).adjoint(pb)
pb = ift.MultiField.from_dict({'hi': pb, 'lo': pb})
t0 = time()
(lh @ ift.makeOp(pb) @ sky)(pos)
print(f'Likelihood call: {1000*(time()-t0):.0f} ms')
for ii in range(30):
if ii < 10:
N_samples = 1
N_iterations = 15
elif ii < 20:
N_samples = 2
N_iterations = 15
elif ii < 25:
N_samples = 3
N_iterations = 15
elif ii < 28:
N_samples = 10
N_iterations = 20
else:
N_samples = 20
N_iterations = 30
print(f'Iter: {ii}, N_samples: {N_samples}, N_iter: {N_iterations}')
if ii < 15:
print('Primary beam hack active')
ll = lh @ ift.makeOp(pb) @ sky
else:
print('No primary beam anymore')
ll = lh @ sky
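# For the first 20 global iterations the correlated-field hyperparameters
# listed below are both point-estimated and held constant in the KL, so only
# the remaining excitation degrees of freedom are inferred; afterwards all
# parameters are free.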
cstkeys = ('spaceasperity', 'spaceflexibility', 'spacefluctuations', 'spaceloglogavgslope', 'spacespectrum', 'timeasperity', 'timeflexibility', 'timefluctuations', 'timeloglogavgslope', 'timespectrum', 'zeromode')
pe = cstkeys if ii < 20 else []
cst = cstkeys if ii < 20 else []
H = ift.StandardHamiltonian(ll, ic)
KL = ift.MetricGaussianKL(pos, H, N_samples, mirror_samples=True,
lh_sampling_dtype=np.complex128, point_estimates=pe, constants=cst)
ic_newton = ift.AbsDeltaEnergyController(0.1,
iteration_limit=N_iterations,
name=f'it_{ii}',
convergence_level=2)
minimizer = ift.NewtonCG(ic_newton)
KL, _ = minimizer(KL)
pos = KL.position
vres.save_state(sky, pos, pre, ii, samples=KL.samples)
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019 Max-Planck-Society
from collections import defaultdict
from itertools import product
import numpy as np
import pytest
import nifty6 as ift
import util as eht
pmp = pytest.mark.parametrize
bools = [False, True]
# cb = [[], ['APAA'], ['SMJC'], ['APAA', 'SMJC']]
cb = [['APAA', 'SMJC']]
d3 = {
'uv': np.random.randn(11, 2)*1e7,
'vis': np.random.randn(11) + 1j*np.random.rand(11),
'sigma': np.random.randn(11)**2,
'freq': 1e11,
'time': np.array(5*[7.24027777] + 6*[7.4236114]),
'ant1': np.array([0, 0, 0, 1, 1, 0, 0, 0, 1, 1, 2]),
'ant2': np.array([1, 2, 3, 2, 3, 1, 2, 3, 2, 3, 3])
}
ds = [
eht.read_data(dd, ff, o2)
for dd, ff, o2 in product(eht.DAYS, eht.FREQS, cb)
] + [eht.combined_data(eht.FREQS, identify_short_baselines=o2)
for o2 in cb] + [d3]
seeds = [189, 4938]
dtypes = [np.complex128, np.float64]
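# The two helpers below build a toy antenna-based calibration operator used
# only by test_calibration_dist: per-antenna log-amplitude and phase fields
# defined on (antenna, time) are linearly interpolated onto the observation
# times and combined into one complex factor exp((a_i + a_j) + 1j*(p_i - p_j))
# per visibility, so multiplying the visibilities by it must leave all
# closure quantities unchanged.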
def AntennaBasedCalibration(tspace, time, ant1, ant2, amplkey, phkey):
assert time.shape == ant1.shape
assert time.shape == ant2.shape
assert len(time.shape) == 1
antspace = ift.UnstructuredDomain(len(set(ant1) | set(ant2)))
dom = ift.makeDomain((antspace, tspace))
ddct = defaultdict(lambda: len(ddct))
ant1 = np.array([ddct[ele] for ele in ant1])
ant2 = np.array([ddct[ele] for ele in ant2])
dtr1 = CalibrationDistributor(dom, ant1, time)
dtr2 = CalibrationDistributor(dom, ant2, time)
logampl = (dtr1 + dtr2).ducktape(amplkey)
logph = 1j*(dtr1 - dtr2).ducktape(phkey)
return (logampl + logph).exp()
def CalibrationDistributor(domain, ant, time):
assert len(time.shape) == 1
assert ant.shape == time.shape
assert time.min() >= 0
assert time.max() < domain[1].total_volume
assert np.max(ant) < domain[0].shape[0]
assert np.min(ant) >= 0
dd = [ift.RGSpace(domain[0].shape, distances=1.), domain[1]]
dd = ift.DomainTuple.make(dd)
positions = np.array([ant, time])
li = ift.LinearInterpolator(dd, positions)
return li @ ift.Realizer(li.domain) @ ift.GeometryRemover(dd, 0).adjoint
@pmp('n', range(3, 40))
def test_visibility_closure_matrices(n):
np.random.seed(42)
Phi = eht.closure.visibility_design_matrix(n)
Psi = eht.closure.closure_phase_design_matrix(n)
np.testing.assert_allclose(Psi @ Phi, 0)
@pmp('seed', seeds)
@pmp('shape', [(3,4), (10,20)])
def test_normalize(seed, shape):
np.random.seed(seed)
sp = ift.RGSpace(shape, distances=np.exp(np.random.randn(len(shape))))
s = ift.from_random('normal', sp).exp()
s_normalized = eht.normalize(s.domain)(s)
np.testing.assert_allclose(s_normalized.integrate(), 1.)
@pmp('seed', seeds)
@pmp('d', ds)
@pmp('mode', ['ph', 'ampl'])
def test_closure_consistency(seed, d, mode):
np.random.seed(seed)
f = eht.Visibilities2ClosureAmplitudes
if mode == 'ph':
f = eht.Visibilities2ClosurePhases
op = f(d)[0]
pos = ift.from_random('normal', op.domain, dtype=np.complex128)
ift.extra.check_jacobian_consistency(op, pos)
@pmp('seed', seeds)
@pmp('shape', [(3,4), (10,20)])
def test_sparse_cov(seed, shape):
np.random.seed(seed)
matrix = np.random.randn(*shape)
sigma_sq = np.exp(np.random.randn(shape[1]))
d, r, c = eht.closure.sparse_cov(matrix, sigma_sq)
mat = np.zeros((shape[0],)*2)
mat[(r,c)] = d
np.testing.assert_allclose(mat.T @ mat @ matrix @ np.diag(sigma_sq) @ matrix.T, np.eye(shape[0]), rtol=1e-7, atol=1e-7)
@pmp('seed', seeds)
@pmp('dd', ds)
@pmp('corrupt_phase', [False, True])
@pmp('corrupt_ampl', [False, True])
def test_closure_property(seed, dd, corrupt_phase, corrupt_ampl):
np.random.seed(seed)
CP, _ = eht.Visibilities2ClosurePhases(dd)
CA, _ = eht.Visibilities2ClosureAmplitudes(dd)
vis = ift.makeField(CP.domain, dd['vis'].copy())
resph0 = CP(vis).val
resam0 = CA(vis).val
corruptedvis = dd['vis'].copy()
nants = len(set(dd['ant1']) | set(dd['ant2']))
for tt in np.unique(dd['time']):
corruption = np.ones(nants)
if corrupt_ampl:
corruption *= np.abs(np.random.normal(size=nants))
if corrupt_phase:
corruption = corruption*np.exp(1j*np.random.normal(size=nants))
ind = tt == dd['time']
aa1 = dd['ant1'][ind]
aa2 = dd['ant2'][ind]
corruptedvis[ind] = corruptedvis[ind]*corruption[aa1]
corruptedvis[ind] = corruptedvis[ind]*corruption[aa2].conjugate()
vis = ift.makeField(CP.domain, corruptedvis)
np.testing.assert_allclose(resam0, CA(vis).val)
np.testing.assert_allclose(resph0, CP(vis).val)
@pmp('seed', seeds)
def test_abs_operator(seed):
np.random.seed(seed)
dom = ift.UnstructuredDomain(5)
op = eht.closure.ToUnitCircle(dom)
pos = ift.from_random('normal', op.domain)
op(pos)
# currently unfortunately the only way to test complex differentiability
build_complex_op = 1.j*ift.FieldAdapter(
op.domain, 'Im') + ift.FieldAdapter(op.domain, 'Re')
op = op @ build_complex_op
pos = ift.from_random('normal', op.domain)
ift.extra.check_jacobian_consistency(op, pos)
res = op(pos).val
np.testing.assert_allclose(np.abs(res), 1)
np.testing.assert_(np.iscomplexobj(res))
np.testing.assert_(np.sum(np.abs(res.imag)) > 0)
@pmp('npix', [64, 98, 256])
@pmp('epsilon', [1e-3, 1e-4, 1e-5, 1e-6, 1e-7, 1e-8])
@pmp('dd', ds)
def test_nfft(npix, epsilon, dd):
np.random.seed(42)
fov = 200*eht.MUAS2RAD
dom = ift.RGSpace(2*(npix,), 2*(fov/npix,))
nfft = eht.RadioResponse(dom, dd['uv'], epsilon)
ift.extra.consistency_check(nfft,
domain_dtype=np.float64,
target_dtype=np.complex128,
only_r_linear=True)
def test_closure():
np.random.seed(42)
def vis2closure(d, ind):
ind = np.sort(ind)
i, j, k = ind
vis_ind = tuple(
np.where(np.logical_and(d['ant1'] == a, d['ant2'] == b) == 1)[0]
for a, b in ((i, j), (j, k), (i, k)))
phase = d['vis']/abs(d['vis'])
return phase[vis_ind[0]]*phase[vis_ind[1]]*phase[
vis_ind[2]].conjugate()
# Antenna setup
# 0 -- 3 -- 4
# | \/ |
# | /\ |
# 1 -- 2
ant1 = []
ant2 = []
for ii in range(4):
for jj in range(ii + 1, 4):
ant1.extend([ii])
ant2.extend([jj])
ant1.extend([3])
ant2.extend([4])
d = {}
d['time'] = np.array([
0,
]*len(ant1))
d['ant1'] = np.array(ant1)
d['ant2'] = np.array(ant2)
d['vis'] = np.random.random(len(ant1))*np.exp(
np.random.random(len(ant1))*2*np.pi*1j)
ww = 1 + np.arange(len(ant1))
d['sigma'] = abs(d['vis'])/np.sqrt(ww)
closure = np.empty(3, dtype=np.complex128)
closure[0] = vis2closure(d, [1, 2, 3])[0]
closure[1] = vis2closure(d, [0, 2, 3])[0]
closure[2] = vis2closure(d, [0, 1, 3])[0]
closure_op = eht.Visibilities2ClosurePhases(d)[0]
vis = ift.makeField(ift.UnstructuredDomain(len(ant1)), d['vis'])
closure_from_op = closure_op(vis).val
np.testing.assert_allclose(closure_from_op - closure, 0, atol=1e-10)
@pmp('d', ds)
@pmp('dt', [1/60, 1/6])
def test_calibration_dist(d, dt):
np.random.seed(42)
cph, _ = eht.Visibilities2ClosurePhases(d)
ca, _ = eht.Visibilities2ClosureAmplitudes(d)
time = np.array(d['time']) - min(d['time'])
npix = max(time)/dt + 1
tspace = ift.RGSpace(npix, distances=dt)
cal_op = AntennaBasedCalibration(tspace, time, d['ant1'], d['ant2'], 'a',
'p')
g0 = cal_op(ift.full(cal_op.domain, 0)).val
np.testing.assert_allclose(g0, np.ones_like(g0))
g = cal_op(ift.from_random('normal', cal_op.domain))
vis = ift.makeField(g.domain, d['vis'])
cal_vis = g*vis
np.testing.assert_(np.all(vis.val != cal_vis.val))
np.testing.assert_allclose(cph(vis).val, cph(cal_vis).val)
np.testing.assert_allclose(ca(vis).val, ca(cal_vis).val)
@pmp('ddtype', dtypes)
@pmp('tdtype', dtypes)
def test_DomainTupleMultiField(ddtype, tdtype):
np.random.seed(42)
dom = {'lo': ift.RGSpace(5, 3), 'hi': ift.RGSpace(5, 3)}
dom = ift.MultiDomain.make(dom)
active_inds = ["0_lo", "4_hi"]
foo = eht.DomainTuple2MultiField(dom, active_inds)
ift.extra.consistency_check(foo, domain_dtype=ddtype, target_dtype=tdtype)
from .closure import Visibilities2ClosureAmplitudes, Visibilities2ClosurePhases
from .constants import *
from .data import combined_data, read_data, read_data_raw, time_binning
from .response import RadioResponse
from .sugar import *
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019-2020 Max-Planck-Society
# Author: Philipp Arras, Philipp Frank, Philipp Haim, Reimar Leike,
# Jakob Knollmueller
from itertools import combinations
import numpy as np
from scipy.linalg import eigh
from scipy.sparse import coo_matrix
from scipy.sparse.linalg import aslinearoperator
import nifty6 as ift
from .sugar import baselines, binom, n_baselines
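# Both constructors below return a pair (operator, inverse covariance). For
# every time stamp a sparse linear map is assembled from the corresponding
# closure design matrix, rows involving missing baselines (and, for the
# amplitudes, very short baselines) are dropped, a non-redundant subset of
# closures is selected, and the visibility noise is propagated through the
# same matrix to obtain the inverse covariance of the closure quantities.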
def Visibilities2ClosurePhases(d):
rows = []
cols0, cols1, cols2 = [], [], []
icov_rows, icov_cols, icov_data = [], [], []
offset_closure, offset_vis = 0, 0
for tt in np.unique(d['time']):
ind = tt == d['time']
aa1 = d['ant1'][ind]
aa2 = d['ant2'][ind]
ww = (d['sigma'][ind]/abs(d['vis'][ind]))**2
aa = set(aa1) | set(aa2)
if len(aa) < 3:
offset_vis += len(aa1)
continue
psi = closure_phase_design_matrix(len(aa))
ww, missing_inds = insert_missing_baselines_into_weights(aa1, aa2, ww)
clos_desgn_mat = psi @ np.diag(ww)
goodclosures = np.sum(clos_desgn_mat != 0, axis=1) == 3
clos_desgn_mat = clos_desgn_mat[goodclosures]
if clos_desgn_mat.shape[0] == 0:
offset_vis += len(aa1)
continue
clos_desgn_mat = nonredundant_closure_set(clos_desgn_mat)
ww[ww == 0] = np.nan
clos_desgn_mat = np.round(clos_desgn_mat @ np.diag(1/ww)).astype(int)
for ii in missing_inds[::-1]:
clos_desgn_mat = np.delete(clos_desgn_mat, ii, axis=1)
ww = (d['sigma'][ind]/abs(d['vis'][ind]))**2
assert clos_desgn_mat.shape[1] == len(aa1)
for i in range(clos_desgn_mat.shape[0]):
row = clos_desgn_mat[i]
rows += [offset_closure + i]
cols0 += [np.where(row == -1)[0][0] + offset_vis]
cols1 += [np.where(row == 1)[0][0] + offset_vis]
cols2 += [np.where(row == 1)[0][1] + offset_vis]
tmp_dat, tmp_rows, tmp_cols = sparse_cov(clos_desgn_mat, ww)
icov_data += tmp_dat
icov_rows += [xx + offset_closure for xx in tmp_rows]
icov_cols += [xx + offset_closure for xx in tmp_cols]
offset_closure += clos_desgn_mat.shape[0]
offset_vis += clos_desgn_mat.shape[1]
assert len(d['vis']) == offset_vis
shp = (offset_closure, offset_vis)
term0 = SparseMatrixOperator((np.ones(len(rows)), (rows, cols0)), shp)
term1 = SparseMatrixOperator((np.ones(len(rows)), (rows, cols1)), shp)
term2 = SparseMatrixOperator((np.ones(len(rows)), (rows, cols2)), shp)
invcov = SparseMatrixOperator((icov_data, (icov_rows, icov_cols)),
(shp[0], shp[0]))
invcov = ift.SandwichOperator.make(invcov.adjoint)
return (term0.conjugate()*term1*term2) @ ToUnitCircle(term0.domain), invcov
def Visibilities2ClosureAmplitudes(d):
rows, cols, sign = [], [], []
icov_rows, icov_cols, icov_data = [], [], []
offset_closure, offset_vis = 0, 0
for tt in np.unique(d['time']):
ind = tt == d['time']
aa1 = d['ant1'][ind]
aa2 = d['ant2'][ind]
ww = (d['sigma'][ind]/abs(d['vis'][ind]))**2
aa = set(aa1) | set(aa2)
if len(aa) < 4:
offset_vis += len(aa1)
continue
psi = closure_amplitude_design_matrix(len(aa))
ww, missing_inds = insert_missing_baselines_into_weights(aa1, aa2, ww)
zero_baselines = np.zeros(len(ww), dtype=int)
jj = 0
for ii in range(len(zero_baselines)):
if ii in missing_inds:
continue
zero_baselines[ii] = np.linalg.norm(d['uv'][jj]) < 1e7
jj += 1
nontrivial = (np.abs(psi) @ zero_baselines) < 2
clos_desgn_mat = psi @ np.diag(ww)
goodclosures = np.sum(clos_desgn_mat != 0, axis=1) == 4
goodclosures = np.logical_and(goodclosures, nontrivial)
clos_desgn_mat = clos_desgn_mat[goodclosures]
if clos_desgn_mat.shape[0] == 0:
offset_vis += len(aa1)
continue
clos_desgn_mat = nonredundant_closure_set(clos_desgn_mat)
ww[ww == 0] = np.nan
clos_desgn_mat = np.round(clos_desgn_mat @ np.diag(1/ww)).astype(int)
for ii in missing_inds[::-1]:
clos_desgn_mat = np.delete(clos_desgn_mat, ii, axis=1)
ww = (d['sigma'][ind]/abs(d['vis'][ind]))**2
for i in range(clos_desgn_mat.shape[0]):
row = clos_desgn_mat[i]
for c in np.where(row != 0)[0]:
rows += [i + offset_closure]
cols += [c + offset_vis]
sign += [row[c]]
tmp_dat, tmp_rows, tmp_cols = sparse_cov(clos_desgn_mat, ww)
icov_data += tmp_dat
icov_rows += [xx + offset_closure for xx in tmp_rows]
icov_cols += [xx + offset_closure for xx in tmp_cols]
offset_closure += clos_desgn_mat.shape[0]
offset_vis += clos_desgn_mat.shape[1]
smo = SparseMatrixOperator((sign, (rows, cols)),
(offset_closure, offset_vis))
invcov = SparseMatrixOperator((icov_data, (icov_rows, icov_cols)),
(offset_closure,)*2)
invcov = ift.SandwichOperator.make(invcov.adjoint)
inp = ift.ScalingOperator(ift.UnstructuredDomain(d['vis'].shape), 1.)
return smo @ ((inp*inp.conjugate()).real).log().scale(0.5), invcov
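# Minimal usage sketch (assuming a data dictionary d with the keys used
# above: 'uv', 'vis', 'sigma', 'time', 'ant1', 'ant2'):
#
#     vis2ph, invcov_ph = Visibilities2ClosurePhases(d)
#     vis2ca, invcov_ca = Visibilities2ClosureAmplitudes(d)
#     vis = ift.makeField(vis2ph.domain, d['vis'])
#     closure_phases = vis2ph(vis)    # complex values on the unit circle
#     log_closure_ampl = vis2ca(vis)  # logarithmic closure amplitudes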
def visibility_design_matrix(n):
if n < 2:
raise ValueError
lst = range(n*(n - 1)//2)
x = np.zeros((n_baselines(n), n), dtype=int)
x[(lst, [ii for ii in range(n) for _ in range(ii + 1, n)])] = 1
x[(lst, [jj for ii in range(n) for jj in range(ii + 1, n)])] = -1
return x
def closure_phase_design_matrix(n):
if n < 3:
raise ValueError
x = np.zeros((binom(n, 3), n_baselines(n)), dtype=int)
n = n - 1
vdm = visibility_design_matrix(n)
nb = vdm.shape[0]
x[:nb, :n] = vdm
x[:nb, n:] = np.diag(np.ones(nb))
if nb > 1:
x[nb:, n:] = closure_phase_design_matrix(n)
return x
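# Illustration for the smallest non-trivial case, n = 3 antennas:
#
#     visibility_design_matrix(3) = [[ 1, -1,  0],
#                                    [ 1,  0, -1],
#                                    [ 0,  1, -1]]
#     closure_phase_design_matrix(3) = [[1, -1, 1]]
#
# i.e. the single closure phase is phi_01 - phi_02 + phi_12, and (as checked
# in the test suite) closure_phase_design_matrix(n) @ visibility_design_matrix(n)
# vanishes for all n.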
def closure_amplitude_design_matrix(n):
if n < 4:
raise ValueError
m = 3
block = np.zeros((m, 6), dtype=int)
block[([0, 0, 1, 1, 2, 2], [0, 5, 0, 5, 2, 3])] = 1
block[([0, 0, 1, 1, 2, 2], [1, 4, 2, 3, 1, 4])] = -1
bl = baselines(range(n))
x = np.zeros((m*binom(n, 4), n_baselines(n)))
for ii, ants in enumerate(combinations(range(n), 4)):
inds = [bl.index(bb) for bb in baselines(ants)]
x[ii*m:(ii + 1)*m, inds] = block
return x
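# Given the antenna arrays and per-baseline weights of a single time stamp,
# the helper below pads the weight vector with zeros at the positions of
# baselines that were not observed, so that the full design matrices above
# can be applied, and returns the padded weights together with the indices
# of the missing baselines.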
def insert_missing_baselines_into_weights(ants1, ants2, weights):
missing_inds = []
aa = set(ants1) | set(ants2)
if n_baselines(len(aa)) != len(weights):
baselines = list(zip(ants1, ants2))
ants = np.sort(list(aa))
counter, missing_inds = 0, []
for ii, xx in enumerate(ants):
for yy in ants[ii + 1:]:
if (xx, yy) not