diff --git a/demo/mf_imaging.py b/demo/mf_imaging.py deleted file mode 100644 index ae0cf28adf6dd2c6f721c1bdc5a551e2e9276a91..0000000000000000000000000000000000000000 --- a/demo/mf_imaging.py +++ /dev/null @@ -1,219 +0,0 @@ -# SPDX-License-Identifier: GPL-3.0-or-later -# Copyright(C) 2019-2020 Max-Planck-Society -# Author: Philipp Arras - -import argparse - -import matplotlib.pyplot as plt -import numpy as np - -import nifty7 as ift -import resolve as rve - -# FIXME Add damping to Wiener integration -# FIXME cumsum over first dimensions may be performance-wise suboptimal - - -def mf_logsky(domain, freq, prefix, plotter): - assert np.all(np.diff(freq) > 0) # need ordered frequencies - nfreq = len(freq) - freqdom = rve.IRGSpace(freq) - assert freqdom.size == nfreq - - # FIXME Figure out why the values are so freaking big/small - flexibility, asperity = (1e-11, 1e-14), (1e14, 1e14) - - a0 = ift.SimpleCorrelatedField( - domain, - 21, - (1, 0.1), - (5, 1), - (1.2, 0.4), - (0.2, 0.2), - (-2, 0.5), - prefix=f"{prefix}a0", - ) - b0 = ift.SimpleCorrelatedField( - domain, - 0, - (1e-7, 1e-7), - (1e-7, 1e-7), - (1.2, 0.4), - (0.2, 0.2), - (-2, 0.5), - prefix=f"{prefix}b0", - ) - # FIXME Is there a bug in the b0 handling? - b0 = ift.ScalingOperator(domain, 0.0).ducktape(f"{prefix}b0") - plotter.add("a0", a0) - plotter.add("b0", b0) - - # IDEA Try to use individual power spectra - # FIXME Support fixed variance for zero mode - cfm = ift.CorrelatedFieldMaker.make( - 0.0, (1, 0.00001), f"{prefix}freqxi", total_N=2 * (nfreq - 1) - ) - # FIXME Support fixed fluctuations - cfm.add_fluctuations(domain, (1, 0.00001), (1.2, 0.4), (0.2, 0.2), (-2, 0.5)) - freqxi = cfm.finalize(0) - - # FIXME Make sure that it is standard normal distributed in uncorrelated directions - # fld = freqxi(ift.from_random(freqxi.domain)).val - # mean = np.mean(fld, axis=(2, 3)) - # std = np.std(fld, axis=(2, 3)) - # print(np.mean(mean), np.std(mean)) - # print(np.mean(std), np.std(std)) - - intop = rve.WienerIntegrations(freqdom, domain) - freqxi = rve.Reshaper(freqxi.target, intop.domain) @ freqxi - expander = ift.ContractionOperator(intop.domain, (0, 1)).adjoint - vol = freqdom.dvol - - flex = ift.LognormalTransform(*flexibility, prefix + "flexibility", 0) - dom = intop.domain[0] - vflex = np.empty(dom.shape) - vflex[0] = vflex[1] = np.sqrt(vol) - vflex = ift.DiagonalOperator( - ift.makeField(dom, vflex), domain=expander.target, spaces=0 - ) - sig_flex = vflex @ expander @ flex - shift = np.empty(expander.target.shape) - shift[0] = (vol ** 2 / 12.0)[..., None, None] - shift[1] = 1 - shift = ift.makeField(expander.target, shift) - if asperity is None: - asp = ift.makeOp(shift.ptw("sqrt")) @ (freqxi * sig_flex) - else: - asp = ift.LognormalTransform(*asperity, prefix + "asperity", 0) - vasp = np.empty(dom.shape) - vasp[0] = 1 - vasp[1] = 0 - vasp = ift.DiagonalOperator( - ift.makeField(dom, vasp), domain=expander.target, spaces=0 - ) - sig_asp = vasp @ expander @ asp - asp = freqxi * sig_flex * (ift.Adder(shift) @ sig_asp).ptw("sqrt") - - # FIXME shift, vasp, vflex have far more pixels than needed - - logsky = rve.IntWProcessInitialConditions(a0, b0, intop @ asp) - - rve.my_asserteq(logsky.target[1], ift.DomainTuple.make(domain)[0]) - rve.my_asserteq(logsky.target[0].size, nfreq) - - plotter.add_multiple2d("logsky", logsky) - # FIXME Add all power spectra to plotter - plotter.add_spectra("spectra", logsky, [[0.0002, 0.00035], [0.0004, 0.0001]]) - return logsky - - -def main(): - parser = argparse.ArgumentParser() - 
parser.add_argument("-j", type=int, default=1) - parser.add_argument("--automatic-weighting", action="store_true") - args = parser.parse_args() - rve.set_nthreads(args.j) - rve.set_wgridding(False) - - # obs = rve.ms2observations('/data/CYG-D-6680-64CH-10S.ms', 'DATA', False, 0, 'stokesiavg')[0] - - # obs = rve.Observation.load('/data/g330field0.npz') - - print("Load") - obs = rve.Observation.load("/data/hydraC_every10th_channel.npz") - print("Done") - - print("Frequencies:") - print(obs.freq) - print("Shape visibilities:", obs.vis.shape) - plt.scatter(np.arange(len(obs.freq)), obs.freq * 1e-6) - plt.ylabel("Frequency [MHz]") - plt.xlabel("Channel") - plt.savefig("debug_channels.png") - plt.close() - - rve.set_epsilon(1 / 10 / obs.max_snr()) - - fov = np.array([3, 3]) * rve.ARCMIN2RAD - # Do not use powers of two here, otherwise super slow - npix = np.array([250, 250]) - - if False: - R = rve.response.MfResponse( - obs, rve.IRGSpace(obs.freq), ift.RGSpace(npix, fov / npix) - ) - j = R.adjoint(obs.vis * obs.weight) - rve.plotter._plot_mf( - rve.MinimizationState(j, []), - ift.ScalingOperator(j.domain, 1), - "out", - None, - 3, - ) - - dom = ift.RGSpace(npix, fov / npix) - plotter = rve.MfPlotter("png", "plots") - jump = 15 - logsky = mf_logsky(dom, obs.freq[jump // 2 :: jump], "sky", plotter) - - # Plot prior samples - if False: - for ii in range(3): - state = rve.MinimizationState(ift.from_random(logsky.domain), []) - plotter.plot(f"prior{ii}", state) - - sky = logsky.exp() - - if args.automatic_weighting: - # FIXME Figure out how to do automatic weighting for mf - npix = 2500 - effuv = np.linalg.norm(obs.effective_uvw(), axis=1) - dom = ift.RGSpace(npix, 2 * np.max(effuv) / npix) - - cfm = ift.CorrelatedFieldMaker.make(0, (2, 2), "invcov", total_N=obs.nfreq) - cfm.add_fluctuations(dom, (2, 2), (1.2, 0.4), (0.5, 0.2), (-2, 0.5)) - logweighting = cfm.finalize(0) - - interpolation = rve.MfWeightingInterpolation(effuv, logweighting.target) - weightop = ift.makeOp(obs.weight) @ (interpolation @ logweighting.exp()) ** (-2) - lh_wgt = rve.ImagingLikelihood(obs, sky, inverse_covariance_operator=weightop) - plotter.add_histogram( - "normalized residuals autowgts", lh_wgt.normalized_residual - ) - - # FIXME - # plotter.add('bayesian weighting', logweighting.exp()) - plotter.add_multiple1d("power spectrum bayesian weighting", cfm.power_spectrum) - lh = rve.ImagingLikelihood(obs, sky) - plotter.add_histogram("normalized residuals", lh.normalized_residual) - - minimizer = ift.NewtonCG( - ift.GradientNormController(name="newton", iteration_limit=5) - ) - - ham = ift.StandardHamiltonian(lh) - state = rve.MinimizationState(0.1 * ift.from_random(ham.domain), []) - - if args.automatic_weighting: - for ii in range(5): - state = rve.simple_minimize(ham, state.mean, 0, minimizer) - plotter.plot(f"pre_sky_{ii}", state) - - lh = lh_wgt - ham = ift.StandardHamiltonian(lh) - pos = ift.MultiField.union([0.1 * ift.from_random(ham.domain), state.mean]) - state = rve.MinimizationState(pos, []) - - for ii in range(5): - state = rve.simple_minimize( - ham, state.mean, 0, minimizer, constants=sky.domain.keys() - ) - plotter.plot(f"pre_wgt_{ii}", state) - - for ii in range(20): - state = rve.simple_minimize(ham, state.mean, 0, minimizer) - plotter.plot(f"iter{ii}", state) - - -if __name__ == "__main__": - main() diff --git a/misc/ms_summary.py b/misc/ms_summary.py new file mode 100644 index 0000000000000000000000000000000000000000..c36bed83ae6292a6772d8951b96d4c04922fb95f --- /dev/null +++ b/misc/ms_summary.py @@ -0,0 
+1,18 @@ +# SPDX-License-Identifier: GPL-3.0-or-later +# Copyright(C) 2021 Max-Planck-Society +# Author: Philipp Arras + +import argparse + +import resolve as rve + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("ms") + args = parser.parse_args() + nspec = rve.ms_n_spectral_windows(args.ms) + from casacore.tables import tablesummary + tablesummary(args.ms) + print() + print(f"The data set has {nspec} spectral windows.") + print() diff --git a/resolve/__init__.py b/resolve/__init__.py index d69bc4b0d0fee51d513924dd75a0ccf391164df4..3766cad758583276c56f1f0a2837df61a4982e39 100644 --- a/resolve/__init__.py +++ b/resolve/__init__.py @@ -9,7 +9,7 @@ from .minimization import Minimization, MinimizationState, simple_minimize from .mosaicing import * from .mpi import onlymaster from .mpi_operators import * -from .ms_import import ms2observations, ms_n_spectral_windows +from .ms_import import ms2observations, ms_n_spectral_windows, ms_table from .multi_frequency.irg_space import IRGSpace from .multi_frequency.operators import ( IntWProcessInitialConditions, @@ -23,10 +23,4 @@ from .polarization import Polarization, polarization_matrix_exponential from .primary_beam import * from .response import MfResponse, ResponseDistributor, StokesIResponse, SingleResponse from .simple_operators import * -from .util import ( - Reshaper, - divide_where_possible, - my_assert, - my_assert_isinstance, - my_asserteq, -) +from .util import * diff --git a/resolve/antenna_positions.py b/resolve/antenna_positions.py index fca073da18e7196e81f299b08780873033885c21..5a27b81d0651649e5fde2691dabac8d247f65c19 100644 --- a/resolve/antenna_positions.py +++ b/resolve/antenna_positions.py @@ -24,7 +24,6 @@ class AntennaPositions: time of measurement """ - # FIXME Split this class into two. 
One for only imaging, one also for calibration def __init__(self, uvw, ant1=None, ant2=None, time=None): if ant1 is None: my_asserteq(ant2, time, None) diff --git a/resolve/calibration.py b/resolve/calibration.py index 004599305c073ecf73bc9cb220802707e89cba5b..e838378f45b137adf93401754172373f849c1db8 100644 --- a/resolve/calibration.py +++ b/resolve/calibration.py @@ -21,8 +21,6 @@ def calibration_distribution( ap = observation.antenna_positions cop1 = CalibrationDistributor(dom, tgt, ap.ant1, ap.time, antenna_dct, time_dct) cop2 = CalibrationDistributor(dom, tgt, ap.ant2, ap.time, antenna_dct, time_dct) - my_asserteq(cop1.domain, cop2.domain) - my_asserteq(cop1.target, cop2.target) res0 = (cop1 + cop2).real @ logamplitude_operator res1 = (1j * (cop1 - cop2).real) @ phase_operator return (res0 + res1).exp() diff --git a/resolve/constants.py b/resolve/constants.py index 524b7e70c5e380595ebbfc7f3fc853210eb8ae7a..61a8f70d05c2a1d147eeed62e8f0cef9f4866480 100644 --- a/resolve/constants.py +++ b/resolve/constants.py @@ -3,11 +3,12 @@ # Author: Philipp Arras import numpy as np +from scipy.constants import speed_of_light ARCMIN2RAD = np.pi / 60 / 180 AS2RAD = ARCMIN2RAD / 60 DEG2RAD = np.pi / 180 -SPEEDOFLIGHT = 299792458 +SPEEDOFLIGHT = speed_of_light def str2rad(s): diff --git a/resolve/likelihood.py b/resolve/likelihood.py index 49d1b4b14ec106f3ee1ecec1dd369a21e45c9d34..d07b41cb34c4fa62f0787f9c3ccf790bc6ebd6fa 100644 --- a/resolve/likelihood.py +++ b/resolve/likelihood.py @@ -40,12 +40,12 @@ def get_mask_multi_field(weight): return op -def _Likelihood(operator, normalized_residual_operator): - my_assert_isinstance(operator, ift.Operator) +def _Likelihood(operator, data, metric_at_pos, model_data): + my_assert_isinstance(operator, model_data, ift.Operator) my_asserteq(operator.target, ift.DomainTuple.scalar_domain()) - my_assert_isinstance(normalized_residual_operator, ift.Operator) - - operator.normalized_residual = normalized_residual_operator + operator.data = data + operator.metric_at_pos = metric_at_pos + operator.model_data = model_data return operator @@ -55,19 +55,7 @@ def _build_gauss_lh_nres(op, mean, invcov): my_asserteq(op.target, mean.domain, invcov.domain) lh = ift.GaussianEnergy(mean=mean, inverse_covariance=ift.makeOp(invcov)) @ op - nres = ift.makeOp(invcov.sqrt()) @ ift.Adder(mean, neg=True) @ op - return _Likelihood(lh, nres) - - -def _build_varcov_gauss_lh_nres(residual, inverse_covariance, dtype): - my_assert_isinstance(residual, inverse_covariance, ift.Operator) - my_asserteq(residual.target, inverse_covariance.target) - - s0, s1 = "residual", "inverse covariance" - op = residual.ducktape_left(s0) + inverse_covariance.ducktape_left(s1) - lh = ift.VariableCovarianceGaussianEnergy(residual.target, s0, s1, dtype) @ op - nres = residual * inverse_covariance.sqrt() - return _Likelihood(lh, nres) + return _Likelihood(lh, mean, lambda x: ift.makeOp(invcov), op) def _varcov(observation, Rs, inverse_covariance_operator): @@ -77,7 +65,12 @@ def _varcov(observation, Rs, inverse_covariance_operator): residual = ift.Adder(vis, neg=True) @ mask @ Rs inverse_covariance_operator = mask @ inverse_covariance_operator dtype = observation.vis.dtype - return _build_varcov_gauss_lh_nres(residual, inverse_covariance_operator, dtype) + s0, s1 = "residual", "inverse covariance" + op = residual.ducktape_left(s0) + inverse_covariance_operator.ducktape_left(s1) + lh = ift.VariableCovarianceGaussianEnergy(residual.target, s0, s1, dtype) @ op + return _Likelihood( + lh, vis, lambda x: 
ift.makeOp(inverse_covariance_operator(x)), mask @ Rs + ) def ImagingLikelihood( @@ -146,13 +139,15 @@ def ImagingLikelihood( model_data = R @ sky_operator if inverse_covariance_operator is None: if mosaicing: - vis = ift.MultiField.from_dict({kk: o.vis for kk, o in observation.items()}) - weight = ift.MultiField.from_dict( - {kk: o.weight for kk, o in observation.items()} - ) + vis, weight, mask = {}, {}, {} + for kk, oo in observation.items(): + mask[kk], vis[kk], weight[kk] = _get_mask(oo) + vis = ift.MultiField.from_dict(vis) + weight = ift.MultiField.from_dict(weight) + mask = ift.MultiField.from_dict(mask) else: - vis, weight = observation.vis, observation.weight - return _build_gauss_lh_nres(model_data, vis, weight) + mask, vis, weight = _get_mask(observation) + return _build_gauss_lh_nres(mask @ model_data, vis, weight) return _varcov(observation, model_data, inverse_covariance_operator) @@ -196,6 +191,7 @@ def CalibrationLikelihood( my_assert_isinstance(calibration_operator, ift.Operator) model_data = ift.makeOp(model_visibilities) @ calibration_operator if inverse_covariance_operator is None: - return _build_gauss_lh_nres(model_data, observation.vis, observation.weight) + mask, vis, wgt = _get_mask(observation) + return _build_gauss_lh_nres(mask @ model_data, vis, wgt) my_assert_isinstance(inverse_covariance_operator, ift.Operator) return _varcov(observation, model_data, inverse_covariance_operator) diff --git a/resolve/meerkat_beam.py b/resolve/meerkat_beam.py new file mode 100644 index 0000000000000000000000000000000000000000..fb5acf898b757f8c114dcf27953a26f68e12dd5c --- /dev/null +++ b/resolve/meerkat_beam.py @@ -0,0 +1,204 @@ +################################################################################ +# Copyright (c) 2020, National Research Foundation (SARAO) +# +# Licensed under the BSD 3-Clause License (the "License"); you may not use +# this file except in compliance with the License. You may obtain a copy +# of the License at +# +# https://opensource.org/licenses/BSD-3-Clause +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+################################################################################ + +import io + +import numpy as np + +KNOWN_MODELS = { + 'MKAT-AA-L-JIM-2020': + u'''freq, Hx squint, Hy squint, Vx squint, Vy squint, Hx fwhm, Hy fwhm, Vx fwhm, Vy fwhm + MHz, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin + 900, 0.00, 0.88, -0.00, 0.72, 97.98, 100.37, 96.41, 101.89 + 950, -0.01, 0.50, -0.03, 0.41, 92.58, 94.70, 90.72, 96.26 + 1000, 0.05, 0.20, 0.02, 0.38, 87.89, 89.30, 85.59, 91.74 + 1050, -0.02, 0.31, 0.02, -0.12, 83.39, 84.55, 80.96, 86.98 + 1100, -0.03, -0.03, 0.01, -0.23, 79.08, 80.06, 76.76, 82.25 + 1150, -0.03, 0.09, -0.02, -0.29, 74.79, 76.14, 72.92, 78.09 + 1200, -0.05, 0.00, -0.00, -0.36, 70.81, 72.78, 69.70, 73.86 + 1250, -0.05, -0.03, 0.02, -0.35, 67.30, 69.69, 66.79, 70.31 + 1300, 0.06, 0.02, 0.01, -0.58, 64.20, 67.18, 64.30, 67.11 + 1350, 0.18, -0.07, 0.03, -0.42, 61.67, 64.80, 62.15, 64.32 + 1400, -0.43, -0.07, 0.03, -0.07, 59.58, 62.70, 60.11, 62.26 + 1450, -1.27, -0.12, -0.00, 1.07, 57.92, 60.78, 58.31, 60.45 + 1500, -0.97, -0.23, -0.02, 1.14, 56.91, 58.82, 56.61, 59.31 + 1550, -0.40, -0.21, -0.02, 0.74, 56.08, 57.00, 54.83, 58.31 + 1600, -0.04, -0.29, -0.04, 0.49, 55.35, 55.24, 53.18, 57.44 + 1650, 0.22, -0.18, -0.04, 1.07, 55.22, 53.52, 51.58, 56.90''', + + 'MKAT-AA-UHF-JIM-2020': + u'''freq, Hx squint, Hy squint, Vx squint, Vy squint, Hx fwhm, Hy fwhm, Vx fwhm, Vy fwhm + MHz, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin, arcmin + 550, -0.15, 2.46, -0.08, 0.40, 159.05, 165.92, 157.72, 165.00 + 600, -0.00, 0.93, -0.06, 1.22, 147.75, 153.60, 146.55, 155.25 + 650, -0.02, 1.18, 0.00, 0.43, 135.71, 139.61, 133.70, 141.99 + 700, 0.06, 0.14, -0.01, 0.03, 124.92, 128.66, 122.75, 130.42 + 750, 0.07, -0.13, -0.02, -0.16, 115.48, 118.02, 113.03, 121.01 + 800, 0.08, -0.01, 0.02, -0.81, 106.78, 110.47, 105.56, 111.64 + 850, -0.15, -0.61, 0.01, -0.58, 99.25, 103.60, 99.38, 103.52 + 900, -1.12, -0.61, -0.01, -0.10, 93.46, 97.88, 93.96, 97.68 + 950, -1.50, -0.80, -0.09, 0.15, 89.67, 93.10, 89.45, 93.52 + 1000, -0.58, -0.83, -0.14, -0.47, 87.38, 88.87, 85.55, 90.83 + 1050, 0.32, -0.43, -0.08, -0.72, 86.10, 85.16, 82.32, 88.15''' +} + + +def _cosine_taper(r): + # r is normalised such that the half power point occurs at r=0.5: + # _cosine_taper(0) = 1.0 + # _cosine_taper(0.5) = sqrt(0.5) + rr = r * 1.18896478 + return np.cos(np.pi * rr) / (1. - 4. * rr**2) + + +def _pattern(x, y, squint_x, squint_y, fwhm_x, fwhm_y): + return _cosine_taper(np.sqrt(((x - squint_x) / fwhm_x)**2 + ((y - squint_y) / fwhm_y)**2)) + +# -------------------------------------------------------------------------------------------------- +# --- CLASS : JimBeam +# -------------------------------------------------------------------------------------------------- + + +class JimBeam(object): + """MeerKAT simplified primary beam models for L and UHF bands. + A cosine aperture taper (Essential Radio Astronomy, Condon & Ransom, 2016, + page 83, link_) is used as a simplified model of the co-polarisation primary beams. + While the sidelobe level accuracy may be coincidental, the model attains a good fit + to measurements for the mainlobe region. The model is parameterised by measured + frequency dependent pointing, and frequency dependent full width half maximum + beam widths (FWHM). The MeerKAT beams are measured using holography techniques, + and an averaged result at 60 degrees elevation is used here to determine the + frequency dependent parameter values. 
The pointing errors are determined in + the aperture plane using standard phase fitting techniques, while the FWHM + values are measured in the beam plane along axis-aligned cuts through the beam + centers. + Notes + ----- + a) This model is a simplification. + b) The actual beam varies per antenna, and depends on environmental factors. + c) Since per-antenna pointing errors during an observation often exceed 1 arc + minute, the nett 'imaging primary beam' will be slightly broader, and could + be approximated by averaging several individual antenna beams with + respective antenna pointing errors inserted. + d) Depending on the usecase it may be necessary to do reference pointing (or + use another technique) to remove the antenna pointing errors during the + observation in order to use a beam model successfully. + Parameters + ---------- + name : str + Name of model, must be either 'MKAT-AA-L-JIM-2020' or 'MKAT-AA-UHF-JIM-2020' + Raises + ------ + ValueError + If `name` is an unknown model + Request + ------- + As a user, please email the author (mattieu@ska.ac.za) with details about + your usecase requirements. This may influence future releases. A general + description, what extent of the beams are needed, pixelation, frequency + resolution, and accuracy requirements are of interest. + Example usage + ------------- + .. code:: python + import matplotlib.pylab as plt + from katbeam import JimBeam + def showbeam(beam,freqMHz=1000,pol='H',beamextent=10.): + margin=np.linspace(-beamextent/2.,beamextent/2.,128) + x,y=np.meshgrid(margin,margin) + if pol=='H': + beampixels=beam.HH(x,y,freqMHz) + elif pol=='V': + beampixels=beam.VV(x,y,freqMHz) + else: + beampixels=beam.I(x,y,freqMHz) + pol='I' + plt.clf() + plt.imshow(beampixels,extent=[-beamextent/2,beamextent/2,-beamextent/2,beamextent/2]) + plt.title('%s pol beam\nfor %s at %dMHz'%(pol,beam.name,freqMHz)) + plt.xlabel('deg') + plt.ylabel('deg') + uhfbeam=JimBeam('MKAT-AA-UHF-JIM-2020') + showbeam(uhfbeam,800,'H',10) + .. _link: https://books.google.co.za/books?id=Jg6hCwAAQBAJ + """ + + def __init__(self, name='MKAT-AA-L-JIM-2020'): + self.name = name + try: + csv_file = io.StringIO(KNOWN_MODELS[name]) + except KeyError: + raise ValueError('Unknown model {!r}, available ones are {!r}' + .format(name, list(KNOWN_MODELS.keys()))) + else: + table = np.loadtxt(csv_file, skiprows=2, delimiter=',') + self.freqMHzlist = table[:, 0] + # Shape (4, nfreq), where 4 refers to Hx,Hy,Vx,Vy components (and arcmin to degrees) + self.squintlist = table[:, 1:5].T / 60. + self.fwhmlist = table[:, 5:9].T / 60. + + def _interp_squint_fwhm(self, freqMHz): + squint = [np.interp(freqMHz, self.freqMHzlist, lst) for lst in self.squintlist] + fwhm = [np.interp(freqMHz, self.freqMHzlist, lst) for lst in self.fwhmlist] + return squint, fwhm + + def HH(self, x, y, freqMHz): + """Calculate the H co-polarised beam at the provided coordinates. + Parameters + ---------- + x, y : arrays of float of the same shape + Coordinates where beam is sampled, in degrees + freqMHz : float + Frequency, in MHz + Returns + ------- + HH : array of float, same shape as `x` and `y` + The H co-polarised beam + """ + squint, fwhm = self._interp_squint_fwhm(freqMHz) + return _pattern(x, y, squint[0], squint[1], fwhm[0], fwhm[1]) + + def VV(self, x, y, freqMHz): + """Calculate the V co-polarised beam at the provided coordinates. 
+ Parameters + ---------- + x, y : arrays of float of the same shape + Coordinates where beam is sampled, in degrees + freqMHz : float + Frequency, in MHz + Returns + ------- + VV : array of float, same shape as `x` and `y` + The V co-polarised beam + """ + squint, fwhm = self._interp_squint_fwhm(freqMHz) + return _pattern(x, y, squint[2], squint[3], fwhm[2], fwhm[3]) + + def I(self, x, y, freqMHz): # noqa: E741, E743 + """Calculate the Stokes I beam at the provided coordinates. + Parameters + ---------- + x, y : arrays of float of the same shape + Coordinates where beam is sampled, in degrees + freqMHz : float + Frequency, in MHz + Returns + ------- + I : array of float, same shape as `x` and `y` + The Stokes I beam (non-negative) + """ + H = self.HH(x, y, freqMHz) + V = self.VV(x, y, freqMHz) + return 0.5 * (np.abs(H)**2 + np.abs(V)**2) diff --git a/resolve/minimization.py b/resolve/minimization.py index a0e369598c844c28da8faae55f463ffc9b70d094..0c6b999c37e8b9139ba0964fbfac758c86f0242c 100644 --- a/resolve/minimization.py +++ b/resolve/minimization.py @@ -53,7 +53,7 @@ class Minimization: class MinimizationState: - def __init__(self, position, samples, mirror_samples=False): + def __init__(self, position, samples=[], mirror_samples=False): self._samples = list(samples) self._position = position if len(samples) > 0: @@ -71,6 +71,14 @@ class MinimizationState: return self._position.unite(-self._samples[key]) return self._position.unite(self._samples[key]) + def all_samples(self): + if len(self._samples) == 0: + return None + lst = [] + if self._mirror: + lst = [-ss for ss in self._samples] + return lst + self._samples + def __len__(self): return (2 if self._mirror else 1) * len(self._samples) diff --git a/resolve/mpi_operators.py b/resolve/mpi_operators.py index 5a0bcb1da766895398ec1ad0c970d3a34e0db192..af7b8dd6c00e756533789ca40e45d0a11afd201e 100644 --- a/resolve/mpi_operators.py +++ b/resolve/mpi_operators.py @@ -105,7 +105,9 @@ class SliceSumLinear(ift.LinearOperator): self._comm, ) else: - arr = _allgather([op.adjoint(x).val for op in self._oplist], self._comm) + arr = array_allgather( + [op.adjoint(x).val for op in self._oplist], self._comm + ) return ift.makeField(self.domain, arr) @@ -126,7 +128,7 @@ class SliceLinear(ift.EndomorphicOperator): def apply(self, x, mode): self._check_input(x, mode) - res = _allgather( + res = array_allgather( [ op.apply(ift.makeField(op.domain, x.val[self._lo + ii]), mode).val for ii, op in enumerate(self._oplist) @@ -141,7 +143,7 @@ class SliceLinear(ift.EndomorphicOperator): for ii, op in enumerate(self._oplist): with ift.random.Context(sseq[self._lo + ii]): local_samples.append(op.draw_sample(from_inverse).val) - res = _allgather(local_samples, self._comm) + res = array_allgather(local_samples, self._comm) return ift.makeField(self._domain, res) @@ -187,7 +189,7 @@ def _get_global_unique(lst, f, comm): return cap -def _allgather(arrs, comm): +def array_allgather(arrs, comm): if comm is None: fulllst = [arrs] else: diff --git a/resolve/ms_import.py b/resolve/ms_import.py index d528f6998fa3037fd032ddbf40add460273970c6..c3b7c5e6a7aace8de26428f778c3c48a77db2751 100644 --- a/resolve/ms_import.py +++ b/resolve/ms_import.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-3.0-or-later -# Copyright(C) 2019-2020 Max-Planck-Society +# Copyright(C) 2019-2021 Max-Planck-Society from os.path import isdir, join, splitext @@ -11,7 +11,10 @@ from .observation import Observation from .polarization import Polarization from .util import my_assert, my_asserteq, 
my_assert_isinstance -_CASACORE_TABLE_CFG = {"readonly": True, "ack": False} + +def ms_table(path): + from casacore.tables import table + return table(path, readonly=True, ack=False) def ms2observations( @@ -62,8 +65,6 @@ def ms2observations( because it is not guaranteed by the measurement set data structure that all baselines are present in all spectral windows. """ - from casacore.tables import table - if ms[-1] == "/": ms = ms[:-1] if not isdir(ms) or splitext(ms)[1].lower() != ".ms": @@ -82,11 +83,11 @@ def ms2observations( my_assert_isinstance(spectral_window, int) my_assert(spectral_window >= 0) my_assert(spectral_window < ms_n_spectral_windows(ms)) - with table(join(ms, "SPECTRAL_WINDOW"), **_CASACORE_TABLE_CFG) as t: + with ms_table(join(ms, "SPECTRAL_WINDOW")) as t: freq = t.getcol("CHAN_FREQ")[spectral_window] # Polarization - with table(join(ms, "POLARIZATION"), **_CASACORE_TABLE_CFG) as t: + with ms_table(join(ms, "POLARIZATION")) as t: pol = t.getcol("CORR_TYPE") my_asserteq(pol.ndim, 2) my_asserteq(pol.shape[0], 1) @@ -107,7 +108,7 @@ def ms2observations( pol_summation = False # Field - with table(join(ms, "FIELD"), **_CASACORE_TABLE_CFG) as t: + with ms_table(join(ms, "FIELD")) as t: equinox = t.coldesc("REFERENCE_DIR")["desc"]["keywords"]["MEASINFO"]["Ref"] equinox = str(equinox)[1:] # FIXME Put proper support for equinox here @@ -170,8 +171,6 @@ def read_ms_i( with_calib_info, channel_slice, ): - from casacore.tables import table - freq = np.array(freq) my_asserteq(freq.ndim, 1) my_assert(len(freq) > 0) @@ -182,7 +181,7 @@ def read_ms_i( if pol_summation: my_asserteq(len(pol_indices), 2) - with table(name, **_CASACORE_TABLE_CFG) as t: + with ms_table(name) as t: # FIXME Get rid of fullwgt fullwgt, weightcol = _determine_weighting(t) nrow = t.nrows() @@ -193,7 +192,6 @@ def read_ms_i( active_channels = np.zeros(nchan, dtype=np.bool) step = max(1, nrow // 100) # how many rows to read in every step - # Check if data column is available t.getcol(data_column, startrow=0, nrow=10) @@ -324,8 +322,6 @@ def read_ms_i( def ms_n_spectral_windows(ms): - from casacore.tables import table - - with table(join(ms, "SPECTRAL_WINDOW"), **_CASACORE_TABLE_CFG) as t: + with ms_table(join(ms, "SPECTRAL_WINDOW")) as t: freq = t.getcol("CHAN_FREQ") return freq.shape[0] diff --git a/resolve/observation.py b/resolve/observation.py index 3d2a0a28c098d1fed1493cec155a2d40900cc316..efcc39878725b2b2f44fe07c4ebbe638d81f42b2 100644 --- a/resolve/observation.py +++ b/resolve/observation.py @@ -185,6 +185,30 @@ class Observation(_Observation): self._polarization = polarization self._freq = freq self._direction = direction + self._ndeff = None + + def apply_flags(self, arr): + return arr[self._weight != 0.0] + + @property + def flags(self): + return self._weight == 0.0 + + @property + def mask(self): + return self._weight > 0.0 + + def max_snr(self): + return np.max(np.abs(self.apply_flags(self._vis * np.sqrt(self._weight)))) + + def fraction_useful(self): + return self.n_data_effective / self._weight.size + + @property + def n_data_effective(self): + if self._ndeff is None: + self._ndeff = self.apply_flags(self._weight).size + return self._ndeff @onlymaster def save(self, file_name, compress): @@ -214,6 +238,7 @@ class Observation(_Observation): pol = Polarization.from_list(dct["polarization"]) direction = Direction.from_list(dct["direction"]) slc = slice(None) if lo_hi_index is None else slice(*lo_hi_index) + # FIXME Put barrier here that makes sure that only one full Observation is loaded at a time return 
Observation( AntennaPositions.from_list(antpos), dct["vis"][..., slc], @@ -223,6 +248,34 @@ class Observation(_Observation): direction, ) + @staticmethod + def load_mf(file_name, n_imaging_bands, comm=None): + if comm is not None: + my_assert(n_imaging_bands >= comm.Get_size()) + + # Compute frequency ranges in data space + global_freqs = np.load(file_name).get("freq") + assert np.all(np.diff(global_freqs) > 0) + my_assert(n_imaging_bands <= global_freqs.size) + + if comm is None: + local_imaging_bands = range(n_imaging_bands) + else: + local_imaging_bands = range( + *ift.utilities.shareRange( + n_imaging_bands, comm.Get_size(), comm.Get_rank() + ) + ) + full_obs = Observation.load(file_name) + obs_list = [ + full_obs.get_freqs_by_slice( + slice(*ift.utilities.shareRange(len(global_freqs), n_imaging_bands, ii)) + ) + for ii in local_imaging_bands + ] + nu0 = global_freqs.mean() + return obs_list, nu0 + def __eq__(self, other): if not isinstance(other, Observation): return False @@ -247,6 +300,40 @@ class Observation(_Observation): self._direction, ) + def get_freqs(self, frequency_list): + """Return observation that contains a subset of the present frequencies + + Parameters + ---------- + frequency_list : list + List of indices that shall be returned + """ + mask = np.zeros(self.nfreq, dtype=bool) + mask[frequency_list] = 1 + return self.get_freqs_by_slice(mask) + + def get_freqs_by_slice(self, slc): + return Observation( + self._antpos, + self._vis[..., slc], + self._weight[..., slc], + self._polarization, + self._freq[slc], + self._direction, + ) + + def average_stokesi(self): + my_asserteq(self._vis.shape[0], 2) + my_asserteq(self._polarization.restrict_to_stokes_i(), self._polarization) + vis = np.sum(self._weight * self._vis, axis=0)[None] + wgt = np.sum(self._weight, axis=0)[None] + invmask = wgt == 0.0 + vis /= wgt + np.ones_like(wgt) * invmask + vis[invmask] = 0.0 + return Observation( + self._antpos, vis, wgt, Polarization.trivial(), self._freq, self._direction + ) + def move_time(self, t0): antpos = self._antpos.move_time(t0) return Observation( diff --git a/resolve/primary_beam.py b/resolve/primary_beam.py index e13e7738351aeac465b846d559259432762e4d55..47080ab5be58e5b93883246f8710242e527946b9 100644 --- a/resolve/primary_beam.py +++ b/resolve/primary_beam.py @@ -1,5 +1,5 @@ # SPDX-License-Identifier: GPL-3.0-or-later -# Copyright(C) 2019-2020 Max-Planck-Society +# Copyright(C) 2019-2021 Max-Planck-Society # Author: Philipp Arras import numpy as np @@ -9,6 +9,54 @@ import nifty7 as ift from .constants import ARCMIN2RAD, SPEEDOFLIGHT from .util import my_assert +from .meerkat_beam import JimBeam + + +def _get_meshgrid(domain): + nx, ny = domain.shape + xf = domain.distances[0] * nx / 2 + yf = domain.distances[1] * ny / 2 + xs = np.linspace(-xf, xf, nx) + ys = np.linspace(-yf, yf, ny) + return np.meshgrid(xs, ys, indexing="ij") + + +def meerkat_beam(domain, freq, mode): + """Return approximate version of MeerKAT beam + + Parameters + ---------- + domain : RGSpace + Domain on which the beam shall be defined. + frequency : float + Observing frequency in MHz. 
+ mode : str + Either "L" (900 to 1650 MHz) or "UHF" (550 to 1050 MHz) + """ + beam = JimBeam(f"MKAT-AA-{mode}-JIM-2020") + xx, yy = _get_meshgrid(domain) + res = beam.I(xx, yy, freq) + assert res.shape == domain.shape + return res + + +def mf_meerkat_beam(frequency_domain, spatial_domain, mode): + """Return approximate version of MeerKAT beam + + Parameters + ---------- + frequency_domain : IRGSpace + + spatial_domain : RGSpace + + mode : str + Either "L" (900 to 1650 MHz) or "UHF" (550 to 1050 MHz) + """ + freqs = frequency_domain.coordinates + res = np.empty((len(freqs),) + spatial_domain.shape) + for ii, freq in enumerate(freqs): + res[ii] = meerkat_beam(spatial_domain, freq, mode) + return res def vla_beam(domain, freq): @@ -158,11 +206,7 @@ def vla_beam(domain, freq): cupper = poly[ind] rweight = (freq - flower) / (fupper - flower) - xf, xp = dom.distances[0] * dom.shape[0] / 2, dom.shape[0] - yf, yp = dom.distances[1] * dom.shape[1] / 2, dom.shape[1] - xx, yy = np.meshgrid( - np.linspace(-xf, xf, xp), np.linspace(-yf, yf, yp), indexing="ij" - ) + xx, yy = _get_meshgrid(dom) r = np.sqrt(xx ** 2 + yy ** 2) / 1000 / ARCMIN2RAD # Mhz->GHz, RAD->ARCMIN def _vla_eval_poly(coeffs, xs): diff --git a/resolve/response.py b/resolve/response.py index 43fade8b86d0cd009aaaac3b7769482d93619f2b..b683ea337d6e7872e478b771e209d361ef2d6d2a 100644 --- a/resolve/response.py +++ b/resolve/response.py @@ -1,9 +1,10 @@ # SPDX-License-Identifier: GPL-3.0-or-later -# Copyright(C) 2019-2020 Max-Planck-Society +# Copyright(C) 2019-2021 Max-Planck-Society # Author: Philipp Arras from functools import reduce from operator import add +from itertools import product import numpy as np from ducc0.wgridder.experimental import dirty2vis, vis2dirty @@ -112,10 +113,12 @@ class MfResponse(ift.LinearOperator): Contains the the :class:`nifty7.RGSpace` for the positions. """ - def __init__(self, observation, frequency_domain, position_domain, verbose=True): + def __init__( + self, observation, frequency_domain, position_domain, verbose=False, + ): my_assert_isinstance(observation, Observation) # FIXME Add polarization support - my_asserteq(observation.npol, 1) + my_assert(observation.npol in [1, 2]) my_assert_isinstance(frequency_domain, IRGSpace) my_assert_isinstance(position_domain, ift.RGSpace) @@ -127,7 +130,7 @@ class MfResponse(ift.LinearOperator): data_freq = observation.freq my_assert(np.all(np.diff(data_freq) > 0)) sky_freq = np.array(frequency_domain.coordinates) - band_indices = [np.argmin(np.abs(ff - sky_freq)) for ff in data_freq] + band_indices = self.band_indices(sky_freq, data_freq) # Make sure that no index is wasted my_asserteq(len(set(band_indices)), frequency_domain.size) self._r = [] @@ -135,14 +138,20 @@ class MfResponse(ift.LinearOperator): mask = observation.mask for band_index in np.unique(band_indices): sel = band_indices == band_index - assert mask.shape[0] == 1 + if mask.shape[0] == 1: + mymask = mask[0, :, sel] + elif mask.shape[0] == 2: + # FIXME In stokesi mode: mask everything possible in gridder, the rest afterwards. 
+ mymask = np.any(mask[0, :, sel], axis=0) + else: + raise NotImplementedError r = SingleResponse( position_domain, observation.uvw, observation.freq[sel], - mask[0, :, sel].T, + mymask.T, sp, - verbose + verbose, ) self._r.append((band_index, sel, r)) # Double check that all channels are written to @@ -160,14 +169,14 @@ class MfResponse(ift.LinearOperator): foo = rr(ift.makeField(rr.domain, x[band_index])) if res is None: res = np.empty(self._tgt(mode).shape, foo.dtype) - res[0][..., sel] = foo.val + res[..., sel] = foo.val else: empty = np.zeros(self._domain.shape[0], bool) res = np.empty(self._tgt(mode).shape) for band_index, sel, rr in self._r: assert x.shape[0] == 1 # FIXME Is ascontiguousarray really a good idea here? - inp = np.ascontiguousarray(x[0][..., sel]) + inp = np.ascontiguousarray(np.sum(x[..., sel], axis=0)) res[band_index] = rr.adjoint(ift.makeField(rr.target, inp)).val empty[band_index] = False for band_index in np.where(empty)[0]: @@ -178,6 +187,10 @@ class MfResponse(ift.LinearOperator): my_assert(not np.any(empty)) return ift.makeField(self._tgt(mode), res) + @staticmethod + def band_indices(sky_freq, data_freq): + return [np.argmin(np.abs(ff - sky_freq)) for ff in data_freq] + class ResponseDistributor(ift.LinearOperator): def __init__(self, *ops): @@ -216,7 +229,13 @@ class FullResponse(ift.LinearOperator): class SingleResponse(ift.LinearOperator): - def __init__(self, domain, uvw, freq, mask, single_precision, verbose=True): + def __init__( + self, domain, uvw, freq, mask, single_precision, verbose=False, facets=(1, 1) + ): + my_assert_isinstance(facets, tuple) + for ii in range(1): + if domain.shape[0] % facets[0] != 0: + raise ValueError("nfacets needs to be divisor of npix.") # FIXME Currently only the response uses single_precision if possible. # Could be rolled out to the whole likelihood self._domain = ift.DomainTuple.make(domain) @@ -237,43 +256,27 @@ class SingleResponse(ift.LinearOperator): } self._vol = self._domain[0].scalar_dvol self._target_dtype = np.complex64 if single_precision else np.complex128 - self._domain_dtype = np.float32 if single_precision else np.float64 + self._domain_dtype = np.float32 if single_precision else np.float64 self._verbt, self._verbadj = verbose, verbose self._ofac = None + self._facets = facets def apply(self, x, mode): self._check_input(x, mode) # FIXME mtr Is the sky in single precision mode single or double? 
+ # FIXME Make sure that vdot in Gaussian Energy is always in double # my_asserteq(x.dtype, self._domain_dtype if mode == self.TIMES else self._target_dtype) x = x.val.astype( self._domain_dtype if mode == self.TIMES else self._target_dtype ) + std = self._facets == (1, 1) if mode == self.TIMES: - args1 = {"dirty": x} - if self._verbt: - args1["verbosity"] = True - print( - f"\nINFO: Oversampling factors in response: {self.oversampling_factors()}\n" - ) - self._verbt = False - f = dirty2vis - # FIXME Use vis_out keyword of wgridder + res = self._times(x) if std else self._facet_times(x) + self._verbt = False else: - # FIXME assert correct strides for visibilities - my_assert(x.flags["C_CONTIGUOUS"]) - args1 = { - "vis": x, - "npix_x": self._domain[0].shape[0], - "npix_y": self._domain.shape[1], - } - if self._verbadj: - args1["verbosity"] = True - print( - f"\nINFO: Oversampling factors in response: {self.oversampling_factors()}\n" - ) - self._verbadj = False - f = vis2dirty - res = ift.makeField(self._tgt(mode), f(**self._args, **args1) * self._vol) + res = self._adjoint(x) if std else self._facet_adjoint(x) + self._verbadj = False + res = ift.makeField(self._tgt(mode), res * self._vol) my_asserteq( res.dtype, self._target_dtype if mode == self.TIMES else self._domain_dtype ) @@ -291,3 +294,64 @@ class SingleResponse(ift.LinearOperator): hvol = np.array(hspace.shape) * np.array(hspace.distances) / 2 self._ofac = hvol / maxuv return self._ofac + + def _times(self, x): + if self._verbt: + print(f"\nINFO: Oversampling factors in response: {self.oversampling_factors()}\n") + # FIXME Use vis_out keyword of wgridder + res = dirty2vis(dirty=x, verbosity=self._verbt, **self._args) + self._verbt = False + return res + + def _adjoint(self, x): + my_assert(x.flags["C_CONTIGUOUS"]) + nx, ny = self._domain.shape + if self._verbadj: + print(f"\nINFO: Oversampling factors in response: {self.oversampling_factors()}\n") + res = vis2dirty(vis=x, npix_x=nx, npix_y=ny, verbosity=self._verbadj, **self._args) + self._verbadj = False + return res + + def _facet_times(self, x): + nfacets_x, nfacets_y = self._facets + npix_x, npix_y = self._domain.shape + nx = npix_x // nfacets_x + ny = npix_y // nfacets_y + vis = None + for xx, yy in product(range(nfacets_x), range(nfacets_y)): + cx = ((0.5 + xx) / nfacets_x - 0.5) * self._args["pixsize_x"] * npix_x + cy = ((0.5 + yy) / nfacets_y - 0.5) * self._args["pixsize_y"] * npix_y + facet = x[nx * xx : nx * (xx + 1), ny * yy : ny * (yy + 1)] + foo = dirty2vis( + dirty=facet, + center_x=cx, + center_y=cy, + verbosity=self._verbt, + **self._args + ) + if vis is None: + vis = foo + else: + vis += foo + return vis + + def _facet_adjoint(self, x): + nfacets_x, nfacets_y = self._facets + npix_x, npix_y = self._domain.shape + nx = npix_x // nfacets_x + ny = npix_y // nfacets_y + res = np.zeros((npix_x, npix_y), self._domain_dtype) + for xx, yy in product(range(nfacets_x), range(nfacets_y)): + cx = ((0.5 + xx) / nfacets_x - 0.5) * self._args["pixsize_x"] * npix_x + cy = ((0.5 + yy) / nfacets_y - 0.5) * self._args["pixsize_y"] * npix_y + im = vis2dirty( + vis=x, + npix_x=nx, + npix_y=ny, + center_x=cx, + center_y=cy, + verbosity=self._verbadj, + **self._args + ) + res[nx * xx : nx * (xx + 1), ny * yy : ny * (yy + 1)] = im + return res diff --git a/resolve/util.py b/resolve/util.py index 29ed79ddaa3f26d75e00260358f32a2f4a4a6db2..ab9aea5cff0c7f80b2c70dd8f5d3d4bdc1943f29 100644 --- a/resolve/util.py +++ b/resolve/util.py @@ -2,6 +2,7 @@ # Copyright(C) 2020 Max-Planck-Society # 
Author: Philipp Arras +import matplotlib.pyplot as plt import numpy as np import nifty7 as ift @@ -66,3 +67,9 @@ def divide_where_possible(a, b): raise TypeError arr = np.divide(a, b, out=np.ones(dom.shape), where=b != 0) return ift.makeField(dom, arr) + + +def imshow(arr, ax=None, **kwargs): + if ax is None: + ax = plt.gca() + return ax.imshow(arr.T, origin="lower", **kwargs) diff --git a/test/test_general.py b/test/test_general.py index 9e1045f69f0902842ea54f6bdcdc2d029e92c19c..e4913e5f1fdd7ef8f2a7f10e486e5ac182a6638a 100644 --- a/test/test_general.py +++ b/test/test_general.py @@ -39,6 +39,7 @@ points = ift.InverseGammaOperator( sky = rve.vla_beam(dom, np.mean(OBS[0].freq)) @ (sky0 + inserter @ points) freqdomain = rve.IRGSpace(np.linspace(1000, 1050, num=10)) +FACETS = [(1, 1), (2, 2), (2, 1), (1, 4)] @pmp( @@ -266,12 +267,29 @@ def test_response_distributor(): @pmp("obs", OBS) -def test_single_response(obs): +@pmp("facets", FACETS) +def test_single_response(obs, facets): mask = obs.mask - op = rve.response.SingleResponse(dom, obs.uvw, obs.freq, mask[0], False) + op = rve.response.SingleResponse( + dom, obs.uvw, obs.freq, mask[0], False, facets=facets + ) ift.extra.check_linear_operator(op, np.float64, np.complex128, only_r_linear=True) +def test_facet_consistency(): + obs = OBS[0] + res0 = None + pos = ift.from_random(dom) + for facets in FACETS: + op = rve.response.SingleResponse( + dom, obs.uvw, obs.freq, obs.mask[0], False, facets=facets + ) + res = op(pos) + if res0 is None: + res0 = res + ift.extra.assert_allclose(res0, res, atol=1e-4, rtol=1e-4) + + @rve.onlymaster def fvalid(): return 1.0
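
The diff replaces the module-level _CASACORE_TABLE_CFG dict with the exported ms_table helper, on which the new misc/ms_summary.py script builds. A minimal usage sketch, assuming python-casacore is installed; "observation.ms" is a placeholder path, not a file shipped with the repository:

from os.path import join

import resolve as rve

ms = "observation.ms"  # placeholder path to a CASA measurement set

# ms_table opens a casacore table read-only and without the acknowledgement banner
with rve.ms_table(join(ms, "SPECTRAL_WINDOW")) as t:
    chan_freq = t.getcol("CHAN_FREQ")

print("Spectral windows:", rve.ms_n_spectral_windows(ms))
print("Channels per window:", chan_freq.shape[1])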
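
The new meerkat_beam and mf_meerkat_beam functions wrap the vendored JimBeam model from katbeam. A sketch of evaluating them on an image grid; the pixel numbers, field of view and frequencies below are made up for illustration, and the grid is set up in degrees because JimBeam takes degree coordinates:

import numpy as np
import nifty7 as ift
import resolve as rve
from resolve.primary_beam import meerkat_beam, mf_meerkat_beam

# 256 x 256 pixel grid covering 2 x 2 degrees (illustrative numbers)
npix = np.array([256, 256])
extent = np.array([2.0, 2.0])
dom = ift.RGSpace(npix, extent / npix)

# Single-frequency beam at 1000 MHz in L band, returned as a numpy array
beam = meerkat_beam(dom, 1000.0, "L")

# Frequency-resolved beam cube on an irregular frequency grid (values in MHz)
fdom = rve.IRGSpace(np.array([950.0, 1100.0, 1400.0]))
cube = mf_meerkat_beam(fdom, dom, "L")
print(beam.shape, cube.shape)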
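
Observation gains flagging helpers (apply_flags, flags, mask, max_snr, fraction_useful), Stokes-I averaging, frequency selection and the MPI-aware load_mf. A sketch of how these could be combined; "obs.npz" is a placeholder for a file written earlier with Observation.save, and average_stokesi assumes two parallel-hand polarizations:

import resolve as rve

fname = "obs.npz"  # placeholder, written earlier via Observation.save
obs = rve.Observation.load(fname)

print("Fraction of unflagged data:", obs.fraction_useful())
print("Maximum signal-to-noise:", obs.max_snr())

# Collapse the two parallel-hand polarizations into Stokes I
obs_i = obs.average_stokesi()

# Keep only the lower half of the frequency channels
obs_low = obs_i.get_freqs_by_slice(slice(0, obs_i.nfreq // 2))

# Split the data set into four imaging bands; with an MPI communicator the bands
# are distributed over the tasks, and the mean frequency is returned alongside
obs_list, nu0 = rve.Observation.load_mf(fname, 4, comm=None)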
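
With the new _Likelihood interface the likelihood operator no longer carries a normalized_residual attribute but exposes data, model_data and metric_at_pos instead. A sketch of how a data-space chi-square could be evaluated from these attributes in the fixed-covariance Gaussian case; lh and position are placeholders for objects built as elsewhere in the code:

def chi_square(lh, position):
    """Data-space chi^2 of a resolve likelihood at `position` (fixed-covariance case).

    `lh` is e.g. rve.ImagingLikelihood(obs, sky_operator) and `position` a field
    from its domain, e.g. ift.from_random(lh.domain).
    """
    model = lh.model_data.force(position)   # model visibilities with flags applied
    residual = model - lh.data              # lh.data holds the (masked) visibilities
    icov = lh.metric_at_pos(position)       # inverse noise covariance as an operator
    return residual.s_vdot(icov(residual)).real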