......@@ -2,6 +2,7 @@
git_version.py
# custom
*.txt
setup.cfg
.idea
.DS_Store
......
......@@ -10,10 +10,11 @@ RUN apt-get update && apt-get install -y \
# Testing dependencies
python3-pytest-cov jupyter \
# Optional NIFTy dependencies
libfftw3-dev python3-mpi4py python3-matplotlib \
python3-mpi4py python3-matplotlib \
# more optional NIFTy dependencies
&& pip3 install pyfftw \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft.git \
&& pip3 install jupyter \
&& rm -rf /var/lib/apt/lists/*
......
......@@ -47,11 +47,13 @@ Installation
- [Python 3](https://www.python.org/) (3.5.x or later)
- [SciPy](https://www.scipy.org/)
- [pypocketfft](https://gitlab.mpcdf.mpg.de/mtr/pypocketfft)
Optional dependencies:
- [pyFFTW](https://pypi.python.org/pypi/pyFFTW) for faster Fourier transforms
- [pyHealpix](https://gitlab.mpcdf.mpg.de/ift/pyHealpix) (for harmonic
transforms involving domains on the sphere)
- [nifty_gridder](https://gitlab.mpcdf.mpg.de/ift/nifty_gridder) (for radio
interferometry responses)
- [mpi4py](https://mpi4py.scipy.org) (for MPI-parallel execution)
- [matplotlib](https://matplotlib.org/) (for field plotting)
......@@ -60,7 +62,7 @@ Optional dependencies:
The current version of Nifty5 can be obtained by cloning the repository and
switching to the NIFTy_5 branch:
git clone https://gitlab.mpcdf.mpg.de/ift/NIFTy.git
git clone https://gitlab.mpcdf.mpg.de/ift/nifty.git
### Installation
......@@ -70,33 +72,21 @@ distributions, the "apt" lines will need slight changes.
NIFTy5 and its mandatory dependencies can be installed via:
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/NIFTy.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via:
sudo apt-get install python3-matplotlib
NIFTy uses Numpy's FFT implementation by default. For large problems FFTW may be
used because of its higher performance. It can be installed via:
sudo apt-get install libfftw3-dev
pip3 install --user pyfftw
To enable FFTW usage in NIFTy, call
nifty5.fft.enable_fftw()
at the beginning of your code.
(Note: If you encounter problems related to `pyFFTW`, make sure that you are
using a pip-installed `pyFFTW` package. Unfortunately, some distributions are
shipping an incorrectly configured `pyFFTW` package, which does not cooperate
with the installed `FFTW3` libraries.)
Support for spherical harmonic transforms is added via:
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
Support for the radio interferometry gridder is added via:
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git
MPI support is added via:
sudo apt-get install python3-mpi4py
......
from time import time
import matplotlib.pyplot as plt
import numpy as np
import nifty5 as ift
np.random.seed(40)
N0s, a0s, b0s, c0s = [], [], [], []
for ii in range(10, 26):
    nu = 1024
    nv = 1024
    N = int(2**ii)
    print('N = {}'.format(N))
    uv = np.random.rand(N, 2) - 0.5
    vis = np.random.randn(N) + 1j*np.random.randn(N)
    uvspace = ift.RGSpace((nu, nv))
    visspace = ift.UnstructuredDomain(N)
    img = np.random.randn(nu*nv)
    img = img.reshape((nu, nv))
    img = ift.from_global_data(uvspace, img)
    t0 = time()
    GM = ift.GridderMaker(uvspace, eps=1e-7, uv=uv)
    vis = ift.from_global_data(visspace, vis)
    op = GM.getFull().adjoint
    t1 = time()
    op(img).to_global_data()
    t2 = time()
    op.adjoint(vis).to_global_data()
    t3 = time()
    print(t2-t1, t3-t2)
    N0s.append(N)
    a0s.append(t1 - t0)
    b0s.append(t2 - t1)
    c0s.append(t3 - t2)

print('Measure rest operator')
sc = ift.StatCalculator()
op = GM.getRest().adjoint
for _ in range(10):
    t0 = time()
    res = op(img)
    sc.add(time() - t0)
t_fft = sc.mean
print('FFT shape', res.shape)
plt.scatter(N0s, a0s, label='Gridder mr')
plt.legend()
# no idea why this is necessary, but if it is omitted, the range is wrong
plt.ylim(min(a0s), max(a0s))
plt.ylabel('time [s]')
plt.title('Initialization')
plt.loglog()
plt.savefig('bench0.png')
plt.close()
plt.scatter(N0s, b0s, color='k', marker='^', label='Gridder mr times')
plt.scatter(N0s, c0s, color='k', label='Gridder mr adjoint times')
plt.axhline(sc.mean, label='FFT')
plt.axhline(sc.mean + np.sqrt(sc.var))
plt.axhline(sc.mean - np.sqrt(sc.var))
plt.legend()
plt.ylabel('time [s]')
plt.title('Apply')
plt.loglog()
plt.savefig('bench1.png')
plt.close()
......@@ -45,13 +45,6 @@ def make_random_mask():
return mask.to_global_data()
def mask_to_nan(mask, field):
    # Set masked pixels to nan for plotting
    masked_data = field.local_data.copy()
    masked_data[mask.local_data == 0] = np.nan
    return ift.from_local_data(field.domain, masked_data)
if __name__ == '__main__':
np.random.seed(42)
......@@ -64,7 +57,7 @@ if __name__ == '__main__':
if mode == 0:
# One-dimensional regular grid
position_space = ift.RGSpace([1024])
mask = np.ones(position_space.shape)
mask = np.zeros(position_space.shape)
elif mode == 1:
# Two-dimensional regular grid with checkerboard mask
position_space = ift.RGSpace([128, 128])
......@@ -101,23 +94,22 @@ if __name__ == '__main__':
# Build instrument response consisting of a discretization, mask
# and harmonic transformation
# Data is defined on a geometry-free space, thus the geometry is removed
GR = ift.GeometryRemover(position_space)
# Masking operator to model that parts of the field have not been observed
mask = ift.Field.from_global_data(position_space, mask)
Mask = ift.DiagonalOperator(mask)
Mask = ift.MaskOperator(mask)
# The response operator consists of
# - an harmonic transform (to get to image space)
# - a harmonic transform (to get to image space)
# - the application of the mask
# - the removal of geometric information
# The removal of geometric information is included in the MaskOperator
# it can also be implemented with a GeometryRemover
# Operators can be composed either with parenthesis
R = GR(Mask(HT))
R = Mask(HT)
# or with @
R = GR @ Mask @ HT
R = Mask @ HT
data_space = GR.target
data_space = R.target
# Set the noise covariance N
noise = 5.
......@@ -144,16 +136,17 @@ if __name__ == '__main__':
filename = "getting_started_1_mode_{}.png".format(mode)
if rg and len(position_space.shape) == 1:
plot.add(
[HT(MOCK_SIGNAL), GR.adjoint(data),
[HT(MOCK_SIGNAL), Mask.adjoint(data),
HT(m)],
label=['Mock signal', 'Data', 'Reconstruction'],
alpha=[1, .3, 1])
plot.add(mask_to_nan(mask, HT(m - MOCK_SIGNAL)), title='Residuals')
plot.add(Mask.adjoint(Mask(HT(m - MOCK_SIGNAL))), title='Residuals')
plot.output(nx=2, ny=1, xsize=10, ysize=4, name=filename)
else:
plot.add(HT(MOCK_SIGNAL), title='Mock Signal')
plot.add(mask_to_nan(mask, (GR(Mask)).adjoint(data)), title='Data')
plot.add(Mask.adjoint(data), title='Data')
plot.add(HT(m), title='Reconstruction')
plot.add(mask_to_nan(mask, HT(m - MOCK_SIGNAL)), title='Residuals')
plot.add(Mask.adjoint(Mask(HT(m) - HT(MOCK_SIGNAL))),
title='Residuals')
plot.output(nx=2, ny=2, xsize=10, ysize=10, name=filename)
print("Saved results as '{}'.".format(filename))
......@@ -103,13 +103,15 @@ if __name__ == '__main__':
data = signal_response(mock_position) + N.draw_sample()
# Minimization parameters
ic_sampling = ift.GradientNormController(iteration_limit=100)
ic_newton = ift.GradInfNormController(
name='Newton', tol=1e-7, iteration_limit=35)
ic_sampling = ift.AbsDeltaEnergyController(
name='Sampling', deltaE=0.05, iteration_limit=100)
ic_newton = ift.AbsDeltaEnergyController(
name='Newton', deltaE=0.5, iteration_limit=35)
minimizer = ift.NewtonCG(ic_newton)
# Set up likelihood and information Hamiltonian
likelihood = ift.GaussianEnergy(mean=data, covariance=N)(signal_response)
likelihood = ift.GaussianEnergy(mean=data,
inverse_covariance=N.inverse)(signal_response)
H = ift.StandardHamiltonian(likelihood, ic_sampling)
initial_mean = ift.MultiField.full(H.domain, 0.)
......
import numpy as np
import nifty5 as ift
def convtest(test_signal, delta, func):
    domain = test_signal.domain

    # Create Convolution Operator
    conv_op = ift.FuncConvolutionOperator(domain, func)

    # Convolve, Adjoint-Convolve
    conv_signal = conv_op(test_signal)
    cac_signal = conv_op.adjoint_times(conv_signal)
    print(test_signal.integrate(), conv_signal.integrate(),
          cac_signal.integrate())

    # generate kernel image
    conv_delta = conv_op(delta)

    # Plot results
    plot = ift.Plot()
    plot.add(test_signal, title='Signal')
    plot.add(conv_signal, title='Signal Convolved')
    plot.add(cac_signal, title='Signal, Conv, Adj-Conv')
    plot.add(conv_delta, title='Kernel')
    plot.output()


# Healpix test
nside = 64
npix = 12 * nside * nside
domain = ift.HPSpace(nside)

# Define test signal (some point sources)
signal_vals = np.zeros(npix, dtype=np.float64)
for i in range(0, npix, npix//12 + 27):
    signal_vals[i] = 500.
signal = ift.from_global_data(domain, signal_vals)

delta_vals = np.zeros(npix, dtype=np.float64)
delta_vals[0] = 1.0
delta = ift.from_global_data(domain, delta_vals)


# Define kernel function
def func(theta):
    ct = np.cos(theta)
    return 1. * np.logical_and(ct > 0.7, ct <= 0.8)


convtest(signal, delta, func)

domain = ift.RGSpace((100, 100))

# Define test signal (some point sources)
signal_vals = np.zeros(domain.shape, dtype=np.float64)
signal_vals[35, 70] = 5000.
signal_vals[45, 8] = 5000.
signal = ift.from_global_data(domain, signal_vals)

# Define delta signal, generate kernel image
delta_vals = np.zeros(domain.shape, dtype=np.float64)
delta_vals[0, 0] = 1.0
delta = ift.from_global_data(domain, delta_vals)


# Define kernel function
def func(dist):
    return 1. * np.logical_and(dist > 0.1, dist <= 0.2)


convtest(signal, delta, func)
......@@ -2,6 +2,12 @@ NIFTy-related publications
==========================
::
@article{asclnifty5,
title={NIFTy5: Numerical Information Field Theory v5},
author={Arras, Philipp and Baltac, Mihai and Ensslin, Torsten A and Frank, Philipp and Hutschenreuter, Sebastian and Knollmueller, Jakob and Leike, Reimar and Newrzella, Max-Niklas and Platz, Lukas and Reinecke, Martin and others},
journal={Astrophysics Source Code Library},
year={2019}
}
@software{nifty,
author = {{Martin Reinecke, Theo Steininger, Marco Selig}},
......@@ -11,7 +17,7 @@ NIFTy-related publications
date = {2018-04-05},
}
@ARTICLE{2013A&A...554A..26S,
@article{2013A&A...554A..26S,
author = {{Selig}, M. and {Bell}, M.~R. and {Junklewitz}, H. and {Oppermann}, N. and {Reinecke}, M. and {Greiner}, M. and {Pachajoa}, C. and {En{\ss}lin}, T.~A.},
title = "{NIFTY - Numerical Information Field Theory. A versatile PYTHON library for signal inference}",
journal = {\aap},
......@@ -29,7 +35,7 @@ NIFTy-related publications
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@ARTICLE{2017arXiv170801073S,
@article{2017arXiv170801073S,
author = {{Steininger}, T. and {Dixit}, J. and {Frank}, P. and {Greiner}, M. and {Hutschenreuter}, S. and {Knollm{\"u}ller}, J. and {Leike}, R. and {Porqueres}, N. and {Pumpe}, D. and {Reinecke}, M. and {{\v S}raml}, M. and {Varady}, C. and {En{\ss}lin}, T.},
title = "{NIFTy 3 - Numerical Information Field Theory - A Python framework for multicomponent signal inference on HPC clusters}",
journal = {ArXiv e-prints},
......
......@@ -37,7 +37,7 @@ NIFTy comes with reimplemented MAP and VI estimators.
Free Theory & Implicit Operators
--------------------------------
A free IFT appears when the signal field :math:`{s}` and the noise :math:`{n}` of the data :math:`{d}` are independent, zero-centered Gaussian processes of kown covariances :math:`{S}` and :math:`{N}`, respectively,
A free IFT appears when the signal field :math:`{s}` and the noise :math:`{n}` of the data :math:`{d}` are independent, zero-centered Gaussian processes of known covariances :math:`{S}` and :math:`{N}`, respectively,
.. math::
......@@ -49,7 +49,7 @@ and the measurement equation is linear in both signal and noise,
d= R\, s + n,
with :math:`{R}` being the measurement response, which maps the continous signal field into the discrete data space.
with :math:`{R}` being the measurement response, which maps the continuous signal field into the discrete data space.
This is called a free theory, as the information Hamiltonian
......@@ -96,8 +96,8 @@ These implicit operators can be combined into new operators, e.g. to :math:`{D^{
The invocation of an inverse operator applied to a vector might trigger the execution of a numerical linear algebra solver.
Thus, when NIFTy calculates :math:`{m = D\, j}`, it actually solves :math:`{D^{-1} m = j}` for :math:`{m}` behind the scenes.
The advantage of implicit operators to explicit matrices is the reduced memory requirements.
The reconstruction of only a Megapixel image would otherwithe require the storage and processing of matrices with sizes of several Terabytes.
The advantage of implicit operators compared to explicit matrices is the reduced memory consumption;
for the reconstruction of just a Megapixel image the latter would already require several Terabytes.
Larger images could not be dealt with due to the quadratic memory requirements of explicit operator representations.
The demo codes `demos/getting_started_1.py` and `demos/Wiener_Filter.ipynb` illustrate this.
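A rough sketch of this pattern (grid size, covariances and response below are toy placeholders; `demos/Wiener_Filter.ipynb` does this properly)::

    import nifty5 as ift

    s_space = ift.RGSpace(1024)
    h_space = s_space.get_default_codomain()
    HT = ift.HarmonicTransformOperator(h_space, target=s_space)

    S = ift.ScalingOperator(1., h_space)    # toy prior covariance
    N = ift.ScalingOperator(0.1, s_space)   # toy noise covariance
    R = HT                                  # toy response

    s = S.draw_sample()
    d = R(s) + N.draw_sample()

    # D^{-1} and j are assembled explicitly as operator expressions ...
    D_inv = R.adjoint @ N.inverse @ R + S.inverse
    j = R.adjoint_times(N.inverse_times(d))

    # ... but D itself stays implicit: applying it to j runs a CG solver
    IC = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
    D = ift.InversionEnabler(D_inv, IC, approximation=S.inverse).inverse
    m = D(j)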
......@@ -106,7 +106,7 @@ The demo codes `demos/getting_started_1.py` and `demos/Wiener_Filter.ipynb` illu
Generative Models
-----------------
For more sophisticated measurement situations, involving non-linear measuremnts, unknown covariances, calibration constants and the like, it is recommended to formulate those as generative models for which NIFTy provides powerful inference algorithms.
For more sophisticated measurement situations (involving non-linear measurements, unknown covariances, calibration constants and the like) it is recommended to formulate those as generative models for which NIFTy provides powerful inference algorithms.
In a generative model, all known or unknown quantities are described as the results of generative processes, which start with simple probability distributions, like the uniform, the i.i.d. Gaussian, or the delta distribution.
......@@ -129,7 +129,7 @@ NIFTy takes advantage of this formulation in several ways:
1) All prior degrees of freedom have unit covariance, which improves the condition number of operators that need to be inverted.
2) The amplitude operator can be regarded as part of the response, :math:`{R'=R\,A}`.
In general, more sophisticated responses can be constructed out of the composition of simpler operators.
In general, more sophisticated responses can be obtained by combining simpler operators.
3) The response can be non-linear, e.g. :math:`{R'(s)=R \exp(A\,\xi)}`, see `demos/getting_started_2.py`.
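A minimal sketch of such compositions (the scaling operators below are mere stand-ins for a realistic response :math:`{R}` and amplitude :math:`{A}`)::

    import nifty5 as ift

    dom = ift.RGSpace(128)
    R = ift.ScalingOperator(2., dom)   # stand-in instrument response
    A = ift.ScalingOperator(3., dom)   # stand-in amplitude operator
    xi = ift.full(dom, 1.)             # stand-in excitation field

    R_prime = R @ A                    # linear composition R' = R A
    print(R_prime(xi))                 # constant field with value 6

    R_nonlin = R(A.exp())              # non-linear response R'(xi) = R exp(A xi)
    print(R_nonlin(xi))                # constant field with value 2*exp(3)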
......@@ -168,7 +168,7 @@ Maximum a Posteriori
One popular field estimation method is Maximum a Posteriori (MAP).
It only requires to minimize the information Hamiltonian, e.g by a gradient descent method that stops when
It only requires minimizing the information Hamiltonian, e.g. by a gradient descent method that stops when
.. math::
......
......@@ -8,33 +8,35 @@ distributions, the "apt" lines will need slight changes.
NIFTy5 and its mandatory dependencies can be installed via::
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/NIFTy.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via::
sudo apt-get install python3-matplotlib
NIFTy uses Numpy's FFT implementation by default. For large problems FFTW may be
used because of its higher performance. It can be installed via::
Support for spherical harmonic transforms is added via::
sudo apt-get install libfftw3-dev
pip3 install --user pyfftw
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
To enable FFTW usage in NIFTy, call::
Support for the radio interferometry gridder is added via::
nifty5.fft.enable_fftw()
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git
at the beginning of your code.
MPI support is added via::
(Note: If you encounter problems related to `pyFFTW`, make sure that you are
using a pip-installed `pyFFTW` package. Unfortunately, some distributions are
shipping an incorrectly configured `pyFFTW` package, which does not cooperate
with the installed `FFTW3` libraries.)
sudo apt-get install python3-mpi4py
Support for spherical harmonic transforms is added via::
NIFTy documentation is provided by Sphinx. To build the documentation::
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
sudo apt-get install python3-sphinx-rtd-theme dvipng
cd <nifty_directory>
sh docs/generate.sh
MPI support is added via::
To view the documentation in firefox::
firefox docs/build/index.html
(Note: Make sure that you reinstall nifty after each change since sphinx
imports nifty from the Python path.)
sudo apt-get install python3-mpi4py
Discretization and Volume in NIFTy
Discretisation and Volume in NIFTy
==================================
.. note:: Some of this discussion is rather technical and may be skipped in a first read-through.
......@@ -160,15 +160,21 @@ Often, log-likelihoods contain integrals over the quantity of interest :math:`s`
\int_\Omega \text{d}x\, s(x) \approx \sum_i s^i\int_{\Omega_i}\text{d}x\, 1
Here the domain of the integral :math:`\Omega = \dot{\bigcup_q} \; \Omega_i` is the disjoint union over smaller :math:`\Omega_i`, e.g. the pixels of the space, and :math:`s_i` is the discretized field value on the :math:`i`-th pixel.
Here the domain of the integral :math:`\Omega = \dot{\bigcup}_i \; \Omega_i` is the disjoint union over smaller :math:`\Omega_i`, e.g. the pixels of the space, and :math:`s_i` is the discretised field value on the :math:`i`-th pixel.
This introduces the weighting :math:`V_i=\int_{\Omega_i}\text{d}x\, 1`, also called the volume factor, a property of the space.
NIFTy aids you in constructing your own log-likelihood by providing methods like :func:`~field.Field.weight`, which weights all pixels of a field with their corresponding volume.
An integral over a :class:`~field.Field` :code:`s` can be performed by calling :code:`s.weight(1).sum()`, which is equivalent to :code:`s.integrate()`.
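A small sketch (grid size and distances are arbitrary)::

    import nifty5 as ift

    dom = ift.RGSpace(64, distances=0.1)   # total volume 64*0.1 = 6.4
    s = ift.full(dom, 2.)                  # constant field s(x) = 2
    print(s.sum())                         # 128.0, no volume factors
    print(s.weight(1).sum())               # 12.8 = 2*6.4
    print(s.integrate())                   # 12.8, same as above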
Volume factors are also applied automatically in the following places:
- :class:`~operators.harmonic_operators.FFTOperator` as well as all other harmonic operators. Here the zero mode of the transformed field is the integral over the original field, thus the whole field is weighted once.
- some response operators, such as the :class:`~library.los_response.LOSResponse`. In this operator a line integral is descritized, so a 1-dimensional volume factor is applied.
- In :class:`~library.correlated_fields.CorrelatedField` as well :class:`~library.correlated_fields.MfCorrelatedField`, the field is multiplied by the square root of the total volume in configuration space. This ensures that the same field reconstructed over a larger domain has the same variance in position space in the limit of infinite resolution. It also ensures that power spectra in NIFTy behave according to the definition of a power spectrum, namely the power of a k-mode is the expectation of the k-mode square, divided by the volume of the space.
- :class:`~operators.harmonic_operators.FFTOperator` as well as all other harmonic operators.
Here the zero mode of the transformed field is the integral over the original field, thus the whole field is weighted once.
- Some response operators, such as the :class:`~library.los_response.LOSResponse`.
In this operator a line integral is discretised, so a 1-dimensional volume factor is applied.
- In :class:`~library.correlated_fields.CorrelatedField` as well as :class:`~library.correlated_fields.MfCorrelatedField`.
Both describe fields with a smooth, a priori unknown correlation structure specified by a power spectrum.
The field is multiplied by the square root of the total volume of its domain's harmonic counterpart.
This ensures that the same power spectrum can be used regardless of the chosen resolution, provided the total volume of the space remains the same.
It also guarantees that the power spectra in NIFTy behave according to their definition, i.e. the power of a mode :math:`s_k` is the expectation value of that mode squared, divided by the volume of its space :math:`P(k) = \left\langle s_k^2 \right\rangle / V_k`.
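The first point can be verified directly (again with arbitrary numbers; for real input the zero mode is complex with vanishing imaginary part)::

    import numpy as np
    import nifty5 as ift

    dom = ift.RGSpace(64, distances=0.1)
    fft = ift.FFTOperator(dom)           # target defaults to the harmonic partner
    s = ift.from_global_data(dom, np.random.standard_normal(64))
    print(s.integrate())                 # integral in position space
    print(fft(s).to_global_data()[0])    # zero mode of the transform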
Note that in contrast to some older versions of NIFTy, the dot product :code:`s.vdot(t)` of fields does **not** apply a volume factor, but instead just sums over the field components,
......
......@@ -45,19 +45,22 @@ from .operators.block_diagonal_operator import BlockDiagonalOperator
from .operators.outer_product_operator import OuterProduct
from .operators.simple_linear_operators import (
VdotOperator, ConjugationOperator, Realizer,
FieldAdapter, ducktape, GeometryRemover, NullOperator)
FieldAdapter, ducktape, GeometryRemover, NullOperator,
MatrixProductOperator, PartialExtractor)
from .operators.value_inserter import ValueInserter
from .operators.energy_operators import (
EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood,
BernoulliEnergy, StandardHamiltonian, AveragedEnergy)
BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator,
Squared2NormOperator)
from .operators.convolution_operators import FuncConvolutionOperator
from .probing import probe_with_posterior_samples, probe_diagonal, \
StatCalculator
StatCalculator, approximation2endo
from .minimization.line_search import LineSearch
from .minimization.iteration_controllers import (
IterationController, GradientNormController, DeltaEnergyController,
GradInfNormController)
GradInfNormController, AbsDeltaEnergyController)
from .minimization.minimizer import Minimizer
from .minimization.conjugate_gradient import ConjugateGradient
from .minimization.nonlinear_cg import NonlinearCG
......@@ -73,7 +76,8 @@ from .minimization.metric_gaussian_kl import MetricGaussianKL
from .sugar import *
from .plot import Plot
from .library.smooth_linear_amplitude import SLAmplitude, CepstrumOperator
from .library.smooth_linear_amplitude import (
SLAmplitude, LinearSLAmplitude, CepstrumOperator)
from .library.inverse_gamma_operator import InverseGammaOperator
from .library.los_response import LOSResponse
from .library.dynamic_operator import (dynamic_operator,
......@@ -84,6 +88,7 @@ from .library.wiener_filter_curvature import WienerFilterCurvature
from .library.correlated_fields import CorrelatedField, MfCorrelatedField
from .library.adjust_variances import (make_adjust_variances_hamiltonian,
do_adjust_variances)
from .library.gridder import GridderMaker
from . import extra
......@@ -93,5 +98,12 @@ from .logger import logger
from .linearization import Linearization
from .operator_spectrum import operator_spectrum
from . import internal_config
_scheme = internal_config.parallelization_scheme()
if _scheme == "Samples":
from .minimization.metric_gaussian_kl_mpi import MetricGaussianKL_MPI
# We deliberately don't set __all__ here, because we don't want people to do a
# "from nifty5 import *"; that would swamp the global namespace.
......@@ -235,7 +235,6 @@ class data_object(object):
for op in ["__add__", "__radd__", "__iadd__",
"__sub__", "__rsub__", "__isub__",
"__mul__", "__rmul__", "__imul__",
"__div__", "__rdiv__", "__idiv__",
"__truediv__", "__rtruediv__", "__itruediv__",
"__floordiv__", "__rfloordiv__", "__ifloordiv__",
"__pow__", "__rpow__", "__ipow__",
......
......@@ -149,7 +149,7 @@ def ensure_default_distributed(arr):
def absmax(arr):
return np.linalg.norm(arr.rehape(-1), ord=np.inf)
return np.linalg.norm(arr.reshape(-1), ord=np.inf)
def norm(arr, ord=2):
......
......@@ -15,11 +15,19 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
from . import internal_config
_scheme = internal_config.parallelization_scheme()
if _scheme in ("Samples", "None"):
from .data_objects.numpy_do import *
else:
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
from .data_objects.numpy_do import *
......@@ -103,7 +103,9 @@ class GLSpace(StructuredDomain):
The partner domain
"""
from ..domains.lm_space import LMSpace
return LMSpace(lmax=self._nlat-1, mmax=self._nlon//2)
mmax = self._nlon//2
lmax = max(mmax, self._nlat-1)
return LMSpace(lmax=lmax, mmax=mmax)
def check_codomain(self, codomain):
"""Raises `TypeError` if `codomain` is not a matching partner domain
......
......@@ -103,6 +103,36 @@ class LMSpace(StructuredDomain):
def get_fft_smoothing_kernel_function(self, sigma):
return lambda x: self._kernel(x, sigma)
def get_conv_kernel_from_func(self, func):
"""Creates a convolution kernel defined by a function.
Parameters
----------
func: function
This function needs to take exactly one argument, which is
colatitude in radians, and return the kernel amplitude at that
colatitude.
The function is assumed to be azimuthally symmetric,
i.e. to depend only on the colatitude theta (in radians)."""
from .gl_space import GLSpace
from ..operators.harmonic_operators import HarmonicTransformOperator
import pyHealpix
# define azimuthally symmetric spaces for kernel transform
gl = GLSpace(self.lmax + 1, 1)
lm0 = gl.get_default_codomain()
theta = pyHealpix.GL_thetas(gl.nlat)
# evaluate the kernel function at the required thetas
kernel_sphere = Field.from_global_data(gl, func(theta))
# normalize the kernel such that the integral over the sphere is 4pi
kernel_sphere = kernel_sphere * (4 * np.pi / kernel_sphere.integrate())
# compute the spherical harmonic coefficients of the kernel
op = HarmonicTransformOperator(lm0, gl)
kernel_lm = op.adjoint_times(kernel_sphere.weight(1)).to_global_data()
# evaluate the k lengths of the harmonic space
k_lengths = self.get_k_length_array().to_global_data().astype(np.int)
return Field.from_global_data(self, kernel_lm[k_lengths])
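# Hypothetical usage sketch (resolution and kernel shape are illustrative only):
#
#     import numpy as np
#     import nifty5 as ift
#     sp = ift.HPSpace(64)
#     lm = sp.get_default_codomain()
#     kernel = lm.get_conv_kernel_from_func(
#         lambda theta: np.exp(-(theta/0.1)**2))
#
# The returned Field lives on this LMSpace and can be used for harmonic
# convolution (cf. FuncConvolutionOperator).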
@property
def lmax(self):
"""int : maximum allowed :math:`l`
......
......@@ -50,6 +50,8 @@ class LogRGSpace(StructuredDomain):
self._bindistances = tuple(bindistances)
self._t_0 = tuple(t_0)
if min(self._bindistances) <= 0:
raise ValueError('Non-positive bindistances encountered')
self._dim = int(reduce(lambda x, y: x*y, self._shape))
self._dvol = float(reduce(lambda x, y: x*y, self._bindistances))
......@@ -80,8 +82,8 @@ class LogRGSpace(StructuredDomain):
return np.array(self._t_0)
def __repr__(self):
return ("LogRGSpace(shape={}, harmonic={})".format(
self.shape, self.harmonic))
return ("LogRGSpace(shape={}, bindistances={}, t_0={}, harmonic={})".format(
self.shape, self.bindistances, self.t_0, self.harmonic))
def get_default_codomain(self):
"""Returns a :class:`LogRGSpace` object representing the (position or
......@@ -91,10 +93,10 @@ class LogRGSpace(StructuredDomain):
Returns
-------
LogRGSpace
The parter domain
The partner domain
"""
codomain_bindistances = 1./(self.bindistances*self.shape)
return LogRGSpace(self.shape, codomain_bindistances, self._t_0, True)
return LogRGSpace(self.shape, codomain_bindistances, self._t_0, not self.harmonic)
def get_k_length_array(self):
"""Generates array of distances to origin of the space.
......
......@@ -165,6 +165,8 @@ class PowerSpace(StructuredDomain):
if binbounds is not None:
binbounds = tuple(binbounds)
if min(binbounds) < 0:
raise ValueError('Negative binbounds encountered')
key = (harmonic_partner, binbounds)
if self._powerIndexCache.get(key) is None:
......
......@@ -54,6 +54,8 @@ class RGSpace(StructuredDomain):
if np.isscalar(shape):
shape = (shape,)
self._shape = tuple(int(i) for i in shape)
if min(self._shape) < 0:
raise ValueError('Negative number of pixels encountered')
if distances is None:
if self.harmonic:
......@@ -66,6 +68,8 @@ class RGSpace(StructuredDomain):
temp = np.empty(len(self.shape), dtype=np.float64)
temp[:] = distances
self._distances = tuple(temp)
if min(self._distances) <= 0:
raise ValueError('Non-positive distances encountered')
self._dvol = float(reduce(lambda x, y: x*y, self._distances))
self._size = int(reduce(lambda x, y: x*y, self._shape))
......@@ -90,9 +94,7 @@ class RGSpace(StructuredDomain):
def scalar_dvol(self):
return self._dvol
def get_k_length_array(self):
if (not self.harmonic):
raise NotImplementedError
def _get_dist_array(self):
ibegin = dobj.ibegin_from_shape(self._shape)
res = np.arange(self.local_shape[0], dtype=np.float64) + ibegin[0]
res = np.minimum(res, self.shape[0]-res)*self.distances[0]
......@@ -106,6 +108,11 @@ class RGSpace(StructuredDomain):
res = np.add.outer(res, tmp)
return Field.from_local_data(self, np.sqrt(res))
def get_k_length_array(self):
if (not self.harmonic):
raise NotImplementedError
return self._get_dist_array()
def get_unique_k_lengths(self):
if (not self.harmonic):
raise NotImplementedError
......@@ -146,6 +153,27 @@ class RGSpace(StructuredDomain):
raise NotImplementedError
return lambda x: self._kernel(x, sigma)
def get_conv_kernel_from_func(self, func):
"""Creates a convolution kernel defined by a function.
Parameters
----------
func: function
This function needs to take exactly one argument, which is
distance from center (in the same units as the RGSpace distances),
and return the kernel amplitude at that distance.
The function is assumed to be radially symmetric,
i.e. to depend only on the distance from the center."""
from ..operators.harmonic_operators import HarmonicTransformOperator
if (not self.harmonic):
raise NotImplementedError
op = HarmonicTransformOperator(self, self.get_default_codomain())
dist = op.target[0]._get_dist_array()
kernel = Field.from_local_data(op.target, func(dist.local_data))
kernel = kernel / kernel.integrate()
return op.adjoint_times(kernel.weight(1))
def get_default_codomain(self):
"""Returns a :class:`RGSpace` object representing the (position or
harmonic) partner domain of `self`, depending on `self.harmonic`.
......@@ -153,7 +181,7 @@ class RGSpace(StructuredDomain):
Returns
-------
RGSpace
The parter domain
The partner domain
"""
distances = 1. / (np.array(self.shape)*np.array(self.distances))
return RGSpace(self.shape, distances, not self.harmonic)
......
......@@ -17,8 +17,10 @@
import numpy as np
from .domain_tuple import DomainTuple
from .field import Field
from .linearization import Linearization
from .multi_domain import MultiDomain
from .operators.linear_operator import LinearOperator
from .sugar import from_random
......@@ -33,7 +35,8 @@ def _assert_allclose(f1, f2, atol, rtol):
_assert_allclose(val, f2[key], atol=atol, rtol=rtol)
def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear):
needed_cap = op.TIMES | op.ADJOINT_TIMES
if (op.capability & needed_cap) != needed_cap:
return
......@@ -41,6 +44,8 @@ def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
f2 = from_random("normal", op.target, dtype=target_dtype)
res1 = f1.vdot(op.adjoint_times(f2))
res2 = op.times(f1).vdot(f2)
if only_r_linear:
res1, res2 = res1.real, res2.real
np.testing.assert_allclose(res1, res2, atol=atol, rtol=rtol)
......@@ -57,22 +62,35 @@ def _inverse_implementation(op, domain_dtype, target_dtype, atol, rtol):
_assert_allclose(res, foo, atol=atol, rtol=rtol)
def _full_implementation(op, domain_dtype, target_dtype, atol, rtol):
_adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol)
def _full_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear):
_adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear)
_inverse_implementation(op, domain_dtype, target_dtype, atol, rtol)
def _check_linearity(op, domain_dtype, atol, rtol):
needed_cap = op.TIMES
if (op.capability & needed_cap) != needed_cap:
return
fld1 = from_random("normal", op.domain, dtype=domain_dtype)
fld2 = from_random("normal", op.domain, dtype=domain_dtype)
alpha = np.random.random()
alpha = np.random.random() # FIXME: this can break badly with MPI!
val1 = op(alpha*fld1+fld2)
val2 = alpha*op(fld1)+op(fld2)
_assert_allclose(val1, val2, atol=atol, rtol=rtol)
def _domain_check(op):
for dd in [op.domain, op.target]:
if not isinstance(dd, (DomainTuple, MultiDomain)):
raise TypeError(
'The domain and the target of an operator need to '
'be instances of either DomainTuple or MultiDomain.')
def consistency_check(op, domain_dtype=np.float64, target_dtype=np.float64,
atol=0, rtol=1e-7):
atol=0, rtol=1e-7, only_r_linear=False):
"""
Checks an operator for algebraic consistency of its capabilities.
......@@ -98,15 +116,25 @@ def consistency_check(op, domain_dtype=np.float64, target_dtype=np.float64,
Relative tolerance for the check. If atol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
only_r_linear: bool
set to True if the operator is only R-linear, not C-linear.
This will relax the adjointness test accordingly.
"""
if not isinstance(op, LinearOperator):
raise TypeError('This test tests only linear operators.')
_domain_check(op)
_check_linearity(op, domain_dtype, atol, rtol)
_full_implementation(op, domain_dtype, target_dtype, atol, rtol)
_full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol)
_full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol)
_check_linearity(op.adjoint, target_dtype, atol, rtol)
_check_linearity(op.inverse, target_dtype, atol, rtol)
_check_linearity(op.adjoint.inverse, domain_dtype, atol, rtol)
_full_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint.inverse, domain_dtype, target_dtype, atol,
rtol)
rtol, only_r_linear)
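# Hypothetical usage sketch for the new `only_r_linear` flag (operator and
# dtypes below are illustrative only):
#
#     import numpy as np
#     import nifty5 as ift
#     op = ift.Realizer(ift.RGSpace(8))   # R-linear, but not C-linear
#     ift.extra.consistency_check(op, domain_dtype=np.complex128,
#                                 target_dtype=np.float64,
#                                 only_r_linear=True)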
def _get_acceptable_location(op, loc, lin):
......@@ -151,6 +179,7 @@ def check_jacobian_consistency(op, loc, tol=1e-8, ntries=100):
tol : float
Tolerance for the check.
"""
_domain_check(op)
for _ in range(ntries):
lin = op(Linearization.make_var(loc))
loc2, lin2 = _get_acceptable_location(op, loc, lin)
......
......@@ -15,152 +15,28 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from .utilities import iscomplextype
import numpy as np
import pypocketfft
_nthreads = 1
_use_fftw = False
_fftw_prepped = False
_fft_extra_args = {}
def nthreads():
return _nthreads
def enable_fftw():
global _use_fftw
_use_fftw = True
def disable_fftw():
global _use_fftw
_use_fftw = False
def _init_pyfftw():
global _fft_extra_args, _fftw_prepped
if not _fftw_prepped:
import pyfftw
from pyfftw.interfaces.numpy_fft import fftn, rfftn, ifftn
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(1000.)
# Optional extra arguments for the FFT calls
# if exact reproducibility is needed,
# set "planner_effort" to "FFTW_ESTIMATE"
import os
nthreads = int(os.getenv("OMP_NUM_THREADS", "1"))
_fft_extra_args = dict(planner_effort='FFTW_ESTIMATE',
threads=nthreads)
_fftw_prepped = True
def set_nthreads(nthr):
global _nthreads
_nthreads = int(nthr)
def fftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import fftn
_init_pyfftw()
return fftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.fftn(a, axes=axes)
def rfftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import rfftn
_init_pyfftw()
return rfftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.rfftn(a, axes=axes)
return pypocketfft.c2c(a, axes=axes, nthreads=_nthreads)
def ifftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import ifftn
_init_pyfftw()
return ifftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.ifftn(a, axes=axes)
return pypocketfft.c2c(a, axes=axes, inorm=2, forward=False,
nthreads=_nthreads)
def hartley(a, axes=None):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis < len(a.shape) for axis in axes):
raise ValueError("Provided axes do not match array shape")
if iscomplextype(a.dtype):
raise TypeError("Hartley transform requires real-valued arrays.")
tmp = rfftn(a, axes=axes)
def _fill_array(tmp, res, axes):
if axes is None:
axes = tuple(range(tmp.ndim))
lastaxis = axes[-1]
ntmplast = tmp.shape[lastaxis]
slice1 = (slice(None),)*lastaxis + (slice(0, ntmplast),)
np.add(tmp.real, tmp.imag, out=res[slice1])
def _fill_upper_half(tmp, res, axes):
lastaxis = axes[-1]
nlast = res.shape[lastaxis]
ntmplast = tmp.shape[lastaxis]
nrem = nlast - ntmplast
slice1 = [slice(None)]*lastaxis + [slice(ntmplast, None)]
slice2 = [slice(None)]*lastaxis + [slice(nrem, 0, -1)]
for i in axes[:-1]:
slice1[i] = slice(1, None)
slice2[i] = slice(None, 0, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
np.subtract(tmp[slice2].real, tmp[slice2].imag, out=res[slice1])
for i, ax in enumerate(axes[:-1]):
dim1 = (slice(None),)*ax + (slice(0, 1),)
axes2 = axes[:i] + axes[i+1:]
_fill_upper_half(tmp[dim1], res[dim1], axes2)
_fill_upper_half(tmp, res, axes)
return res
return _fill_array(tmp, np.empty_like(a), axes)
# Do a real-to-complex forward FFT and return the _full_ output array
def my_fftn_r2c(a, axes=None):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis < len(a.shape) for axis in axes):
raise ValueError("Provided axes do not match array shape")
if iscomplextype(a.dtype):
raise TypeError("Transform requires real-valued input arrays.")
tmp = rfftn(a, axes=axes)
def _fill_complex_array(tmp, res, axes):
if axes is None:
axes = tuple(range(tmp.ndim))
lastaxis = axes[-1]
ntmplast = tmp.shape[lastaxis]
slice1 = [slice(None)]*lastaxis + [slice(0, ntmplast)]
res[tuple(slice1)] = tmp
def _fill_upper_half_complex(tmp, res, axes):
lastaxis = axes[-1]
nlast = res.shape[lastaxis]
ntmplast = tmp.shape[lastaxis]
nrem = nlast - ntmplast
slice1 = [slice(None)]*lastaxis + [slice(ntmplast, None)]
slice2 = [slice(None)]*lastaxis + [slice(nrem, 0, -1)]
for i in axes[:-1]:
slice1[i] = slice(1, None)
slice2[i] = slice(None, 0, -1)
# np.conjugate(tmp[slice2], out=res[slice1])
res[tuple(slice1)] = np.conjugate(tmp[tuple(slice2)])
for i, ax in enumerate(axes[:-1]):
dim1 = tuple([slice(None)]*ax + [slice(0, 1)])
axes2 = axes[:i] + axes[i+1:]
_fill_upper_half_complex(tmp[dim1], res[dim1], axes2)
_fill_upper_half_complex(tmp, res, axes)
return res
return _fill_complex_array(tmp, np.empty_like(a, dtype=tmp.dtype), axes)
def my_fftn(a, axes=None):
return fftn(a, axes=axes)
return pypocketfft.genuine_hartley(a, axes=axes, nthreads=_nthreads)
......@@ -626,6 +626,11 @@ class Field(object):
raise ValueError("domain mismatch")
return self
def extract_part(self, dom):
if dom != self._domain:
raise ValueError("domain mismatch")
return self
def unite(self, other):
return self+other
......@@ -636,6 +641,8 @@ class Field(object):
return 0.5*(1.+self.tanh())
def clip(self, min=None, max=None):
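# Bounds may be given either as scalars or as Fields (sketch):
#     f.clip(min=0., max=ift.full(f.domain, 1.))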
min = min.local_data if isinstance(min, Field) else min
max = max.local_data if isinstance(max, Field) else max
return Field(self._domain, dobj.clip(self._val, min, max))
def one_over(self):
......@@ -656,7 +663,6 @@ class Field(object):
for op in ["__add__", "__radd__",
"__sub__", "__rsub__",
"__mul__", "__rmul__",
"__div__", "__rdiv__",
"__truediv__", "__rtruediv__",
"__floordiv__", "__rfloordiv__",
"__pow__", "__rpow__",
......
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
# Internal configuration switches, typically for experimental features.
# Leave unchanged unless you know what you are doing!
def parallelization_scheme():
    return "Standard"
    # return "Samples"
    # return "None"
......@@ -17,9 +17,8 @@
from ..minimization.energy_adapter import EnergyAdapter
from ..multi_field import MultiField
from ..operators.distributors import PowerDistributor
from ..operators.energy_operators import (StandardHamiltonian,
InverseGammaLikelihood)
from ..operators.energy_operators import (InverseGammaLikelihood,
StandardHamiltonian)
from ..operators.scaling_operator import ScalingOperator
from ..operators.simple_linear_operators import ducktape
......@@ -79,27 +78,23 @@ def make_adjust_variances_hamiltonian(a,
ic_samp=ic_samp)
def do_adjust_variances(position,
amplitude_operator,
minimizer,
xi_key='xi',
samples=[]):
def do_adjust_variances(position, A, minimizer, xi_key='xi', samples=[]):
"""Adjusts the variance of xi_key to be represented by amplitude_operator.
Solves a constant likelihood optimization of the
form phi = amplitude_operator * position[xi_key] under the constraint that
phi remains constant.
form phi = A * position[xi_key] under the constraint that phi remains
constant.
The field indexed by xi_key is desired to be a Gaussian white Field,
thus variations that are more easily represented by amplitude_operator
will be absorbed in amplitude_operator.
thus variations that are more easily represented by A will be absorbed in
A.
Parameters
----------
position : Field, MultiField
Contains the initial values for amplitude_operator and the key xi_key,
to be adjusted.
amplitude_operator : Operator
A : Operator
Gives the amplitude when evaluated at position.
minimizer : Minimizer
Used to solve the optimization problem.
......@@ -113,27 +108,20 @@ def do_adjust_variances(position,
Returns
-------
MultiField
The new position after variances were adjusted.
The new position after variances have been adjusted.
"""
h_space = position[xi_key].domain[0]
pd = PowerDistributor(h_space, amplitude_operator.target[0])
a = pd(amplitude_operator)
xi = ducktape(None, position.domain, xi_key)
ham = make_adjust_variances_hamiltonian(a, xi, position, samples=samples)
ham = make_adjust_variances_hamiltonian(A, xi, position, samples=samples)
# Minimize
e = EnergyAdapter(
position.extract(a.domain), ham, constants=[], want_metric=True)
position.extract(A.domain), ham, constants=[], want_metric=True)
e, _ = minimizer(e)
# Update position
s_h_old = (a*xi).force(position)
s_h_old = (A*xi).force(position)
position = position.to_dict()
position[xi_key] = s_h_old/a(e.position)
position[xi_key] = s_h_old/A(e.position)
position = MultiField.from_dict(position)
position = MultiField.union([position, e.position])
return position
return MultiField.union([position, e.position])
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
from ..domain_tuple import DomainTuple
from ..domains.rg_space import RGSpace
from ..domains.unstructured_domain import UnstructuredDomain
from ..operators.linear_operator import LinearOperator
from ..sugar import from_global_data, makeDomain
class GridderMaker(object):
    def __init__(self, dirty_domain, uv, eps=2e-13):
        import nifty_gridder
        dirty_domain = makeDomain(dirty_domain)
        if (len(dirty_domain) != 1 or not isinstance(dirty_domain[0], RGSpace)
                or not len(dirty_domain.shape) == 2):
            raise ValueError("need dirty_domain with exactly one 2D RGSpace")
        if uv.ndim != 2:
            raise ValueError("uv must be a 2D array")
        if uv.shape[1] != 2:
            raise ValueError("second dimension of uv must have length 2")
        dstx, dsty = dirty_domain[0].distances
        # wasteful hack to adjust to shape required by nifty_gridder
        uvw = np.empty((uv.shape[0], 3), dtype=np.float64)
        uvw[:, 0:2] = uv
        uvw[:, 2] = 0.
        # Scale uv such that 0<uv<=1 which is assumed by nifty_gridder
        uvw[:, 0] = uvw[:, 0]*dstx
        uvw[:, 1] = uvw[:, 1]*dsty
        speedOfLight = 299792458.
        bl = nifty_gridder.Baselines(uvw, np.array([speedOfLight]))
        nxdirty, nydirty = dirty_domain.shape
        gconf = nifty_gridder.GridderConfig(nxdirty, nydirty, eps, 1., 1.)
        nu, nv = gconf.Nu(), gconf.Nv()
        self._idx = nifty_gridder.getIndices(
            bl, gconf, np.zeros((uv.shape[0], 1), dtype=np.bool))
        self._bl = bl
        du, dv = 1./(nu*dstx), 1./(nv*dsty)
        grid_domain = RGSpace([nu, nv], distances=[du, dv], harmonic=True)
        self._rest = _RestOperator(dirty_domain, grid_domain, gconf)
        self._gridder = RadioGridder(grid_domain, bl, gconf, self._idx)

    def getGridder(self):
        return self._gridder

    def getRest(self):
        return self._rest

    def getFull(self):
        return self.getRest() @ self._gridder

    def ms2vis(self, x):
        return self._bl.ms2vis(x, self._idx)


class _RestOperator(LinearOperator):
    def __init__(self, dirty_domain, grid_domain, gconf):
        self._domain = makeDomain(grid_domain)
        self._target = makeDomain(dirty_domain)
        self._gconf = gconf
        self._capability = self.TIMES | self.ADJOINT_TIMES

    def apply(self, x, mode):
        self._check_input(x, mode)
        res = x.to_global_data()
        if mode == self.TIMES:
            res = self._gconf.grid2dirty(res)
        else:
            res = self._gconf.dirty2grid(res)
        return from_global_data(self._tgt(mode), res)


class RadioGridder(LinearOperator):
    def __init__(self, grid_domain, bl, gconf, idx):
        self._domain = DomainTuple.make(
            UnstructuredDomain((bl.Nrows())))
        self._target = DomainTuple.make(grid_domain)
        self._bl = bl
        self._gconf = gconf
        self._idx = idx
        self._capability = self.TIMES | self.ADJOINT_TIMES

    def apply(self, x, mode):
        import nifty_gridder
        self._check_input(x, mode)
        if mode == self.TIMES:
            x = self._bl.ms2vis(x.to_global_data().reshape((-1, 1)), self._idx)
            res = nifty_gridder.vis2grid(self._bl, self._gconf, self._idx, x)
        else:
            res = nifty_gridder.grid2vis(self._bl, self._gconf, self._idx,
                                         x.to_global_data())
            res = self._bl.vis2ms(res, self._idx).reshape((-1,))
        return from_global_data(self._tgt(mode), res)
......@@ -169,6 +169,18 @@ def SLAmplitude(*, target, n_pix, a, k0, sm, sv, im, iv, keys=['tau', 'phi']):
which returns on its target a power spectrum consisting of a
smooth and a linear part.
'''
return LinearSLAmplitude(target=target, n_pix=n_pix, a=a, k0=k0, sm=sm,
sv=sv, im=im, iv=iv, keys=keys).exp()
def LinearSLAmplitude(*, target, n_pix, a, k0, sm, sv, im, iv,
keys=['tau', 'phi']):
'''LinearOperator for parametrizing smooth log-amplitudes (square roots of
power spectra).
Logarithm of SLAmplitude. See the documentation of SLAmplitude for more
details.
'''
if not (isinstance(n_pix, int) and isinstance(target, PowerSpace)):
raise TypeError
......@@ -196,4 +208,4 @@ def SLAmplitude(*, target, n_pix, a, k0, sm, sv, im, iv, keys=['tau', 'phi']):
loglog_ampl = 0.5*(smooth + linear)
# Go from loglog-space to linear-linear-space
return et @ loglog_ampl.exp()
return et @ loglog_ampl
......@@ -20,6 +20,7 @@ import numpy as np
from .field import Field
from .multi_field import MultiField
from .sugar import makeOp
from . import utilities
class Linearization(object):
......@@ -108,8 +109,7 @@ class Linearization(object):
return self._metric
def __getitem__(self, name):
from .operators.simple_linear_operators import ducktape
return self.new(self._val[name], ducktape(None, self.domain, name))
return self.new(self._val[name], self._jac.ducktape_left(name))
def __neg__(self):
return self.new(-self._val, -self._jac,
......@@ -298,6 +298,10 @@ class Linearization(object):
tmp2 = makeOp(1. - (tmp == min) - (tmp == max))
return self.new(tmp, tmp2(self._jac))
def sqrt(self):
tmp = self._val.sqrt()
return self.new(tmp, makeOp(0.5/tmp)(self._jac))
def sin(self):
tmp = self._val.sin()
tmp2 = self._val.cos()
......@@ -315,7 +319,11 @@ class Linearization(object):
def sinc(self):
tmp = self._val.sinc()
tmp2 = (self._val.cos()-tmp)/self._val
tmp2 = ((np.pi*self._val).cos()-tmp)/self._val
ind = self._val.local_data == 0
loc = tmp2.local_data.copy()
loc[ind] = 0
tmp2 = Field.from_local_data(tmp.domain, loc)
return self.new(tmp, makeOp(tmp2)(self._jac))
def log(self):
......@@ -342,8 +350,16 @@ class Linearization(object):
return self.new(tmp2, makeOp(0.5*(1.-tmp**2))(self._jac))
def absolute(self):
if utilities.iscomplextype(self._val.dtype):
raise TypeError("Argument must not be complex")
tmp = self._val.absolute()
tmp2 = self._val.sign()
ind = self._val.local_data == 0
loc = tmp2.local_data.copy().astype(float)
loc[ind] = np.nan
tmp2 = Field.from_local_data(tmp.domain, loc)
return self.new(tmp, makeOp(tmp2)(self._jac))