......@@ -2,6 +2,7 @@
git_version.py
# custom
*.txt
setup.cfg
.idea
.DS_Store
......
......@@ -10,10 +10,11 @@ RUN apt-get update && apt-get install -y \
# Testing dependencies
python3-pytest-cov jupyter \
# Optional NIFTy dependencies
libfftw3-dev python3-mpi4py python3-matplotlib \
python3-mpi4py python3-matplotlib \
# more optional NIFTy dependencies
&& pip3 install pyfftw \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git \
&& pip3 install git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft.git \
&& pip3 install jupyter \
&& rm -rf /var/lib/apt/lists/*
......
......@@ -47,11 +47,13 @@ Installation
- [Python 3](https://www.python.org/) (3.5.x or later)
- [SciPy](https://www.scipy.org/)
- [pypocketfft](https://gitlab.mpcdf.mpg.de/mtr/pypocketfft)
Optional dependencies:
- [pyFFTW](https://pypi.python.org/pypi/pyFFTW) for faster Fourier transforms
- [pyHealpix](https://gitlab.mpcdf.mpg.de/ift/pyHealpix) (for harmonic
transforms involving domains on the sphere)
- [nifty_gridder](https://gitlab.mpcdf.mpg.de/ift/nifty_gridder) (for radio
interferometry responses)
- [mpi4py](https://mpi4py.scipy.org) (for MPI-parallel execution)
- [matplotlib](https://matplotlib.org/) (for field plotting)
......@@ -60,7 +62,7 @@ Optional dependencies:
The current version of Nifty5 can be obtained by cloning the repository and
switching to the NIFTy_5 branch:
git clone https://gitlab.mpcdf.mpg.de/ift/NIFTy.git
git clone https://gitlab.mpcdf.mpg.de/ift/nifty.git
### Installation
......@@ -70,33 +72,21 @@ distributions, the "apt" lines will need slight changes.
NIFTy5 and its mandatory dependencies can be installed via:
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/NIFTy.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via:
sudo apt-get install python3-matplotlib
NIFTy uses Numpy's FFT implementation by default. For large problems FFTW may be
used because of its higher performance. It can be installed via:
sudo apt-get install libfftw3-dev
pip3 install --user pyfftw
To enable FFTW usage in NIFTy, call
nifty5.fft.enable_fftw()
at the beginning of your code.
(Note: If you encounter problems related to `pyFFTW`, make sure that you are
using a pip-installed `pyFFTW` package. Unfortunately, some distributions are
shipping an incorrectly configured `pyFFTW` package, which does not cooperate
with the installed `FFTW3` libraries.)
Support for spherical harmonic transforms is added via:
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
Support for the radio interferometry gridder is added via:
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git
MPI support is added via:
sudo apt-get install python3-mpi4py
......
from time import time
import matplotlib.pyplot as plt
import numpy as np
import nifty5 as ift
np.random.seed(40)
N0s, a0s, b0s, c0s = [], [], [], []
for ii in range(10, 26):
nu = 1024
nv = 1024
N = int(2**ii)
print('N = {}'.format(N))
uv = np.random.rand(N, 2) - 0.5
vis = np.random.randn(N) + 1j*np.random.randn(N)
uvspace = ift.RGSpace((nu, nv))
visspace = ift.UnstructuredDomain(N)
img = np.random.randn(nu*nv)
img = img.reshape((nu, nv))
img = ift.from_global_data(uvspace, img)
t0 = time()
GM = ift.GridderMaker(uvspace, eps=1e-7, uv=uv)
vis = ift.from_global_data(visspace, vis)
op = GM.getFull().adjoint
t1 = time()
op(img).to_global_data()
t2 = time()
op.adjoint(vis).to_global_data()
t3 = time()
print(t2-t1, t3-t2)
N0s.append(N)
a0s.append(t1 - t0)
b0s.append(t2 - t1)
c0s.append(t3 - t2)
print('Measure rest operator')
sc = ift.StatCalculator()
op = GM.getRest().adjoint
for _ in range(10):
t0 = time()
res = op(img)
sc.add(time() - t0)
t_fft = sc.mean
print('FFT shape', res.shape)
plt.scatter(N0s, a0s, label='Gridder mr')
plt.legend()
# no idea why this is necessary, but if it is omitted, the range is wrong
plt.ylim(min(a0s), max(a0s))
plt.ylabel('time [s]')
plt.title('Initialization')
plt.loglog()
plt.savefig('bench0.png')
plt.close()
plt.scatter(N0s, b0s, color='k', marker='^', label='Gridder mr times')
plt.scatter(N0s, c0s, color='k', label='Gridder mr adjoint times')
plt.axhline(sc.mean, label='FFT')
plt.axhline(sc.mean + np.sqrt(sc.var))
plt.axhline(sc.mean - np.sqrt(sc.var))
plt.legend()
plt.ylabel('time [s]')
plt.title('Apply')
plt.loglog()
plt.savefig('bench1.png')
plt.close()
......@@ -45,13 +45,6 @@ def make_random_mask():
return mask.to_global_data()
def mask_to_nan(mask, field):
# Set masked pixels to nan for plotting
masked_data = field.local_data.copy()
masked_data[mask.local_data == 0] = np.nan
return ift.from_local_data(field.domain, masked_data)
if __name__ == '__main__':
np.random.seed(42)
......@@ -64,7 +57,7 @@ if __name__ == '__main__':
if mode == 0:
# One-dimensional regular grid
position_space = ift.RGSpace([1024])
mask = np.ones(position_space.shape)
mask = np.zeros(position_space.shape)
elif mode == 1:
# Two-dimensional regular grid with checkerboard mask
position_space = ift.RGSpace([128, 128])
......@@ -101,23 +94,22 @@ if __name__ == '__main__':
# Build instrument response consisting of a discretization, mask
# and harmonic transformation
# Data is defined on a geometry-free space, thus the geometry is removed
GR = ift.GeometryRemover(position_space)
# Masking operator to model that parts of the field have not been observed
mask = ift.Field.from_global_data(position_space, mask)
Mask = ift.DiagonalOperator(mask)
Mask = ift.MaskOperator(mask)
# The response operator consists of
# - an harmonic transform (to get to image space)
# - a harmonic transform (to get to image space)
# - the application of the mask
# - the removal of geometric information
# The removal of geometric information is included in the MaskOperator;
# it can also be implemented with a GeometryRemover.
# Operators can be composed either with parentheses
R = GR(Mask(HT))
R = Mask(HT)
# or with @
R = GR @ Mask @ HT
R = Mask @ HT
data_space = GR.target
data_space = R.target
# Set the noise covariance N
noise = 5.
......@@ -144,16 +136,17 @@ if __name__ == '__main__':
filename = "getting_started_1_mode_{}.png".format(mode)
if rg and len(position_space.shape) == 1:
plot.add(
[HT(MOCK_SIGNAL), GR.adjoint(data),
[HT(MOCK_SIGNAL), Mask.adjoint(data),
HT(m)],
label=['Mock signal', 'Data', 'Reconstruction'],
alpha=[1, .3, 1])
plot.add(mask_to_nan(mask, HT(m - MOCK_SIGNAL)), title='Residuals')
plot.add(Mask.adjoint(Mask(HT(m - MOCK_SIGNAL))), title='Residuals')
plot.output(nx=2, ny=1, xsize=10, ysize=4, name=filename)
else:
plot.add(HT(MOCK_SIGNAL), title='Mock Signal')
plot.add(mask_to_nan(mask, (GR(Mask)).adjoint(data)), title='Data')
plot.add(Mask.adjoint(data), title='Data')
plot.add(HT(m), title='Reconstruction')
plot.add(mask_to_nan(mask, HT(m - MOCK_SIGNAL)), title='Residuals')
plot.add(Mask.adjoint(Mask(HT(m) - HT(MOCK_SIGNAL))),
title='Residuals')
plot.output(nx=2, ny=2, xsize=10, ysize=10, name=filename)
print("Saved results as '{}'.".format(filename))
......@@ -103,13 +103,15 @@ if __name__ == '__main__':
data = signal_response(mock_position) + N.draw_sample()
# Minimization parameters
ic_sampling = ift.GradientNormController(iteration_limit=100)
ic_newton = ift.GradInfNormController(
name='Newton', tol=1e-7, iteration_limit=35)
ic_sampling = ift.AbsDeltaEnergyController(
name='Sampling', deltaE=0.05, iteration_limit=100)
ic_newton = ift.AbsDeltaEnergyController(
name='Newton', deltaE=0.5, iteration_limit=35)
minimizer = ift.NewtonCG(ic_newton)
# Set up likelihood and information Hamiltonian
likelihood = ift.GaussianEnergy(mean=data, covariance=N)(signal_response)
likelihood = ift.GaussianEnergy(mean=data,
inverse_covariance=N.inverse)(signal_response)
H = ift.StandardHamiltonian(likelihood, ic_sampling)
initial_mean = ift.MultiField.full(H.domain, 0.)
......
import numpy as np
import nifty5 as ift
def convtest(test_signal, delta, func):
domain = test_signal.domain
# Create Convolution Operator
conv_op = ift.FuncConvolutionOperator(domain, func)
# Convolve, Adjoint-Convolve
conv_signal = conv_op(test_signal)
cac_signal = conv_op.adjoint_times(conv_signal)
print(test_signal.integrate(), conv_signal.integrate(),
cac_signal.integrate())
# generate kernel image
conv_delta = conv_op(delta)
# Plot results
plot = ift.Plot()
plot.add(test_signal, title='Signal')
plot.add(conv_signal, title='Signal Convolved')
plot.add(cac_signal, title='Signal, Conv, Adj-Conv')
plot.add(conv_delta, title='Kernel')
plot.output()
# Healpix test
nside = 64
npix = 12 * nside * nside
domain = ift.HPSpace(nside)
# Define test signal (some point sources)
signal_vals = np.zeros(npix, dtype=np.float64)
for i in range(0, npix, npix//12 + 27):
signal_vals[i] = 500.
signal = ift.from_global_data(domain, signal_vals)
delta_vals = np.zeros(npix, dtype=np.float64)
delta_vals[0] = 1.0
delta = ift.from_global_data(domain, delta_vals)
# Define kernel function
def func(theta):
ct = np.cos(theta)
return 1. * np.logical_and(ct > 0.7, ct <= 0.8)
convtest(signal, delta, func)
domain = ift.RGSpace((100, 100))
# Define test signal (some point sources)
signal_vals = np.zeros(domain.shape, dtype=np.float64)
signal_vals[35, 70] = 5000.
signal_vals[45, 8] = 5000.
signal = ift.from_global_data(domain, signal_vals)
# Define delta signal, generate kernel image
delta_vals = np.zeros(domain.shape, dtype=np.float64)
delta_vals[0, 0] = 1.0
delta = ift.from_global_data(domain, delta_vals)
# Define kernel function
def func(dist):
return 1. * np.logical_and(dist > 0.1, dist <= 0.2)
convtest(signal, delta, func)
......@@ -2,6 +2,12 @@ NIFTy-related publications
==========================
::
@article{asclnifty5,
title={NIFTy5: Numerical Information Field Theory v5},
author={Arras, Philipp and Baltac, Mihai and Ensslin, Torsten A and Frank, Philipp and Hutschenreuter, Sebastian and Knollmueller, Jakob and Leike, Reimar and Newrzella, Max-Niklas and Platz, Lukas and Reinecke, Martin and others},
journal={Astrophysics Source Code Library},
year={2019}
}
@software{nifty,
author = {{Martin Reinecke, Theo Steininger, Marco Selig}},
......@@ -11,7 +17,7 @@ NIFTy-related publications
date = {2018-04-05},
}
@ARTICLE{2013A&A...554A..26S,
@article{2013A&A...554A..26S,
author = {{Selig}, M. and {Bell}, M.~R. and {Junklewitz}, H. and {Oppermann}, N. and {Reinecke}, M. and {Greiner}, M. and {Pachajoa}, C. and {En{\ss}lin}, T.~A.},
title = "{NIFTY - Numerical Information Field Theory. A versatile PYTHON library for signal inference}",
journal = {\aap},
......@@ -29,7 +35,7 @@ NIFTy-related publications
adsnote = {Provided by the SAO/NASA Astrophysics Data System}
}
@ARTICLE{2017arXiv170801073S,
@article{2017arXiv170801073S,
author = {{Steininger}, T. and {Dixit}, J. and {Frank}, P. and {Greiner}, M. and {Hutschenreuter}, S. and {Knollm{\"u}ller}, J. and {Leike}, R. and {Porqueres}, N. and {Pumpe}, D. and {Reinecke}, M. and {{\v S}raml}, M. and {Varady}, C. and {En{\ss}lin}, T.},
title = "{NIFTy 3 - Numerical Information Field Theory - A Python framework for multicomponent signal inference on HPC clusters}",
journal = {ArXiv e-prints},
......
......@@ -37,7 +37,7 @@ NIFTy comes with reimplemented MAP and VI estimators.
Free Theory & Implicit Operators
--------------------------------
A free IFT appears when the signal field :math:`{s}` and the noise :math:`{n}` of the data :math:`{d}` are independent, zero-centered Gaussian processes of kown covariances :math:`{S}` and :math:`{N}`, respectively,
A free IFT appears when the signal field :math:`{s}` and the noise :math:`{n}` of the data :math:`{d}` are independent, zero-centered Gaussian processes of known covariances :math:`{S}` and :math:`{N}`, respectively,
.. math::
......@@ -49,7 +49,7 @@ and the measurement equation is linear in both signal and noise,
d= R\, s + n,
with :math:`{R}` being the measurement response, which maps the continous signal field into the discrete data space.
with :math:`{R}` being the measurement response, which maps the continuous signal field into the discrete data space.
This is called a free theory, as the information Hamiltonian
......@@ -96,8 +96,8 @@ These implicit operators can be combined into new operators, e.g. to :math:`{D^{
The invocation of an inverse operator applied to a vector might trigger the execution of a numerical linear algebra solver.
Thus, when NIFTy calculates :math:`{m = D\, j}`, it actually solves :math:`{D^{-1} m = j}` for :math:`{m}` behind the scenes.
The advantage of implicit operators to explicit matrices is the reduced memory requirements.
The reconstruction of only a Megapixel image would otherwithe require the storage and processing of matrices with sizes of several Terabytes.
The advantage of implicit operators compared to explicit matrices is the reduced memory consumption;
for the reconstruction of just a Megapixel image the latter would already require several Terabytes.
Larger images could not be dealt with due to the quadratic memory requirements of explicit operator representations.
The demo codes `demos/getting_started_1.py` and `demos/Wiener_Filter.ipynb` illustrate this.
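A minimal sketch of such an implicit Wiener-filter setup (assuming the NIFTy5 API; the trivial response, the power spectrum and the controller settings are illustrative choices, loosely following `demos/getting_started_1.py`) could look like::

    import nifty5 as ift

    s_space = ift.RGSpace(1024)
    h_space = s_space.get_default_codomain()
    HT = ift.HarmonicTransformOperator(h_space, s_space)
    S = ift.create_power_operator(h_space, lambda k: 1./(1. + k**2))  # implicit prior covariance
    R = HT                                        # trivial response, for illustration only
    N = ift.ScalingOperator(0.1, R.target)        # noise covariance
    data = R(S.draw_sample()) + N.draw_sample()   # synthetic data
    # D^{-1} = R^dagger N^{-1} R + S^{-1} is never built as a matrix; applying
    # its inverse triggers a conjugate-gradient solver behind the scenes.
    IC = ift.GradientNormController(iteration_limit=100, tol_abs_gradnorm=1e-5)
    D_inv = R.adjoint @ N.inverse @ R + S.inverse
    D = ift.InversionEnabler(D_inv, IC, approximation=S.inverse).inverse
    j = R.adjoint(N.inverse(data))
    m = D(j)                                      # m = D j, solved via CG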
......@@ -106,7 +106,7 @@ The demo codes `demos/getting_started_1.py` and `demos/Wiener_Filter.ipynb` illu
Generative Models
-----------------
For more sophisticated measurement situations, involving non-linear measuremnts, unknown covariances, calibration constants and the like, it is recommended to formulate those as generative models for which NIFTy provides powerful inference algorithms.
For more sophisticated measurement situations (involving non-linear measurements, unknown covariances, calibration constants and the like) it is recommended to formulate those as generative models for which NIFTy provides powerful inference algorithms.
In a generative model, all known or unknown quantities are described as the results of generative processes, which start with simple probability distributions, like the uniform, the i.i.d. Gaussian, or the delta distribution.
......@@ -129,7 +129,7 @@ NIFTy takes advantage of this formulation in several ways:
1) All prior degrees of freedom have unit covariance, which improves the condition number of operators that need to be inverted.
2) The amplitude operator can be regarded as part of the response, :math:`{R'=R\,A}`.
In general, more sophisticated responses can be constructed out of the composition of simpler operators.
In general, more sophisticated responses can be obtained by combining simpler operators.
3) The response can be non-linear, e.g. :math:`{R'(s)=R \exp(A\,\xi)}`, see `demos/getting_started_2.py`.
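A minimal sketch of such a standardized model (assuming the NIFTy5 API; the fixed power spectrum and the trivial geometry-removing response are illustrative choices, not the setup used in the demos) could look like::

    import nifty5 as ift

    position_space = ift.RGSpace(128)
    harmonic_space = position_space.get_default_codomain()
    HT = ift.HarmonicTransformOperator(harmonic_space, position_space)
    power_space = ift.PowerSpace(harmonic_space)
    PD = ift.PowerDistributor(harmonic_space, power_space)
    prior_spectrum = ift.PS_field(power_space, lambda k: 1./(10. + k**2.5))
    A = ift.makeOp(PD(prior_spectrum).sqrt())     # amplitude operator A
    xi = ift.ducktape(harmonic_space, None, 'xi') # standardized excitation field
    R = ift.GeometryRemover(position_space)       # trivial response
    signal = HT @ A @ xi                          # s = HT(A xi)
    signal_response = R(signal.exp())             # non-linear response R'(xi) = R exp(s)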
......@@ -168,7 +168,7 @@ Maximum a Posteriori
One popular field estimation method is Maximum a Posteriori (MAP).
It only requires to minimize the information Hamiltonian, e.g by a gradient descent method that stops when
It only requires minimizing the information Hamiltonian, e.g. by a gradient descent method that stops when
.. math::
......
......@@ -8,33 +8,35 @@ distributions, the "apt" lines will need slight changes.
NIFTy5 and its mandatory dependencies can be installed via::
sudo apt-get install git python3 python3-pip python3-dev
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/NIFTy.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty.git@NIFTy_5
pip3 install --user git+https://gitlab.mpcdf.mpg.de/mtr/pypocketfft
Plotting support is added via::
sudo apt-get install python3-matplotlib
NIFTy uses Numpy's FFT implementation by default. For large problems FFTW may be
used because of its higher performance. It can be installed via::
Support for spherical harmonic transforms is added via::
sudo apt-get install libfftw3-dev
pip3 install --user pyfftw
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
To enable FFTW usage in NIFTy, call::
Support for the radio interferometry gridder is added via::
nifty5.fft.enable_fftw()
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/nifty_gridder.git
at the beginning of your code.
MPI support is added via::
(Note: If you encounter problems related to `pyFFTW`, make sure that you are
using a pip-installed `pyFFTW` package. Unfortunately, some distributions are
shipping an incorrectly configured `pyFFTW` package, which does not cooperate
with the installed `FFTW3` libraries.)
sudo apt-get install python3-mpi4py
Support for spherical harmonic transforms is added via::
NIFTy documentation is provided by Sphinx. To build the documentation::
pip3 install --user git+https://gitlab.mpcdf.mpg.de/ift/pyHealpix.git
sudo apt-get install python3-sphinx-rtd-theme dvipng
cd <nifty_directory>
sh docs/generate.sh
MPI support is added via::
To view the documentation in firefox::
firefox docs/build/index.html
(Note: Make sure that you reinstall nifty after each change since sphinx
imports nifty from the Python path.)
sudo apt-get install python3-mpi4py
Discretization and Volume in NIFTy
Discretisation and Volume in NIFTy
==================================
.. note:: Some of this discussion is rather technical and may be skipped in a first read-through.
......@@ -160,15 +160,21 @@ Often, log-likelihoods contain integrals over the quantity of interest :math:`s`
\int_\Omega \text{d}x\, s(x) \approx \sum_i s^i\int_{\Omega_i}\text{d}x\, 1
Here the domain of the integral :math:`\Omega = \dot{\bigcup_q} \; \Omega_i` is the disjoint union over smaller :math:`\Omega_i`, e.g. the pixels of the space, and :math:`s_i` is the discretized field value on the :math:`i`-th pixel.
Here the domain of the integral :math:`\Omega = \dot{\bigcup_q} \; \Omega_i` is the disjoint union over smaller :math:`\Omega_i`, e.g. the pixels of the space, and :math:`s_i` is the discretised field value on the :math:`i`-th pixel.
This introduces the weighting :math:`V_i=\int_{\Omega_i}\text{d}x\, 1`, also called the volume factor, a property of the space.
NIFTy aids you in constructing your own log-likelihood by providing methods like :func:`~field.Field.weight`, which weights all pixels of a field with their corresponding volume.
An integral over a :class:`~field.Field` :code:`s` can be performed by calling :code:`s.weight(1).sum()`, which is equivalent to :code:`s.integrate()`.
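For example (a small sketch assuming the NIFTy5 API)::

    import nifty5 as ift

    dom = ift.RGSpace(100, distances=0.1)  # total volume 10.0
    s = ift.full(dom, 2.)
    s.weight(1).sum()   # 20.0: volume-weighted sum
    s.integrate()       # 20.0: same result
    s.sum()             # 200.0: plain sum over pixel values, no volume factor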
Volume factors are also applied automatically in the following places:
- :class:`~operators.harmonic_operators.FFTOperator` as well as all other harmonic operators. Here the zero mode of the transformed field is the integral over the original field, thus the whole field is weighted once.
- some response operators, such as the :class:`~library.los_response.LOSResponse`. In this operator a line integral is descritized, so a 1-dimensional volume factor is applied.
- In :class:`~library.correlated_fields.CorrelatedField` as well :class:`~library.correlated_fields.MfCorrelatedField`, the field is multiplied by the square root of the total volume in configuration space. This ensures that the same field reconstructed over a larger domain has the same variance in position space in the limit of infinite resolution. It also ensures that power spectra in NIFTy behave according to the definition of a power spectrum, namely the power of a k-mode is the expectation of the k-mode square, divided by the volume of the space.
- :class:`~operators.harmonic_operators.FFTOperator` as well as all other harmonic operators.
Here the zero mode of the transformed field is the integral over the original field, thus the whole field is weighted once.
- Some response operators, such as the :class:`~library.los_response.LOSResponse`.
In this operator a line integral is discretised, so a 1-dimensional volume factor is applied.
- In :class:`~library.correlated_fields.CorrelatedField` as well as :class:`~library.correlated_fields.MfCorrelatedField`.
Both describe fields with a smooth, a priori unknown correlation structure specified by a power spectrum.
The field is multiplied by the square root of the total volume of its domain's harmonic counterpart.
This ensures that the same power spectrum can be used regardless of the chosen resolution, provided the total volume of the space remains the same.
It also guarantees that the power spectra in NIFTy behave according to their definition, i.e. the power of a mode :math:`s_k` is the expectation value of that mode squared, divided by the volume of its space, :math:`P(k) = \left\langle s_k^2 \right\rangle / V_k`.
Note that in contrast to some older versions of NIFTy, the dot product :code:`s.vdot(t)` of fields does **not** apply a volume factor, but instead just sums over the field components,
......@@ -205,3 +211,18 @@ Consequently, the inverse covariance operator will automatically have lower indi
ensuring that the whole log-likelihood expression does not scale with resolution.
**This upper-lower index convention is not coded into NIFTy**, in order to not reduce user freedom.
One should however have this in mind when constructing log-likelihoods in order to ensure resolution independence.
Harmonic Transform Convention
.............................
In NIFTy the convention for the harmonic transformations is set by requiring the zero mode of the transformed field to be the integral over the original field.
This choice is convenient for the Fourier transform and is used throughout the literature.
Note that for the spherical harmonics this convention is only rarely used and might result in unexpected factors in the transformed field.
To be specific, for the spherical harmonic transform this means that a monopole of unit amplitude in position space, transformed to the spherical harmonic domain and back via the adjoint transformation, will have a non-unit amplitude afterwards.
The factor between the input field and the twice transformed field is :math:`1/4\pi`.
In comparison to the convention used in HEALPix, this corresponds to dividing the output of the HEALPix transform by :math:`\sqrt{4\pi}` in each transformation.
Depending on the use-case, additional volume factors must be accounted for.
This is especially true if one wants to define the inverse transformation.
Note that the convention for the harmonic transformations used in NIFTy results in non-unitary operators.
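This can be checked with a small sketch (assuming the NIFTy5 API; the resolution is an arbitrary choice)::

    import nifty5 as ift

    hp = ift.HPSpace(nside=16)
    lm = hp.get_default_codomain()
    HT = ift.HarmonicTransformOperator(lm, hp)
    monopole = ift.full(hp, 1.)
    # to the spherical harmonic domain and back via the adjoint transformation
    twice = HT(HT.adjoint(monopole))
    # the mean of `twice` is expected to be 1/(4*pi), as stated above
    print(twice.to_global_data().mean())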
......@@ -45,19 +45,22 @@ from .operators.block_diagonal_operator import BlockDiagonalOperator
from .operators.outer_product_operator import OuterProduct
from .operators.simple_linear_operators import (
VdotOperator, ConjugationOperator, Realizer,
FieldAdapter, ducktape, GeometryRemover, NullOperator)
FieldAdapter, ducktape, GeometryRemover, NullOperator,
MatrixProductOperator, PartialExtractor)
from .operators.value_inserter import ValueInserter
from .operators.energy_operators import (
EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood,
BernoulliEnergy, StandardHamiltonian, AveragedEnergy)
BernoulliEnergy, StandardHamiltonian, AveragedEnergy, QuadraticFormOperator,
Squared2NormOperator, StudentTEnergy)
from .operators.convolution_operators import FuncConvolutionOperator
from .probing import probe_with_posterior_samples, probe_diagonal, \
StatCalculator
StatCalculator, approximation2endo
from .minimization.line_search import LineSearch
from .minimization.iteration_controllers import (
IterationController, GradientNormController, DeltaEnergyController,
GradInfNormController)
GradInfNormController, AbsDeltaEnergyController)
from .minimization.minimizer import Minimizer
from .minimization.conjugate_gradient import ConjugateGradient
from .minimization.nonlinear_cg import NonlinearCG
......@@ -73,7 +76,8 @@ from .minimization.metric_gaussian_kl import MetricGaussianKL
from .sugar import *
from .plot import Plot
from .library.smooth_linear_amplitude import SLAmplitude, CepstrumOperator
from .library.smooth_linear_amplitude import (
SLAmplitude, LinearSLAmplitude, CepstrumOperator)
from .library.inverse_gamma_operator import InverseGammaOperator
from .library.los_response import LOSResponse
from .library.dynamic_operator import (dynamic_operator,
......@@ -84,6 +88,7 @@ from .library.wiener_filter_curvature import WienerFilterCurvature
from .library.correlated_fields import CorrelatedField, MfCorrelatedField
from .library.adjust_variances import (make_adjust_variances_hamiltonian,
do_adjust_variances)
from .library.gridder import GridderMaker
from . import extra
......@@ -93,5 +98,12 @@ from .logger import logger
from .linearization import Linearization
from .operator_spectrum import operator_spectrum
from . import internal_config
_scheme = internal_config.parallelization_scheme()
if _scheme == "Samples":
from .minimization.metric_gaussian_kl_mpi import MetricGaussianKL_MPI
# We deliberately don't set __all__ here, because we don't want people to do a
# "from nifty5 import *"; that would swamp the global namespace.
......@@ -32,7 +32,7 @@ __all__ = ["ntask", "rank", "master", "local_shape", "data_object", "full",
"redistribute", "default_distaxis", "is_numpy", "absmax", "norm",
"lock", "locked", "uniform_full", "transpose", "to_global_data_rw",
"ensure_not_distributed", "ensure_default_distributed",
"tanh", "conjugate", "sin", "cos", "tan",
"tanh", "conjugate", "sin", "cos", "tan", "log10", "log1p", "expm1",
"sinh", "cosh", "sinc", "absolute", "sign", "clip"]
_comm = MPI.COMM_WORLD
......@@ -235,7 +235,6 @@ class data_object(object):
for op in ["__add__", "__radd__", "__iadd__",
"__sub__", "__rsub__", "__isub__",
"__mul__", "__rmul__", "__imul__",
"__div__", "__rdiv__", "__idiv__",
"__truediv__", "__rtruediv__", "__itruediv__",
"__floordiv__", "__rfloordiv__", "__ifloordiv__",
"__pow__", "__rpow__", "__ipow__",
......@@ -298,7 +297,8 @@ def _math_helper(x, function, out):
_current_module = sys.modules[__name__]
for f in ["sqrt", "exp", "log", "tanh", "conjugate", "sin", "cos", "tan",
"sinh", "cosh", "sinc", "absolute", "sign"]:
"sinh", "cosh", "sinc", "absolute", "sign", "log10", "log1p",
"expm1"]:
def func(f):
def func2(x, out=None):
return _math_helper(x, f, out)
......@@ -397,6 +397,8 @@ def from_global_data(arr, sum_up=False, distaxis=0):
raise TypeError
if sum_up:
arr = np_allreduce_sum(arr)
if arr.ndim == 0:
distaxis = -1
if distaxis == -1:
return data_object(arr.shape, arr, distaxis)
lo, hi = _shareRange(arr.shape[distaxis], ntask, rank)
......
......@@ -18,9 +18,11 @@
# Data object module that uses simple numpy ndarrays.
import numpy as np
from numpy import absolute, clip, cos, cosh, empty, empty_like, exp, full, log
from numpy import ndarray as data_object
from numpy import ones, sign, sin, sinc, sinh, sqrt, tan, tanh, vdot, zeros
from numpy import empty, empty_like, ones, zeros, full
from numpy import absolute, sign, clip, vdot
from numpy import sin, cos, sinh, cosh, tan, tanh
from numpy import exp, log, log10, sqrt, sinc, log1p, expm1
from .random import Random
......@@ -33,8 +35,8 @@ __all__ = ["ntask", "rank", "master", "local_shape", "data_object", "full",
"redistribute", "default_distaxis", "is_numpy", "absmax", "norm",
"lock", "locked", "uniform_full", "to_global_data_rw",
"ensure_not_distributed", "ensure_default_distributed",
"clip", "sin", "cos", "tan", "sinh",
"cosh", "absolute", "sign", "sinc"]
"clip", "sin", "cos", "tan", "sinh", "cosh",
"absolute", "sign", "sinc", "log10", "log1p", "expm1"]
ntask = 1
rank = 0
......@@ -149,7 +151,7 @@ def ensure_default_distributed(arr):
def absmax(arr):
return np.linalg.norm(arr.rehape(-1), ord=np.inf)
return np.linalg.norm(arr.reshape(-1), ord=np.inf)
def norm(arr, ord=2):
......
......@@ -15,11 +15,19 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
from . import internal_config
_scheme = internal_config.parallelization_scheme()
if _scheme in ("Samples", "None"):
from .data_objects.numpy_do import *
else:
try:
from mpi4py import MPI
if MPI.COMM_WORLD.Get_size() == 1:
from .data_objects.numpy_do import *
else:
from .data_objects.distributed_do import *
except ImportError:
from .data_objects.numpy_do import *
......@@ -19,6 +19,8 @@ from functools import reduce
from . import utilities
from .domains.domain import Domain
import numpy as np
class DomainTuple(object):
"""Ordered sequence of Domain objects.
......@@ -125,6 +127,58 @@ class DomainTuple(object):
"""
return self._size
def scalar_weight(self, spaces=None):
"""Returns the uniform volume element for a sub-domain of `self`.
Parameters
----------
spaces : int, tuple of int or None
Indices of the sub-domains to be considered.
If `None`, the entire domain is used.
Returns
-------
float or None
If the requested sub-domain has a uniform volume element, it is
returned. Otherwise, `None` is returned.
"""
if np.isscalar(spaces):
return self._dom[spaces].scalar_dvol
if spaces is None:
spaces = range(len(self._dom))
res = 1.
for i in spaces:
tmp = self._dom[i].scalar_dvol
if tmp is None:
return None
res *= tmp
return res
def total_volume(self, spaces=None):
"""Returns the total volume of `self` or of a subspace of it.
Parameters
----------
spaces : int, tuple of int or None
Indices of the sub-domains of the domain to be considered.
If `None`, the total volume of the whole domain is returned.
Returns
-------
float
the total volume of the requested (sub-)domain.
"""
if np.isscalar(spaces):
return self._dom[spaces].total_volume
if spaces is None:
spaces = range(len(self._dom))
res = 1.
for i in spaces:
res *= self._dom[i].total_volume
return res
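# Hypothetical usage sketch (illustrative only, not part of the class):
#
#     dt = ift.DomainTuple.make((ift.RGSpace(16, distances=0.5),
#                                ift.RGSpace(10, distances=0.1)))
#     dt.scalar_weight()   # 0.5 * 0.1 = 0.05, the uniform volume element
#     dt.total_volume()    # 8.0 * 1.0 = 8.0, product of the sub-domain volumes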
@property
def axes(self):
"""tuple of tuple of int : axis indices of the underlying domains"""
......
......@@ -103,7 +103,9 @@ class GLSpace(StructuredDomain):
The partner domain
"""
from ..domains.lm_space import LMSpace
return LMSpace(lmax=self._nlat-1, mmax=self._nlon//2)
mmax = self._nlon//2
lmax = max(mmax, self._nlat-1)
return LMSpace(lmax=lmax, mmax=mmax)
def check_codomain(self, codomain):
"""Raises `TypeError` if `codomain` is not a matching partner domain
......
......@@ -103,6 +103,36 @@ class LMSpace(StructuredDomain):
def get_fft_smoothing_kernel_function(self, sigma):
return lambda x: self._kernel(x, sigma)
def get_conv_kernel_from_func(self, func):
"""Creates a convolution kernel defined by a function.
Parameters
----------
func: function
This function needs to take exactly one argument, which is
colatitude in radians, and return the kernel amplitude at that
colatitude.
The function is assumed to be radially symmetric,
i.e. to depend only on theta (in radians)."""
from .gl_space import GLSpace
from ..operators.harmonic_operators import HarmonicTransformOperator
import pyHealpix
# define azimuthally symmetric spaces for kernel transform
gl = GLSpace(self.lmax + 1, 1)
lm0 = gl.get_default_codomain()
theta = pyHealpix.GL_thetas(gl.nlat)
# evaluate the kernel function at the required thetas
kernel_sphere = Field.from_global_data(gl, func(theta))
# normalize the kernel such that the integral over the sphere is 4pi
kernel_sphere = kernel_sphere * (4 * np.pi / kernel_sphere.integrate())
# compute the spherical harmonic coefficients of the kernel
op = HarmonicTransformOperator(lm0, gl)
kernel_lm = op.adjoint_times(kernel_sphere.weight(1)).to_global_data()
# evaluate the k lengths of the harmonic space
k_lengths = self.get_k_length_array().to_global_data().astype(np.int)
return Field.from_global_data(self, kernel_lm[k_lengths])
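# Hypothetical usage sketch (illustrative only): a Gaussian-like kernel as a
# function of colatitude theta, assuming `lm` is an LMSpace instance:
#
#     kernel = lm.get_conv_kernel_from_func(
#         lambda theta: np.exp(-0.5*(theta/0.05)**2))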
@property
def lmax(self):
"""int : maximum allowed :math:`l`
......
......@@ -50,6 +50,8 @@ class LogRGSpace(StructuredDomain):
self._bindistances = tuple(bindistances)
self._t_0 = tuple(t_0)
if min(self._bindistances) <= 0:
raise ValueError('Non-positive bindistances encountered')
self._dim = int(reduce(lambda x, y: x*y, self._shape))
self._dvol = float(reduce(lambda x, y: x*y, self._bindistances))
......@@ -80,8 +82,8 @@ class LogRGSpace(StructuredDomain):
return np.array(self._t_0)
def __repr__(self):
return ("LogRGSpace(shape={}, harmonic={})".format(
self.shape, self.harmonic))
return ("LogRGSpace(shape={}, bindistances={}, t_0={}, harmonic={})".format(
self.shape, self.bindistances, self.t_0, self.harmonic))
def get_default_codomain(self):
"""Returns a :class:`LogRGSpace` object representing the (position or
......@@ -91,10 +93,10 @@ class LogRGSpace(StructuredDomain):
Returns
-------
LogRGSpace
The parter domain
The partner domain
"""
codomain_bindistances = 1./(self.bindistances*self.shape)
return LogRGSpace(self.shape, codomain_bindistances, self._t_0, True)
return LogRGSpace(self.shape, codomain_bindistances, self._t_0, not self.harmonic)
def get_k_length_array(self):
"""Generates array of distances to origin of the space.
......
......@@ -165,6 +165,8 @@ class PowerSpace(StructuredDomain):
if binbounds is not None:
binbounds = tuple(binbounds)
if min(binbounds) < 0:
raise ValueError('Negative binbounds encountered')
key = (harmonic_partner, binbounds)
if self._powerIndexCache.get(key) is None:
......
......@@ -54,6 +54,8 @@ class RGSpace(StructuredDomain):
if np.isscalar(shape):
shape = (shape,)
self._shape = tuple(int(i) for i in shape)
if min(self._shape) < 0:
raise ValueError('Negative number of pixels encountered')
if distances is None:
if self.harmonic:
......@@ -66,6 +68,8 @@ class RGSpace(StructuredDomain):
temp = np.empty(len(self.shape), dtype=np.float64)
temp[:] = distances
self._distances = tuple(temp)
if min(self._distances) <= 0:
raise ValueError('Non-positive distances encountered')
self._dvol = float(reduce(lambda x, y: x*y, self._distances))
self._size = int(reduce(lambda x, y: x*y, self._shape))
......@@ -90,9 +94,7 @@ class RGSpace(StructuredDomain):
def scalar_dvol(self):
return self._dvol
def get_k_length_array(self):
if (not self.harmonic):
raise NotImplementedError
def _get_dist_array(self):
ibegin = dobj.ibegin_from_shape(self._shape)
res = np.arange(self.local_shape[0], dtype=np.float64) + ibegin[0]
res = np.minimum(res, self.shape[0]-res)*self.distances[0]
......@@ -106,6 +108,11 @@ class RGSpace(StructuredDomain):
res = np.add.outer(res, tmp)
return Field.from_local_data(self, np.sqrt(res))
def get_k_length_array(self):
if (not self.harmonic):
raise NotImplementedError
return self._get_dist_array()
def get_unique_k_lengths(self):
if (not self.harmonic):
raise NotImplementedError
......@@ -146,6 +153,27 @@ class RGSpace(StructuredDomain):
raise NotImplementedError
return lambda x: self._kernel(x, sigma)
def get_conv_kernel_from_func(self, func):
"""Creates a convolution kernel defined by a function.
Parameters
----------
func: function
This function needs to take exactly one argument, which is
distance from center (in the same units as the RGSpace distances),
and return the kernel amplitude at that distance.
The function is assumed to be radially symmetric,
i.e. to depend only on the distance."""
from ..operators.harmonic_operators import HarmonicTransformOperator
if (not self.harmonic):
raise NotImplementedError
op = HarmonicTransformOperator(self, self.get_default_codomain())
dist = op.target[0]._get_dist_array()
kernel = Field.from_local_data(op.target, func(dist.local_data))
kernel = kernel / kernel.integrate()
return op.adjoint_times(kernel.weight(1))
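# Hypothetical usage sketch (illustrative only): a Gaussian-like kernel as a
# function of the distance r from the center, assuming `harmonic_dom` is a
# harmonic RGSpace:
#
#     kernel = harmonic_dom.get_conv_kernel_from_func(
#         lambda r: np.exp(-0.5*(r/0.1)**2))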
def get_default_codomain(self):
"""Returns a :class:`RGSpace` object representing the (position or
harmonic) partner domain of `self`, depending on `self.harmonic`.
......@@ -153,7 +181,7 @@ class RGSpace(StructuredDomain):
Returns
-------
RGSpace
The parter domain
The partner domain
"""
distances = 1. / (np.array(self.shape)*np.array(self.distances))
return RGSpace(self.shape, distances, not self.harmonic)
......
......@@ -16,9 +16,13 @@
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
import numpy as np
from numpy.testing import assert_
from .domain_tuple import DomainTuple
from .field import Field
from .linearization import Linearization
from .multi_domain import MultiDomain
from .multi_field import MultiField
from .operators.linear_operator import LinearOperator
from .sugar import from_random
......@@ -33,7 +37,8 @@ def _assert_allclose(f1, f2, atol, rtol):
_assert_allclose(val, f2[key], atol=atol, rtol=rtol)
def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear):
needed_cap = op.TIMES | op.ADJOINT_TIMES
if (op.capability & needed_cap) != needed_cap:
return
......@@ -41,6 +46,8 @@ def _adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol):
f2 = from_random("normal", op.target, dtype=target_dtype)
res1 = f1.vdot(op.adjoint_times(f2))
res2 = op.times(f1).vdot(f2)
if only_r_linear:
res1, res2 = res1.real, res2.real
np.testing.assert_allclose(res1, res2, atol=atol, rtol=rtol)
......@@ -57,22 +64,67 @@ def _inverse_implementation(op, domain_dtype, target_dtype, atol, rtol):
_assert_allclose(res, foo, atol=atol, rtol=rtol)
def _full_implementation(op, domain_dtype, target_dtype, atol, rtol):
_adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol)
def _full_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear):
_adjoint_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear)
_inverse_implementation(op, domain_dtype, target_dtype, atol, rtol)
def _check_linearity(op, domain_dtype, atol, rtol):
needed_cap = op.TIMES
if (op.capability & needed_cap) != needed_cap:
return
fld1 = from_random("normal", op.domain, dtype=domain_dtype)
fld2 = from_random("normal", op.domain, dtype=domain_dtype)
alpha = np.random.random()
alpha = np.random.random() # FIXME: this can break badly with MPI!
val1 = op(alpha*fld1+fld2)
val2 = alpha*op(fld1)+op(fld2)
_assert_allclose(val1, val2, atol=atol, rtol=rtol)
def _actual_domain_check(op, domain_dtype=None, inp=None):
needed_cap = op.TIMES
if (op.capability & needed_cap) != needed_cap:
return
if domain_dtype is not None:
inp = from_random("normal", op.domain, dtype=domain_dtype)
elif inp is None:
raise ValueError('Need to specify either dtype or inp')
assert_(inp.domain is op.domain)
assert_(op(inp).domain is op.target)
def _actual_domain_check_nonlinear(op, loc):
assert isinstance(loc, (Field, MultiField))
assert_(loc.domain is op.domain)
lin = Linearization.make_var(loc, False)
reslin = op(lin)
assert_(lin.domain is op.domain)
assert_(lin.target is op.domain)
assert_(lin.val.domain is lin.domain)
assert_(reslin.domain is op.domain)
assert_(reslin.target is op.target)
assert_(reslin.val.domain is reslin.target)
assert_(reslin.target is op.target)
assert_(reslin.jac.domain is reslin.domain)
assert_(reslin.jac.target is reslin.target)
_actual_domain_check(reslin.jac, inp=loc)
_actual_domain_check(reslin.jac.adjoint, inp=reslin.jac(loc))
def _domain_check(op):
for dd in [op.domain, op.target]:
if not isinstance(dd, (DomainTuple, MultiDomain)):
raise TypeError(
'The domain and the target of an operator need to '
'be instances of either DomainTuple or MultiDomain.')
def consistency_check(op, domain_dtype=np.float64, target_dtype=np.float64,
atol=0, rtol=1e-7):
atol=0, rtol=1e-7, only_r_linear=False):
"""
Checks an operator for algebraic consistency of its capabilities.
......@@ -98,15 +150,29 @@ def consistency_check(op, domain_dtype=np.float64, target_dtype=np.float64,
Relative tolerance for the check. If atol is specified,
then satisfying any tolerance will let the check pass.
Default: 0.
only_r_linear: bool
Set to True if the operator is only R-linear, not C-linear.
This will relax the adjointness test accordingly.
"""
if not isinstance(op, LinearOperator):
raise TypeError('This test tests only linear operators.')
_domain_check(op)
_actual_domain_check(op, domain_dtype)
_actual_domain_check(op.adjoint, target_dtype)
_actual_domain_check(op.inverse, target_dtype)
_actual_domain_check(op.adjoint.inverse, domain_dtype)
_check_linearity(op, domain_dtype, atol, rtol)
_full_implementation(op, domain_dtype, target_dtype, atol, rtol)
_full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol)
_full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol)
_check_linearity(op.adjoint, target_dtype, atol, rtol)
_check_linearity(op.inverse, target_dtype, atol, rtol)
_check_linearity(op.adjoint.inverse, domain_dtype, atol, rtol)
_full_implementation(op, domain_dtype, target_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.inverse, target_dtype, domain_dtype, atol, rtol,
only_r_linear)
_full_implementation(op.adjoint.inverse, domain_dtype, target_dtype, atol,
rtol)
rtol, only_r_linear)
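# Hypothetical usage sketch (illustrative only, not part of this module):
#
#     import nifty5 as ift
#     op = ift.ScalingOperator(3., ift.RGSpace(64))
#     ift.extra.consistency_check(op)   # uses float64 dtypes by default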
def _get_acceptable_location(op, loc, lin):
......@@ -151,6 +217,8 @@ def check_jacobian_consistency(op, loc, tol=1e-8, ntries=100):
tol : float
Tolerance for the check.
"""
_domain_check(op)
_actual_domain_check_nonlinear(op, loc)
for _ in range(ntries):
lin = op(Linearization.make_var(loc))
loc2, lin2 = _get_acceptable_location(op, loc, lin)
......
......@@ -15,152 +15,28 @@
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
from .utilities import iscomplextype
import numpy as np
import pypocketfft
_nthreads = 1
_use_fftw = False
_fftw_prepped = False
_fft_extra_args = {}
def nthreads():
return _nthreads
def enable_fftw():
global _use_fftw
_use_fftw = True
def disable_fftw():
global _use_fftw
_use_fftw = False
def _init_pyfftw():
global _fft_extra_args, _fftw_prepped
if not _fftw_prepped:
import pyfftw
from pyfftw.interfaces.numpy_fft import fftn, rfftn, ifftn
pyfftw.interfaces.cache.enable()
pyfftw.interfaces.cache.set_keepalive_time(1000.)
# Optional extra arguments for the FFT calls
# if exact reproducibility is needed,
# set "planner_effort" to "FFTW_ESTIMATE"
import os
nthreads = int(os.getenv("OMP_NUM_THREADS", "1"))
_fft_extra_args = dict(planner_effort='FFTW_ESTIMATE',
threads=nthreads)
_fftw_prepped = True
def set_nthreads(nthr):
global _nthreads
_nthreads = int(nthr)
def fftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import fftn
_init_pyfftw()
return fftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.fftn(a, axes=axes)
def rfftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import rfftn
_init_pyfftw()
return rfftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.rfftn(a, axes=axes)
return pypocketfft.c2c(a, axes=axes, nthreads=_nthreads)
def ifftn(a, axes=None):
if _use_fftw:
from pyfftw.interfaces.numpy_fft import ifftn
_init_pyfftw()
return ifftn(a, axes=axes, **_fft_extra_args)
else:
return np.fft.ifftn(a, axes=axes)
return pypocketfft.c2c(a, axes=axes, inorm=2, forward=False,
nthreads=_nthreads)
def hartley(a, axes=None):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis < len(a.shape) for axis in axes):
raise ValueError("Provided axes do not match array shape")
if iscomplextype(a.dtype):
raise TypeError("Hartley transform requires real-valued arrays.")
tmp = rfftn(a, axes=axes)
def _fill_array(tmp, res, axes):
if axes is None:
axes = tuple(range(tmp.ndim))
lastaxis = axes[-1]
ntmplast = tmp.shape[lastaxis]
slice1 = (slice(None),)*lastaxis + (slice(0, ntmplast),)
np.add(tmp.real, tmp.imag, out=res[slice1])
def _fill_upper_half(tmp, res, axes):
lastaxis = axes[-1]
nlast = res.shape[lastaxis]
ntmplast = tmp.shape[lastaxis]
nrem = nlast - ntmplast
slice1 = [slice(None)]*lastaxis + [slice(ntmplast, None)]
slice2 = [slice(None)]*lastaxis + [slice(nrem, 0, -1)]
for i in axes[:-1]:
slice1[i] = slice(1, None)
slice2[i] = slice(None, 0, -1)
slice1 = tuple(slice1)
slice2 = tuple(slice2)
np.subtract(tmp[slice2].real, tmp[slice2].imag, out=res[slice1])
for i, ax in enumerate(axes[:-1]):
dim1 = (slice(None),)*ax + (slice(0, 1),)
axes2 = axes[:i] + axes[i+1:]
_fill_upper_half(tmp[dim1], res[dim1], axes2)
_fill_upper_half(tmp, res, axes)
return res
return _fill_array(tmp, np.empty_like(a), axes)
# Do a real-to-complex forward FFT and return the _full_ output array
def my_fftn_r2c(a, axes=None):
# Check if the axes provided are valid given the shape
if axes is not None and \
not all(axis < len(a.shape) for axis in axes):
raise ValueError("Provided axes do not match array shape")
if iscomplextype(a.dtype):
raise TypeError("Transform requires real-valued input arrays.")
tmp = rfftn(a, axes=axes)
def _fill_complex_array(tmp, res, axes):
if axes is None:
axes = tuple(range(tmp.ndim))
lastaxis = axes[-1]
ntmplast = tmp.shape[lastaxis]
slice1 = [slice(None)]*lastaxis + [slice(0, ntmplast)]
res[tuple(slice1)] = tmp
def _fill_upper_half_complex(tmp, res, axes):
lastaxis = axes[-1]
nlast = res.shape[lastaxis]
ntmplast = tmp.shape[lastaxis]
nrem = nlast - ntmplast
slice1 = [slice(None)]*lastaxis + [slice(ntmplast, None)]
slice2 = [slice(None)]*lastaxis + [slice(nrem, 0, -1)]
for i in axes[:-1]:
slice1[i] = slice(1, None)
slice2[i] = slice(None, 0, -1)
# np.conjugate(tmp[slice2], out=res[slice1])
res[tuple(slice1)] = np.conjugate(tmp[tuple(slice2)])
for i, ax in enumerate(axes[:-1]):
dim1 = tuple([slice(None)]*ax + [slice(0, 1)])
axes2 = axes[:i] + axes[i+1:]
_fill_upper_half_complex(tmp[dim1], res[dim1], axes2)
_fill_upper_half_complex(tmp, res, axes)
return res
return _fill_complex_array(tmp, np.empty_like(a, dtype=tmp.dtype), axes)
def my_fftn(a, axes=None):
return fftn(a, axes=axes)
return pypocketfft.genuine_hartley(a, axes=axes, nthreads=_nthreads)
......@@ -246,42 +246,23 @@ class Field(object):
If the requested sub-domain has a uniform volume element, it is
returned. Otherwise, `None` is returned.
"""
if np.isscalar(spaces):
return self._domain[spaces].scalar_dvol
if spaces is None:
spaces = range(len(self._domain))
res = 1.
for i in spaces:
tmp = self._domain[i].scalar_dvol
if tmp is None:
return None
res *= tmp
return res
return self._domain.scalar_weight(spaces)
def total_volume(self, spaces=None):
"""Returns the total volume of a sub-domain of `self`.
"""Returns the total volume of the field's domain or of a subspace of it.
Parameters
----------
spaces : int, tuple of int or None
Indices of the sub-domains of the field's domain to be considered.
If `None`, the entire domain is used.
If `None`, the total volume of the whole domain is returned.
Returns
-------
float
the total volume of the requested sub-domain.
the total volume of the requested (sub-)domain.
"""
if np.isscalar(spaces):
return self._domain[spaces].total_volume
if spaces is None:
spaces = range(len(self._domain))
res = 1.
for i in spaces:
res *= self._domain[i].total_volume
return res
return self._domain.total_volume(spaces)
def weight(self, power=1, spaces=None):
"""Weights the pixels of `self` with their invidual pixel volumes.
......@@ -626,6 +607,11 @@ class Field(object):
raise ValueError("domain mismatch")
return self
def extract_part(self, dom):
if dom != self._domain:
raise ValueError("domain mismatch")
return self
def unite(self, other):
return self+other
......@@ -636,6 +622,8 @@ class Field(object):
return 0.5*(1.+self.tanh())
def clip(self, min=None, max=None):
min = min.local_data if isinstance(min, Field) else min
max = max.local_data if isinstance(max, Field) else max
return Field(self._domain, dobj.clip(self._val, min, max))
def one_over(self):
......@@ -656,7 +644,6 @@ class Field(object):
for op in ["__add__", "__radd__",
"__sub__", "__rsub__",
"__mul__", "__rmul__",
"__div__", "__rdiv__",
"__truediv__", "__rtruediv__",
"__floordiv__", "__rfloordiv__",
"__pow__", "__rpow__",
......@@ -676,9 +663,8 @@ for op in ["__iadd__", "__isub__", "__imul__", "__idiv__",
return func2
setattr(Field, op, func(op))
for f in ["sqrt", "exp", "log", "tanh",
"sin", "cos", "tan", "cosh", "sinh",
"absolute", "sinc", "sign"]:
for f in ["sqrt", "exp", "log", "sin", "cos", "tan", "sinh", "cosh", "tanh",
"absolute", "sinc", "sign", "log10", "log1p", "expm1"]:
def func(f):
def func2(self):
return Field(self._domain, getattr(dobj, f)(self.val))
......
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.
# Internal configuration switches, typically for experimental features.
# Leave unchanged unless you know what you are doing!
def parallelization_scheme():
"""Sets the MPI parallelization scheme according to the
environment variable `NIFTY_MPI_SCHEME`.
If not set, "Standard" parallelization is used.
Possible Values:
---------------
"Standard": Fields are distributed over all MPI instances
along their first axis.
This mode is useful if the fields involved are
too large for the memory of a single machine,
otherwise it is not recommended.
"Samples": :class:`MetricGaussianKL_MPI` becomes available.
The :class:`MetricGaussianKL` usually has
multiple samples for which it needs to perform
the same calculations.
:class:`MetricGaussianKL_MPI` distributes these
calculations to the MPI instances by sample.
This mode is useful when the "Standard" mode
is not needed and the per-sample calculations take
long compared to the MPI communication overhead
introduced by the parallelization.
"None": Disables all parallelization.
"""
import os
scheme = os.getenv("NIFTY_MPI_SCHEME", default="Standard")
if scheme not in ["Standard", "Samples", "None"]:
raise ValueError("Unrecognized MPI parallelization scheme!")
return scheme
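# Hypothetical usage sketch (illustrative, not part of this module): since the
# scheme is read when nifty5 is first imported, the environment variable has
# to be set beforehand, e.g.
#
#     import os
#     os.environ["NIFTY_MPI_SCHEME"] = "Samples"   # or "Standard" / "None"
#     import nifty5 as ift
#     # under the "Samples" scheme, ift.MetricGaussianKL_MPI becomes available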
......@@ -17,9 +17,8 @@
from ..minimization.energy_adapter import EnergyAdapter
from ..multi_field import MultiField
from ..operators.distributors import PowerDistributor
from ..operators.energy_operators import (StandardHamiltonian,
InverseGammaLikelihood)
from ..operators.energy_operators import (InverseGammaLikelihood,
StandardHamiltonian)
from ..operators.scaling_operator import ScalingOperator
from ..operators.simple_linear_operators import ducktape
......@@ -79,27 +78,23 @@ def make_adjust_variances_hamiltonian(a,
ic_samp=ic_samp)
def do_adjust_variances(position