Commit 63833958 authored by Martin Reinecke

Merge remote-tracking branch 'origin/NIFTy_5' into spectral2

parents 71674e9a 3f52237c
......@@ -39,9 +39,9 @@ test_serial:
script:
- pytest-3 -q --cov=nifty5 test
- >
python3 -m coverage report --omit "*plot*,*distributed_do*"
python3 -m coverage report --omit "*plot*,*distributed_do*" | tee coverage.txt
- >
python3 -m coverage report --omit "*plot*,*distributed_do*" | grep TOTAL | awk '{ print "TOTAL: "$4; }'
grep TOTAL coverage.txt | awk '{ print "TOTAL: "$4; }'
test_mpi:
stage: test
......@@ -51,14 +51,16 @@ test_mpi:
- mpiexec -n 2 --bind-to none pytest-3 -q test
pages:
# FIXME Build only for main branch and set stage to release
stage: test
stage: release
script:
- sh docs/generate.sh
- mv docs/build/ public/
artifacts:
paths:
- public
only:
- NIFTy_5
before_script:
- python3 setup.py install --user -f
......
......@@ -43,22 +43,22 @@ def exposure_2d():
if __name__ == '__main__':
# FIXME All random seeds to 42
np.random.seed(41)
np.random.seed(42)
# Choose space on which the signal field is defined
mode = 2
mode = 1
if mode == 0:
# One-dimensional regular grid with uniform exposure
# One-dimensional regular grid with uniform exposure of 10
position_space = ift.RGSpace(1024)
exposure = ift.Field.full(position_space, 1.)
exposure = ift.Field.full(position_space, 10.)
elif mode == 1:
# Two-dimensional regular grid with inhomogeneous exposure
position_space = ift.RGSpace([512, 512])
exposure = exposure_2d()
else:
# Sphere with uniform exposure
# Sphere with uniform exposure of 100
position_space = ift.HPSpace(128)
exposure = ift.Field.full(position_space, 1.)
exposure = ift.Field.full(position_space, 100.)
# Define harmonic space and harmonic transform
harmonic_space = position_space.get_default_codomain()
......
......@@ -17,8 +17,12 @@
############################################################
# Non-linear tomography
# The data is integrated lines of sight
# Random lines (set mode=0), radial lines (mode=1)
#
# The signal is a sigmoid-normal distributed field.
# The data is the field integrated along lines of sight that are
# randomly (set mode=0) or radially (mode=1) distributed
#
# Demo takes a while to compute
#############################################################
import numpy as np
......@@ -28,22 +32,22 @@ import nifty5 as ift
def random_los(n_los):
starts = list(np.random.uniform(0, 1, (n_los, 2)).T)
ends = list(0.5 + 0*np.random.uniform(0, 1, (n_los, 2)).T)
ends = list(np.random.uniform(0, 1, (n_los, 2)).T)
return starts, ends
def radial_los(n_los):
starts = list(np.random.uniform(0, 1, (n_los, 2)).T)
ends = list(np.random.uniform(0, 1, (n_los, 2)).T)
ends = list(0.5 + 0*np.random.uniform(0, 1, (n_los, 2)).T)
return starts, ends
if __name__ == '__main__':
np.random.seed(420)
np.random.seed(420) # picked for a nice field realization
# Choose between random line-of-sight response (mode=1) and radial lines
# of sight (mode=2)
mode = 1
# Choose between random line-of-sight response (mode=0) and radial lines
# of sight (mode=1)
mode = 0
position_space = ift.RGSpace([128, 128])
harmonic_space = position_space.get_default_codomain()
......@@ -62,8 +66,8 @@ if __name__ == '__main__':
# Power-law part of spectrum:
'sm': -5, # preferred power-law slope
'sv': .5, # low variance of power-law slope
'im': .4, # y-intercept mean
'iv': .3 # relatively high y-intercept variance
'im': 0, # y-intercept mean, in-/decrease for more/less contrast
'iv': .3 # y-intercept variance
}
A = ift.SLAmplitude(**dct)
......@@ -79,7 +83,7 @@ if __name__ == '__main__':
signal = ift.sigmoid(correlated_field)
# Build the line-of-sight response and define signal response
LOS_starts, LOS_ends = random_los(100) if mode == 1 else radial_los(100)
LOS_starts, LOS_ends = random_los(100) if mode == 0 else radial_los(100)
R = ift.LOSResponse(position_space, starts=LOS_starts, ends=LOS_ends)
signal_response = R(signal)
......
......@@ -44,7 +44,8 @@ def polynomial(coefficients, sampling_points):
class PolynomialResponse(ift.LinearOperator):
"""Calculates values of a polynomial parameterized by input at sampling points.
"""Calculates values of a polynomial parameterized by input at sampling
points.
Parameters
----------
......
# rm -rf docs/build docs/source/mod
rm -rf docs/build docs/source/mod
sphinx-apidoc -e -o docs/source/mod nifty5
sphinx-build -b html docs/source/ docs/build/
......@@ -41,7 +41,6 @@ Abstract base class
One of the fundamental building blocks of the NIFTy5 framework is the *domain*.
Its required capabilities are expressed by the abstract :py:class:`Domain` class.
A domain must be able to answer the following queries:
m
- its total number of data entries (pixels), which is accessible via the
:attr:`~Domain.size` property
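For instance, a regular grid answers these queries directly (a minimal sketch; the grid size below is an arbitrary choice)::

    import nifty5 as ift

    dom = ift.RGSpace([256, 256])  # two-dimensional regular grid
    dom.size                       # total number of pixels: 65536
    dom.shape                      # (256, 256)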
......@@ -129,7 +128,7 @@ specify full field domains. In principle, a :class:`~domain_tuple.DomainTuple`
can even be empty, which implies that the field living on it is a scalar.
A :class:`~domain_tuple.DomainTuple` supports iteration and indexing, and also
provides the properties :attr:`~domain_tuple.DomainTuple.shape`,
provides the properties :attr:`~domain_tuple.DomainTuple.shape` and
:attr:`~domain_tuple.DomainTuple.size` in analogy to the elementary
:class:`~domains.domain.Domain`.
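A short sketch of these conveniences (the concrete domains below are arbitrary choices)::

    import nifty5 as ift

    dt = ift.DomainTuple.make((ift.RGSpace(16), ift.HPSpace(4)))
    dt.shape   # (16, 192): concatenated shapes of the constituents
    dt.size    # 3072: product of the constituent sizes
    dt[0]      # indexing returns the individual RGSpace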
......@@ -159,10 +158,11 @@ Contractions (like summation, integration, minimum/maximum, computation of
statistical moments) can be carried out either over an entire field (producing
a scalar result) or over sub-domains (resulting in a field defined on a smaller
domain). Scalar products of two fields can also be computed easily.
See the documentation of :class:`~field.Field` for details.
There is also a set of convenience functions to generate fields with constant
values or fields filled with random numbers according to a user-specified
distribution.
distribution: :attr:`~sugar.full`, :attr:`~sugar.from_random`.
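For example (domain and random distribution chosen arbitrarily)::

    import nifty5 as ift

    dom = ift.RGSpace(128)
    ones = ift.full(dom, 1.)             # constant field
    xi = ift.from_random('normal', dom)  # standard-normal random field
    xi.sum()                             # contraction over the whole domain
    xi.vdot(ones)                        # scalar product of two fields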
Like almost all NIFTy objects, fields are immutable: their value or any other
attribute cannot be modified after construction. To manipulate a field in ways
......@@ -311,11 +311,15 @@ and ``f1`` and ``f2`` are of type :class:`~field.Field`, writing::
will perform the operation suggested intuitively by the notation, checking
domain compatibility while building the composed operator.
The combined operator infers its domain and target from its constituents,
as well as the set of operations it can support.
The properties :attr:`~LinearOperator.adjoint` and
:attr:`~LinearOperator.inverse` return a new operator which behaves as if it
were the original operator's adjoint or inverse, respectively.
The combined operator infers its domain and target from its constituents,
as well as the set of operations it can support.
Instantiating operator adjoints or inverses by :attr:`~LinearOperator.adjoint`
and similar methods is to be distinguished from the instant application of
operators performed by :attr:`~LinearOperator.adjoint_times` and similar
methods.
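As a sketch of the distinction (operator and field below are only illustrative)::

    import nifty5 as ift

    dom = ift.RGSpace(64)
    f = ift.from_random('normal', dom)
    op = ift.ScalingOperator(2., dom)  # multiplies field values by 2
    inv = op.inverse                   # a new operator object, nothing applied yet
    r1 = inv(f)                        # apply the lazily built inverse
    r2 = op.inverse_times(f)           # immediate application; same result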
.. _minimization:
......@@ -368,8 +372,8 @@ failure.
Sensible stopping criteria can vary significantly with the problem being
solved; NIFTy provides one concrete sub-class of :class:`IterationController`
called :class:`GradientNormController`, which should be appropriate in many
circumstances, but users have complete freedom to implement custom sub-classes
for their specific applications.
circumstances, but users have complete freedom to implement custom
:class:`IterationController` sub-classes for their specific applications.
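A typical setup might look like this (the controller parameters are illustrative, not recommendations)::

    import nifty5 as ift

    ic = ift.GradientNormController(name='Newton', iteration_limit=30,
                                    tol_abs_gradnorm=1e-5)
    minimizer = ift.NewtonCG(ic)
    # the minimizer is later invoked as: energy, convergence = minimizer(energy)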
Minimization algorithms
......@@ -424,11 +428,13 @@ the information propagator whose inverse is defined as:
:math:`D^{-1} = \left(R^\dagger N^{-1} R + S^{-1}\right)`.
It needs to be applied in forward direction in order to calculate the Wiener
filter solution. Only its inverse application is straightforward; to use it in
forward direction, we make use of NIFTy's
filter solution, but only its inverse application is straightforward.
To use it in forward direction, we make use of NIFTy's
:class:`~operators.inversion_enabler.InversionEnabler` class, which internally
performs a minimization of a
:class:`~minimization.quadratic_energy.QuadraticEnergy` by means of the
:class:`~minimization.conjugate_gradient.ConjugateGradient` algorithm. An
example is provided in
applies the (approximate) inverse of the given operator :math:`x = Op^{-1} (y)` by
solving the equation :math:`y = Op (x)` for :math:`x`.
This is accomplished by minimizing a suitable
:class:`~minimization.quadratic_energy.QuadraticEnergy`
with the :class:`~minimization.conjugate_gradient.ConjugateGradient`
algorithm. An example is provided in
:func:`~library.wiener_filter_curvature.WienerFilterCurvature`.
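A self-contained sketch of this pattern, with toy stand-ins for :math:`R`, :math:`N` and :math:`S` (none of these operator choices are prescribed, they only serve to make the snippet runnable)::

    import nifty5 as ift

    s_space = ift.RGSpace(256)
    S = ift.ScalingOperator(1., s_space)        # toy prior covariance
    N = ift.ScalingOperator(0.1, s_space)       # toy noise covariance
    R = ift.ScalingOperator(1., s_space)        # trivial response
    data = ift.from_random('normal', s_space)
    j = R.adjoint_times(N.inverse_times(data))  # information source
    D_inv = ift.SandwichOperator.make(R, N.inverse) + S.inverse
    ic = ift.GradientNormController(iteration_limit=500, tol_abs_gradnorm=1e-3)
    D = ift.InversionEnabler(D_inv, ic, approximation=S.inverse).inverse
    m = D(j)                                    # Wiener-filter mean via conjugate gradient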
......@@ -4,9 +4,6 @@ IFT -- Information Field Theory
Theoretical Background
----------------------
Introduction
............
`Information Field Theory <http://www.mpa-garching.mpg.de/ift/>`_ [1]_ (IFT) is information theory, the logic of reasoning under uncertainty, applied to fields.
A field can be any quantity defined over some space, e.g. the air temperature over Europe, the magnetic field strength in the Milky Way, or the matter density in the Universe.
IFT describes how data and knowledge can be used to infer field properties.
......
NIFTy -- Numerical Information Field Theory
===========================================
**NIFTy** [1]_, [2]_, "\ **N**\umerical **I**\nformation **F**\ield **T**\heor\ **y**\ ", is a versatile library designed to enable the development of signal inference algorithms that are independent of the underlying spatial grid and its resolution.
**NIFTy** [1]_, [2]_, "\ **N**\umerical **I**\nformation **F**\ield **T**\heor\ **y**\ ", is a versatile library designed to enable the development of signal inference algorithms that are independent of the underlying grids (spatial, spectral, temporal, …) and their resolutions.
Its object-oriented framework is written in Python, although it accesses libraries written in C++ and C for efficiency.
NIFTy offers a toolkit that abstracts discretized representations of continuous spaces, fields in these spaces, and operators acting on fields into classes.
Thereby, the correct normalization of operations on fields is taken care of automatically without concerning the user.
NIFTy offers a toolkit that abstracts discretized representations of continuous spaces, fields in these spaces, and operators acting on these fields into classes.
This allows for an abstract formulation and programming of inference algorithms, including those derived within information field theory.
Thus, NIFTy permits its user to rapidly prototype algorithms in 1D and then apply the developed code in higher-dimensional settings to real world problems.
NIFTy's interface is designed to resemble IFT formulae in the sense that the user implements algorithms in NIFTy independent of the topology of the underlying spaces and the discretization scheme.
Thus, the user can develop algorithms on subsets of problems and on spaces where the detailed performance of the algorithm can be properly evaluated and then easily generalize them to other, more complex spaces and the full problem, respectively.
The set of spaces on which NIFTy operates comprises point sets, *n*-dimensional regular grids, spherical spaces, their harmonic counterparts, and product spaces constructed as combinations of those.
NIFTy takes care of numerical subtleties like the normalization of operations on fields and the numerical representation of model components, allowing the user to focus on formulating the abstract inference procedures and process-specific model properties.
References
----------
......@@ -21,6 +23,7 @@ Contents
........
.. toctree::
:maxdepth: 2
ift
volume
......
......@@ -14,12 +14,13 @@ Plotting support is added via::
pip3 install --user matplotlib
FFTW support is added via::
NIFTy uses Numpy's FFT implementation by default. For large problems FFTW may be
used because of its higher performance. It can be installed via::
sudo apt-get install libfftw3-dev
pip3 install --user pyfftw
To actually use FFTW in your Nifty calculations, you need to call::
To enable FFTW usage in NIFTy, call::
nifty5.fft.enable_fftw()
......
......@@ -34,8 +34,8 @@ Discretisation and index notation
.................................
To compute anything numerically, we first need to represent the problem in finite dimensions.
As for stochastic processes, several discretisations of :math:`\mathcal{S}` like collocation methods, expansion into orthogonal polynomials, etc. can be used (see [6]_, [7]_ for an overview and further information about their reliability).
In particular, NIFTy uses the midpoint method as reviewed in section 2.1 in [6]_ and Fourier expansion.
As for stochastic processes, several discretisations of :math:`\mathcal{S}` like collocation methods, expansion into orthogonal polynomials, etc. can be used (see [1]_, [2]_ for an overview and further information about their reliability).
In particular, NIFTy uses the midpoint method as reviewed in section 2.1 of [1]_ and Fourier expansion.
Without going into the details, discretisation methods basically introduce a finite set of basis functions :math:`\{\phi_i\}_{i\in \mathcal{I}}`, where :math:`\mathcal{I}` denotes a generic index set with :math:`|\mathcal{I}| = N` being the chosen discretisation dimension.
Any Riemannian manifold :math:`(\mathcal{M},g)` is equipped with a canonical scalar product given by
......@@ -70,7 +70,7 @@ After projection, any function :math:`f \in \mathcal{S}` is represented by its a
which defines an embedding :math:`\hat{\mathcal{S}} \hookrightarrow \mathcal{S} = \mathcal{F}(\mathcal{M})`.
**Changes of bases** are performed by reapproximating the :math:`\{\phi_i\}_{i\in \mathcal{I}}` in terms of another basis :math:`\{\phi'_i\}_{i\in \mathcal{I'}}` :
**Changes of base** are performed by reapproximating the :math:`\{\phi_i\}_{i\in \mathcal{I}}` in terms of another basis :math:`\{\phi'_i\}_{i\in \mathcal{I'}}`:
.. math::
......@@ -81,7 +81,7 @@ The latter is e.g. true for regular collocation grids on tori and the associated
The discrete Fourier transform then maps between those bases without loss of information.
**Discretisation of operators** works in the same way by expansion.
For illustration purposes, let :math:`A: \mathcal{S} \rightarrow \mathcal{S}` be a not necessarily linear operator.
For illustration purposes, let :math:`A: \mathcal{S} \rightarrow \mathcal{S}` be a (not necessarily linear) operator.
The result of its action on functions :math:`s` is known and may be expanded in :math:`\{\phi_i\}_{i\in \mathcal{I}}`, i.e.
.. math::
......@@ -94,14 +94,14 @@ Integrals can now be written as
\left< s , A[t] \right>_{\mathcal{M}} \approx s^i \left< \phi_i , \phi_j \right>_{\mathcal{M}} (A[t])^j \equiv s^i \, v_{ij} \, (A[t])^j \, ,
where the appearence of the volume metric can be hidden by lowering the first index of the operator,
where the appearance of the volume metric can be hidden by lowering the first index of the operator,
.. math::
(A[w])_k := v_{km} \, (A[w])^m \, .
Hence, the volume metric needs not to be carried along if the operators are defined in this fashion right from the start.
Linear operators mapping several functions to another function are completly specified by their action on a given basis, and we define
Hence, the volume metric need not be carried along if the operators are defined in this fashion right from the start.
Linear operators mapping several functions to another function are completely specified by their action on a given basis, and we define
.. math::
......@@ -115,8 +115,8 @@ If :math:`A` is a (linear) integral operator defined by a kernel :math:`\tilde{A
&= v^{km} \, \left< \phi_m, A[\phi_i,\phi_j,\ldots] \right>_{\mathcal{M}} \\
&= v^{km} \, \int_{\mathcal{M}} \mathrm{d} x\,\sqrt{|g|}\,\left(\prod_{n}^{|\{ij\ldots\}|}\int_{\mathcal{M}} \mathrm{d} y_n \, \sqrt{|g|}\right) \,\,\phi_m(x)\, \tilde{A}(x,y_1,y_2,\ldots)\, \phi_i(y_1) \, \phi_j(y_2) \cdots \, .
.. [6] Bruno Sudret and Armen Der Kiureghian (2000), "Stochastic Finite Element Methods and Reliability: A State-of-the-Art Report"
.. [7] Dongbin Xiu (2010), "Numerical methods for stochastic computations", Princeton University Press.
.. [1] Bruno Sudret and Armen Der Kiureghian (2000), "Stochastic Finite Element Methods and Reliability: A State-of-the-Art Report"
.. [2] Dongbin Xiu (2010), "Numerical methods for stochastic computations", Princeton University Press.
Resolution and self-consistency
...............................
......@@ -129,40 +129,40 @@ Apparently, the discretisation and the discretised response need to satisfy a se
.. math::
R = \hat{R} \circ D \, .
An obvious corrollary is that different discretisations :math:`D, D'` with resulting discretised responses :math:`\hat{R}, \hat{R}'` will need to satisfy
An obvious corollary is that different discretisations :math:`D, D'` with resulting discretised responses :math:`\hat{R}, \hat{R}'` will need to satisfy
.. math::
\hat{R} \circ D = \hat{R}' \circ D' \, .
NIFTy is implemented such that in order to change resolution, only the line of code defining the space needs to be altered.
It automatically takes care of depended structures like volume factors, discretised operators and responses.
A visualisation of this can be seen in figure 2 and 3, which displays the MAP inference of a signal at various resolutions.
It automatically takes care of dependent structures like volume factors, discretised operators and responses.
A visualisation of this can be seen in figure 2, which displays the MAP inference of a signal at various resolutions.
.. figure:: images/converging_discretization.png
:scale: 80%
:align: center
Figure 3: Inference result converging at high resolution.
Figure 2: Inference result converging at high resolution.
Implementation in NIFTy
-----------------------
.......................
.. currentmodule:: nifty5
Most codes in NIFTy will contain the description of a measurement process,
or more generally, a log-likelihood.
Most codes in NIFTy will contain the description of a measurement process or,
more generally, a log-likelihood.
This log-likelihood is necessarily a map from the quantity of interest (a field) to a real number.
The likelihood has to be unitless because it is a log-probability and should not scale with resolution.
Often, likelihoods contain integrals over the quantity of interest :math:`s`, which have to be discretized, e.g. by a sum
The log-likelihood has to be unitless because it is a log-probability and should not scale with resolution.
Often, log-likelihoods contain integrals over the quantity of interest :math:`s`, which have to be discretized, e.g. by a sum
.. math::
\int_\Omega \text{d}x\, s(x) \approx \sum_i s_i\int_{\Omega_i}\text{d}x\, 1
\int_\Omega \text{d}x\, s(x) \approx \sum_i s^i\int_{\Omega_i}\text{d}x\, 1
Here the domain of the integral :math:`\Omega = \dot{\bigcup_i} \; \Omega_i` is the disjoint union over smaller :math:`\Omega_i`, e.g. the pixels of the space, and :math:`s^i` is the discretized field value on the :math:`i`-th pixel.
This introduces the weighting :math:`V_i=\int_{\Omega_i}\text{d}x\, 1`, also called the volume factor, a property of the space.
NIFTy aids you in constructing your own likelihood by providing methods like :func:`~field.Field.weight`, which weights all pixels of a field with its corresponding volume.
NIFTy aids you in constructing your own log-likelihood by providing methods like :func:`~field.Field.weight`, which weights all pixels of a field with their corresponding volume.
An integral over a :class:`~field.Field` :code:`s` can be performed by calling :code:`s.weight(1).sum()`, which is equivalent to :code:`s.integrate()`.
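For instance, integrating a constant field over the unit interval (pixelisation chosen arbitrarily)::

    import nifty5 as ift

    dom = ift.RGSpace(1000, distances=0.001)  # unit interval, 1000 pixels
    s = ift.full(dom, 2.)                     # s(x) = 2 everywhere
    s.weight(1).sum()                         # ~ 2.0, the integral over [0, 1)
    s.integrate()                             # the same, via the convenience method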
Volume factors are also applied automatically in the following places:
......@@ -170,32 +170,38 @@ Volume factors are also applied automatically in the following places:
- some response operators, such as the :class:`~library.los_response.LOSResponse`. In this operator a line integral is descritized, so a 1-dimensional volume factor is applied.
- In :class:`~library.correlated_fields.CorrelatedField` as well :class:`~library.correlated_fields.MfCorrelatedField`, the field is multiplied by the square root of the total volume in configuration space. This ensures that the same field reconstructed over a larger domain has the same variance in position space in the limit of infinite resolution. It also ensures that power spectra in NIFTy behave according to the definition of a power spectrum, namely the power of a k-mode is the expectation of the k-mode square, divided by the volume of the space.
Note that in contrast to some older versions of NIFTy, the dot product of fields does not apply a volume factor
Note that in contrast to some older versions of NIFTy, the dot product :code:`s.vdot(t)` of fields does **not** apply a volume factor, but instead just sums over the field components,
.. math::
s^\dagger t = \sum_i s_i^* t_i .
s^\dagger t = \sum_i \overline{s^i}\, t^i \, ,
If this dot product is supposed to be invariant under changes in resolution, then either :math:`s_i` or :math:`t_i` has to decrease as the number of pixels increases, or more specifically, one of the two fields has to be an extensive quantity while the other has to be intensive.
One can make this more explicit by denoting intensive quantities with upper index and extensive quantities with lower index
where the bar denotes complex conjugation.
This dot product is **not** invariant under changes in resolution, as then the number of discretised field components increases.
Upper index components like :math:`s^i`, however, are designed **not** to scale with the volume.
One solution to obtain a resolution independent quantity is to make one of the two factors extensive while the other stays intensive.
This is more explicit when intensive quantities are denoted by an upper index and extensive quantities by a lower index,
.. math::
s^\dagger t = (s^*)^i t_i
s^\dagger t = \overline{s^i} t_i
where we used Einstein sum convention.
This notation connects to the theoretical discussion before.
One of the field has to have the volume metric already incorperated to assure the continouum limit works.
Here, the volume metric is incorporated by lowering one index, i.e. :math:`t_i = v_{ij}\,t^j`.
When building statistical models, all indices will end up matching this upper-lower convention automatically, e.g. for a Gaussian log-likelihood :math:`L` we have
.. math::
L = \frac{1}{2}s^i \left(S^{-1}\right)_{ij} s^j
L = \frac{1}{2}\overline{s^i} \left(S^{-1}\right)_{ij} s^j
with
with the covariance defined by
.. math::
\left(S^{-1}\right)_{ij} = \left(S^{kl}\right)_ij^{-1} = \left(\left<(s^*)^ks^l\right>\right)^{-1})_{ij}\ .
S^{ij} = \left<s^i\overline{s^j}\right>\ .
Thus the covariance matrix :math:`S` will ensure that the whole likelihood expression does not scale with resolution.
**This upper-lower index convention is not coded into NIFTy**, in order to not reduce user freedom.
One should however have this in mind when constructing algorithms in order to ensure resolution independence.
Consequently, the inverse covariance operator will automatically have lower indices,
Note that while the upper-lower index convention ensures resolution independence, this does not automatically fix the pixelization.
.. math::
\left(S^{-1}\right)_{ij} S^{jk} = \delta_i^{\,\,k}\ ,
ensuring that the whole log-likelihood expression does not scale with resolution.
**This upper-lower index convention is not coded into NIFTy**, in order to not reduce user freedom.
One should however have this in mind when constructing log-likelihoods in order to ensure resolution independence.
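The resolution behaviour described above can be checked directly (toy fields, arbitrary pixel numbers)::

    import nifty5 as ift

    for npix in (100, 1000):
        dom = ift.RGSpace(npix, distances=1./npix)
        s = ift.full(dom, 1.)
        t = ift.full(dom, 1.)
        print(s.vdot(t))            # grows with the number of pixels
        print(s.vdot(t.weight(1)))  # ~ 1, independent of resolution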
......@@ -86,7 +86,10 @@ class Domain(metaclass=NiftyMeta):
@property
def local_shape(self):
"""tuple of int: number of pixels along each axis on the local task
"""tuple of int: number of pixels along each axis on the local task,
mainly relevant for MPI.
See :attr:`~Domain.shape` for a general explanation of this property.
The shape of the array-like object required to store information
defined on part of the domain which is stored on the local MPI task.
......
......@@ -87,11 +87,11 @@ class StructuredDomain(Domain):
def get_fft_smoothing_kernel_function(self, sigma):
"""Helper for Gaussian smoothing.
This method, which is only implemented for harmonic domains, helps
smoothing fields that are defined on a domain that has this domain as
its harmonic partner. The returned function multiplies field values of
a field with a zero centered Gaussian which corresponds to a
convolution with a Gaussian kernel and sigma standard deviation in
This method, which is only implemented for harmonic domains, helps to
smooth fields that are defined on a domain that has this domain as
its harmonic partner. The returned function does a pointwise evaluation
of a zero-centered Gaussian on the field values, which corresponds to a
convolution with a Gaussian kernel with sigma standard deviation in
position space.
Parameters
......
......@@ -171,4 +171,3 @@ def check_jacobian_consistency(op, loc, tol=1e-8, ntries=100):
else:
raise ValueError("gradient and value seem inconsistent")
loc = locnext
......@@ -97,24 +97,24 @@ def _cone_arrays(c, domain, sigx, want_gradient):
class LightConeOperator(Operator):
'''Constructs a light cone from a set of lightspeed parameters.
The resulting cone is defined as follows
.. math::
\\exp \\left(- \\frac{1}{2} \\Re \\left( \\Delta \\right)^2 \\right)
with
.. math::
\\Delta = \\sqrt{- \\left(t^2 - \\frac{x^\\dagger C^{-1} x}
{\\sigma_x^2} \\right)}
where t and x are the coordinates of the target space. Note that axis zero
of the space is interpreted as the time axis. C denotes the input
parameters of the operator and parametrizes the shape of the cone.
sigx is the width of the asymptotic Gaussian in x necessary for
discretization.
Parameters
----------
domain : Domain, tuple of Domain or DomainTuple
......
......@@ -261,7 +261,6 @@ class BernoulliEnergy(EnergyOperator):
"""
def __init__(self, d):
print(d.dtype)
if not isinstance(d, Field) or not np.issubdtype(d.dtype, np.integer):
raise TypeError
if not np.all(np.logical_or(d.local_data == 0, d.local_data == 1)):
......
......@@ -31,8 +31,8 @@ class ExpTransform(LinearOperator):
This operator creates a log-space subject to the degrees of freedom and
its target-domain.
Then it transforms between this log-space and its target, which is defined in
normal units.
Then it transforms between this log-space and its target, which is defined
in normal units.
FIXME Write something on t_0 of domain space
......
......@@ -26,7 +26,7 @@ from .linear_operator import LinearOperator
class RegriddingOperator(LinearOperator):
"""Linearly interpolates a RGSpace to an RGSpace with coarser resolution.
"""Linearly interpolates an RGSpace to an RGSpace with coarser resolution.
Parameters
----------
......@@ -47,7 +47,6 @@ class RegriddingOperator(LinearOperator):
if not isinstance(dom, RGSpace):
raise TypeError("RGSpace required")
if len(new_shape) != len(dom.shape):
print(new_shape, dom.shape)
raise ValueError("Shape mismatch")
if any([a > b for a, b in zip(new_shape, dom.shape)]):
raise ValueError("New shape must not be larger than old shape")
......
......@@ -530,8 +530,14 @@ class Plot(object):
fig = plt.figure()
if "title" in kwargs:
plt.suptitle(kwargs.pop("title"))
nx = kwargs.pop("nx", int(np.ceil(np.sqrt(nplot))))
ny = kwargs.pop("ny", int(np.ceil(np.sqrt(nplot))))
nx = kwargs.pop("nx", 0)
ny = kwargs.pop("ny", 0)
if nx == ny == 0:
nx = ny = int(np.ceil(np.sqrt(nplot)))
elif nx == 0:
nx = int(np.ceil(nplot/ny))
elif ny == 0:
ny = int(np.ceil(nplot/nx))
if nx*ny < nplot:
raise ValueError(
'Figure dimensions not sufficient for number of plots. '
......
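For context, a sketch of how these nx/ny defaults behave from the user side (file name and fields are only illustrative; requires matplotlib):

    import nifty5 as ift

    dom = ift.RGSpace([64, 64])
    p = ift.Plot()
    for _ in range(5):
        p.add(ift.from_random('normal', dom))
    p.output(nx=3, name='panels.png')  # ny is then derived as ceil(5/3) = 2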