ift / NIFTy · Commits

Commit 15e8159c, authored Jan 16, 2019 by Martin Reinecke

Merge branch 'docstrings_torsten' into 'NIFTy_5'

Docstrings torsten

See merge request ift/nifty-dev!167

Parents: 4f692156 661d1746

Changes: 5 files
docs/source/ift.rst
@@ -94,7 +94,7 @@ A free IFT appears when the signal field :math:`{s}` and the noise :math:`{n}` o

     \mathcal{P}(s,n) = \mathcal{G}(s,S)\,\mathcal{G}(n,N),

-and the measurement equation is linear in both, signal and noise,
+and the measurement equation is linear in both signal and noise,

 .. math::
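For orientation only (not part of the diff): the linear, signal-plus-noise measurement model that this sentence in ift.rst refers to is the standard IFT data equation; in the notation of the surrounding section, with R the linear response, it reads

.. math::

    d = R\,s + n .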
nifty5/__init__.py
@@ -48,7 +48,7 @@ from .operators.simple_linear_operators import (
     FieldAdapter, ducktape, GeometryRemover, NullOperator)
 from .operators.energy_operators import (
     EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood,
-    BernoulliEnergy, Hamiltonian, SampledKullbachLeiblerDivergence)
+    BernoulliEnergy, Hamiltonian, AveragedEnergy)
 from .probing import probe_with_posterior_samples, probe_diagonal, \
     StatCalculator
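The only user-visible change in this hunk is the rename of the sample-averaged KL operator. A minimal sketch of how calling code changes (the toy likelihood, space and sample count below are illustrative, not taken from the commit):

import nifty5 as ift

space = ift.RGSpace(8)
lh = ift.GaussianEnergy(mean=ift.full(space, 0.))   # toy likelihood energy
hamiltonian = ift.Hamiltonian(lh)
samples = [ift.ScalingOperator(1., space).draw_sample() for _ in range(3)]

# before this commit:
# kl = ift.SampledKullbachLeiblerDivergence(hamiltonian, samples)
# after this commit:
kl = ift.AveragedEnergy(hamiltonian, samples)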
nifty5/field.py
@@ -442,7 +442,7 @@ class Field(object):
         ----------
         spaces : None, int or tuple of int
             The summation is only carried out over the sub-domains in this
-            tuple. If None, it is carried out over all sub-domains.
-            Default: None.
+            tuple. If None, it is carried out over all sub-domains.

         Returns
         -------
@@ -463,7 +463,6 @@ class Field(object):
         spaces : None, int or tuple of int
             The summation is only carried out over the sub-domains in this
             tuple. If None, it is carried out over all sub-domains.
-            Default: None.

         Returns
         -------
@@ -547,7 +546,7 @@ class Field(object):
         ----------
         spaces : None, int or tuple of int
             The operation is only carried out over the sub-domains in this
-            tuple. If None, it is carried out over all sub-domains.
-            Default: None.
+            tuple. If None, it is carried out over all sub-domains.

         Returns
         -------
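The `spaces` parameter documented in these hunks selects the sub-domains over which a reduction such as `sum` runs. A minimal sketch (domain shapes are arbitrary; behaviour as suggested by the docstring):

import nifty5 as ift

dom = ift.DomainTuple.make((ift.RGSpace(8), ift.RGSpace(4)))
f = ift.full(dom, 2.)

total = f.sum()            # reduce over all sub-domains -> plain scalar
partial = f.sum(spaces=0)  # reduce only over the first sub-domain;
                           # the result is a Field on the remaining RGSpace(4)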
nifty5/operators/energy_operators.py
@@ -15,11 +15,14 @@
 #
 # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.

+import numpy as np
+
+from .. import utilities
 from ..domain_tuple import DomainTuple
 from ..field import Field
 from ..linearization import Linearization
-from ..sugar import makeOp, makeDomain
+from ..sugar import makeDomain, makeOp
 from .linear_operator import LinearOperator
 from .operator import Operator
 from .sampling_enabler import SamplingEnabler
 from .sandwich_operator import SandwichOperator
@@ -27,11 +30,28 @@ from .simple_linear_operators import VdotOperator
 class EnergyOperator(Operator):
-    """Operator which has a scalar domain as target domain."""
+    """Operator which has a scalar domain as target domain.
+
+    It is intended as an objective function for field inference.
+
+    Examples
+    --------
+     - Information Hamiltonian, i.e. negative-log-probabilities.
+     - Gibbs free energy, i.e. an averaged Hamiltonian, aka Kullbach-Leibler
+       divergence.
+    """
     _target = DomainTuple.scalar_domain()


 class SquaredNormOperator(EnergyOperator):
+    """Computes the L2-norm of the output of an operator.
+
+    Parameters
+    ----------
+    domain : Domain, DomainTuple or tuple of Domain
+        Target domain of the operator in which the L2-norm shall be computed.
+    """
     def __init__(self, domain):
         self._domain = domain
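As the new docstring states, an EnergyOperator maps its input to the scalar domain. A hedged sketch using SquaredNormOperator, imported from the module path since this diff does not show it being re-exported in nifty5/__init__.py:

import nifty5 as ift
from nifty5.operators.energy_operators import SquaredNormOperator

space = ift.RGSpace(16)
f = ift.full(space, 3.)

E = SquaredNormOperator(f.domain)  # an EnergyOperator
val = E(f)                         # Field on the scalar domain, value f.vdot(f)
print(val.to_global_data())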
@@ -45,12 +65,24 @@ class SquaredNormOperator(EnergyOperator):
 class QuadraticFormOperator(EnergyOperator):
-    def __init__(self, op):
+    """Computes the L2-norm of a Field or MultiField with respect to a
+    specific metric `endo`.
+
+    .. math ::
+        E(f) = \\frac12 f^\\dagger \\text{endo}(f)
+
+    Parameters
+    ----------
+    endo : EndomorphicOperator
+        Kernel of quadratic form.
+    """
+    def __init__(self, endo):
         from .endomorphic_operator import EndomorphicOperator
-        if not isinstance(op, EndomorphicOperator):
+        if not isinstance(endo, EndomorphicOperator):
             raise TypeError("op must be an EndomorphicOperator")
-        self._op = op
-        self._domain = op.domain
+        self._op = endo
+        self._domain = endo.domain

     def apply(self, x):
         self._check_input(x)
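A hedged sketch of the quadratic form E(f) = 1/2 f^dagger endo(f) with a ScalingOperator as the endomorphic kernel (the factor and space are made up):

import nifty5 as ift
from nifty5.operators.energy_operators import QuadraticFormOperator

space = ift.RGSpace(16)
endo = ift.ScalingOperator(2., space)   # an EndomorphicOperator
E = QuadraticFormOperator(endo)

f = ift.full(space, 1.)
print(E(f).to_global_data())            # 0.5 * f.vdot(endo(f))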
@@ -63,7 +95,38 @@ class QuadraticFormOperator(EnergyOperator):
 class GaussianEnergy(EnergyOperator):
+    """Class for energies of fields with Gaussian probability distribution.
+
+    Represents up to constants in :math:`m`:
+
+    .. math ::
+        E(f) = - \\log G(f-m, D) = 0.5 (f-m)^\\dagger D^{-1} (f-m),
+
+    an information energy for a Gaussian distribution with mean m and
+    covariance D.
+
+    Parameters
+    ----------
+    mean : Field
+        Mean of the Gaussian. Default is 0.
+    covariance : LinearOperator
+        Covariance of the Gaussian. Default is the identity operator.
+    domain : Domain, DomainTuple, tuple of Domain or MultiDomain
+        Operator domain. By default it is inferred from `mean` or
+        `covariance` if specified
+
+    Note
+    ----
+    At least one of the arguments has to be provided.
+    """
     def __init__(self, mean=None, covariance=None, domain=None):
         if mean is not None and not isinstance(mean, Field):
             raise TypeError
         if covariance is not None and not isinstance(covariance,
                                                      LinearOperator):
             raise TypeError
         self._domain = None
         if mean is not None:
             self._checkEquivalence(mean.domain)
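A hedged usage sketch of the Gaussian energy documented above, with a toy mean and a scaled-identity covariance (all values illustrative):

import nifty5 as ift

space = ift.RGSpace(16)
m = ift.full(space, 0.5)               # mean
N = ift.ScalingOperator(0.1, space)    # covariance (scaled identity)

E = ift.GaussianEnergy(mean=m, covariance=N)
f = ift.full(space, 1.)
print(E(f).to_global_data())           # 0.5 * (f-m).vdot(N.inverse(f-m))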
@@ -90,7 +153,7 @@ class GaussianEnergy(EnergyOperator):
     def apply(self, x):
         self._check_input(x)
         residual = x if self._mean is None else x - self._mean
         res = self._op(residual).real
         if not isinstance(x, Linearization) or not x.want_metric:
             return res
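When the operator is called on a Linearization with want_metric=True, the branch shown above falls through and a metric is attached. A hedged sketch (assuming Linearization.make_var accepts a want_metric flag, as the code in this hunk implies):

import nifty5 as ift

space = ift.RGSpace(16)
E = ift.GaussianEnergy(mean=ift.full(space, 0.))

f = ift.full(space, 1.)
lin = ift.Linearization.make_var(f, want_metric=True)
res = E(lin)                       # Linearization of the scalar energy
print(res.val.to_global_data())    # energy value
metric = res.metric                # e.g. usable for drawing Gaussian samples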
@@ -99,7 +162,29 @@ class GaussianEnergy(EnergyOperator):
 class PoissonianEnergy(EnergyOperator):
+    """Class for likelihood Hamiltonians of expected count field constrained
+    by Poissonian count data.
+
+    Represents up to an f-independent term :math:`log(d!)`:
+
+    .. math ::
+        E(f) = -\\log \\text{Poisson}(d|f) = \\sum f - d^\\dagger \\log(f),
+
+    where f is a :class:`Field` in data space with the expectation values for
+    the counts.
+
+    Parameters
+    ----------
+    d : Field
+        Data field with counts. Needs to have integer dtype and all field
+        values need to be non-negative.
+    """
     def __init__(self, d):
+        if not isinstance(d, Field) or not np.issubdtype(d.dtype, np.integer):
+            raise TypeError
+        if np.any(d.local_data < 0):
+            raise ValueError
         self._d = d
         self._domain = DomainTuple.make(d.domain)
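A hedged sketch of the Poissonian likelihood energy on a tiny unstructured data space (counts and expected rates are made up):

import numpy as np
import nifty5 as ift

space = ift.UnstructuredDomain(4)
counts = ift.Field.from_global_data(space, np.array([0, 2, 5, 1]))

E = ift.PoissonianEnergy(counts)
rates = ift.full(space, 2.)        # expected counts, must be positive
print(E(rates).to_global_data())   # sum(rates) - counts.vdot(log(rates))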
@@ -115,7 +200,13 @@ class PoissonianEnergy(EnergyOperator):
 class InverseGammaLikelihood(EnergyOperator):
+    """
+    FIXME
+    """
     def __init__(self, d):
+        if not isinstance(d, Field):
+            raise TypeError
         self._d = d
         self._domain = DomainTuple.make(d.domain)
@@ -131,23 +222,78 @@ class InverseGammaLikelihood(EnergyOperator):
 class BernoulliEnergy(EnergyOperator):
+    """Computes likelihood energy of expected event frequency constrained by
+    event data.
+
+    .. math ::
+        E(f) = -\\log \\text{Bernoulli}(d|f)
+             = -d^\\dagger \\log f - (1-d)^\\dagger \\log(1-f),
+
+    where f is a field defined on `d.domain` with the expected
+    frequencies of events.
+
+    Parameters
+    ----------
+    d : Field
+        Data field with events (1) or non-events (0).
+    """
     def __init__(self, d):
-        print(d.dtype)
+        if not isinstance(d, Field) or not np.issubdtype(d.dtype, np.integer):
+            raise TypeError
+        if not np.all(np.logical_or(d.local_data == 0, d.local_data == 1)):
+            raise ValueError
         self._d = d
         self._domain = DomainTuple.make(d.domain)

     def apply(self, x):
         self._check_input(x)
-        v = x.log().vdot(-self._d) - (1.-x).log().vdot(1.-self._d)
+        v = -(x.log().vdot(self._d) + (1.-x).log().vdot(1.-self._d))
         if not isinstance(x, Linearization):
             return Field.scalar(v)
         if not x.want_metric:
             return v
         met = makeOp(1./(x.val*(1.-x.val)))
         met = SandwichOperator.make(x.jac, met)
         return v.add_metric(met)


 class Hamiltonian(EnergyOperator):
+    """Computes an information Hamiltonian in its standard form, i.e. with the
+    prior being a Gaussian with unit covariance.
+
+    Let the likelihood energy be :math:`E_{lh}`. Then this operator computes:
+
+    .. math ::
+         H(f) = 0.5 f^\\dagger f + E_{lh}(f):
+
+    Other field priors can be represented via transformations of a white
+    Gaussian field into a field with the desired prior probability structure.
+
+    By implementing prior information this way, the field prior is represented
+    by a generative model, from which NIFTy can draw samples and infer a field
+    using the Maximum a Posteriori (MAP) or the Variational Bayes (VB) method.
+
+    The metric of this operator can be used as covariance for drawing Gaussian
+    samples.
+
+    Parameters
+    ----------
+    lh : EnergyOperator
+        The likelihood energy.
+    ic_samp : IterationController
+        Tells an internal :class:`SamplingEnabler` which convergence criterium
+        to use to draw Gaussian samples.
+
+    See also
+    --------
+    `Encoding prior knowledge in the structure of the likelihood`,
+    Jakob Knollmüller, Torsten A. Ensslin,
+    `<https://arxiv.org/abs/1812.04403>`_
+    """
     def __init__(self, lh, ic_samp=None):
         self._lh = lh
         self._prior = GaussianEnergy(domain=lh.domain)
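A hedged sketch of building a standard-form Hamiltonian from a likelihood energy, with an iteration controller for metric sampling (controller settings and the toy likelihood are arbitrary):

import nifty5 as ift

space = ift.RGSpace(16)
likelihood = ift.GaussianEnergy(mean=ift.full(space, 0.))

ic_samp = ift.GradientNormController(iteration_limit=50)
H = ift.Hamiltonian(likelihood, ic_samp)   # 0.5 f^dagger f + E_lh(f)

f = ift.full(space, 1.)
print(H(f).to_global_data())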
@@ -156,14 +302,14 @@ class Hamiltonian(EnergyOperator):
     def apply(self, x):
         self._check_input(x)
         if (self._ic_samp is None or not isinstance(x, Linearization)
                 or not x.want_metric):
             return self._lh(x) + self._prior(x)
         else:
             lhx, prx = self._lh(x), self._prior(x)
             mtr = SamplingEnabler(lhx.metric, prx.metric.inverse,
                                   self._ic_samp, prx.metric.inverse)
             return (lhx+prx).add_metric(mtr)

     def __repr__(self):
         subs = 'Likelihood:\n{}'.format(utilities.indent(self._lh.__repr__()))
@@ -171,18 +317,62 @@ class Hamiltonian(EnergyOperator):
         return 'Hamiltonian:\n' + utilities.indent(subs)


-class SampledKullbachLeiblerDivergence(EnergyOperator):
-    def __init__(self, h, res_samples):
-        """
-        h: Hamiltonian
-        N: Number of samples to be used
-        """
+# MR FIXME: does h have to be a Hamiltonian? Couldn't it be any energy?
+class AveragedEnergy(EnergyOperator):
+    """Computes Kullbach-Leibler (KL) divergence or Gibbs free energies.
+
+    A sample-averaged energy, e.g. an Hamiltonian, approximates the relevant
+    part of a KL to be used in Variational Bayes inference if the samples are
+    drawn from the approximating Gaussian:
+
+    .. math ::
+        \\text{KL}(m) = \\frac1{\\#\{v_i\}} \\sum_{v_i} H(m+v_i),
+
+    where :math:`v_i` are the residual samples and :math:`m` is the mean field
+    around which the samples are drawn.
+
+    Parameters
+    ----------
+    h: Hamiltonian
+       The energy to be averaged.
+    res_samples : iterable of Fields
+       Set of residual sample points to be added to mean field for approximate
+       estimation of the KL.
+
+    Note
+    ----
+    Having symmetrized residual samples, with both v_i and -v_i being present
+    ensures that the distribution mean is exactly represented. This reduces
+    sampling noise and helps the numerics of the KL minimization process in the
+    variational Bayes inference.
+
+    See also
+    --------
+    Let :math:`Q(f) = G(f-m,D)` be the Gaussian distribution
+    which is used to approximate the accurate posterior :math:`P(f|d)` with
+    information Hamiltonian
+    :math:`H(d,f) = -\\log P(d,f) = -\\log P(f|d) + \\text{const}`. In
+    Variational Bayes one needs to optimize the KL divergence between those
+    two distributions for m. It is:
+
+    :math:`KL(Q,P) = \\int Df Q(f) \\log Q(f)/P(f)\\\\
+    = \\left< \\log Q(f) \\right>_Q(f) - \\left< \\log P(f) \\right>_Q(f)\\\\
+    = \\text{const} + \\left< H(f) \\right>_G(f-m,D)`
+
+    in essence the information Hamiltonian averaged over a Gaussian
+    distribution centered on the mean m.
+
+    :class:`AveragedEnergy(h)` approximates
+    :math:`\\left< H(f) \\right>_{G(f-m,D)}` if the residuals
+    :math:`f-m` are drawn from a Gaussian distribution with covariance
+    :math:`D`.
+    """
+    def __init__(self, h, res_samples):
         self._h = h
         self._domain = h.domain
         self._res_samples = tuple(res_samples)

     def apply(self, x):
         self._check_input(x)
         mymap = map(lambda v: self._h(x+v), self._res_samples)
         return utilities.my_sum(mymap)*(1./len(self._res_samples))
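The Note in the new docstring recommends symmetrized residual samples; a hedged sketch of constructing an AveragedEnergy that way (toy Hamiltonian and sample count as in the test below, symmetrization added for illustration):

import nifty5 as ift

space = ift.RGSpace(16)
H = ift.Hamiltonian(ift.GaussianEnergy(mean=ift.full(space, 0.)))

S = ift.ScalingOperator(1., space)
samples = [S.draw_sample() for _ in range(3)]
samples = samples + [-s for s in samples]   # include -v_i for every v_i

kl = ift.AveragedEnergy(H, samples)
mean = ift.full(space, 0.)
print(kl(mean).to_global_data())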
test/test_energy_gradients.py
@@ -73,7 +73,7 @@ def test_hamiltonian_and_KL(field):
     ift.extra.check_value_gradient_consistency(hamiltonian, field)
     S = ift.ScalingOperator(1., space)
     samps = [S.draw_sample() for i in range(3)]
-    kl = ift.SampledKullbachLeiblerDivergence(hamiltonian, samps)
+    kl = ift.AveragedEnergy(hamiltonian, samps)
     ift.extra.check_value_gradient_consistency(kl, field)