Commit 15e8159c by Martin Reinecke

### Merge branch 'docstrings_torsten' into 'NIFTy_5'

Docstrings torsten

See merge request ift/nifty-dev!167
parents 4f692156 661d1746
 ... @@ -94,7 +94,7 @@ A free IFT appears when the signal field :math:`s` and the noise :math:`n` o ... @@ -94,7 +94,7 @@ A free IFT appears when the signal field :math:`s` and the noise :math:`n` o \mathcal{P}(s,n) = \mathcal{G}(s,S)\,\mathcal{G}(n,N), \mathcal{P}(s,n) = \mathcal{G}(s,S)\,\mathcal{G}(n,N), and the measurement equation is linear in both, signal and noise, and the measurement equation is linear in both signal and noise, .. math:: .. math:: ... ...
 ... @@ -48,7 +48,7 @@ from .operators.simple_linear_operators import ( ... @@ -48,7 +48,7 @@ from .operators.simple_linear_operators import ( FieldAdapter, ducktape, GeometryRemover, NullOperator) FieldAdapter, ducktape, GeometryRemover, NullOperator) from .operators.energy_operators import ( from .operators.energy_operators import ( EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood, EnergyOperator, GaussianEnergy, PoissonianEnergy, InverseGammaLikelihood, BernoulliEnergy, Hamiltonian, SampledKullbachLeiblerDivergence) BernoulliEnergy, Hamiltonian, AveragedEnergy) from .probing import probe_with_posterior_samples, probe_diagonal, \ from .probing import probe_with_posterior_samples, probe_diagonal, \ StatCalculator StatCalculator ... ...
 ... @@ -442,7 +442,7 @@ class Field(object): ... @@ -442,7 +442,7 @@ class Field(object): ---------- ---------- spaces : None, int or tuple of int spaces : None, int or tuple of int The summation is only carried out over the sub-domains in this The summation is only carried out over the sub-domains in this tuple. If None, it is carried out over all sub-domains. Default: None. tuple. If None, it is carried out over all sub-domains. Returns Returns ------- ------- ... @@ -463,7 +463,6 @@ class Field(object): ... @@ -463,7 +463,6 @@ class Field(object): spaces : None, int or tuple of int spaces : None, int or tuple of int The summation is only carried out over the sub-domains in this The summation is only carried out over the sub-domains in this tuple. If None, it is carried out over all sub-domains. tuple. If None, it is carried out over all sub-domains. Default: None. Returns Returns ------- ------- ... @@ -547,7 +546,7 @@ class Field(object): ... @@ -547,7 +546,7 @@ class Field(object): ---------- ---------- spaces : None, int or tuple of int spaces : None, int or tuple of int The operation is only carried out over the sub-domains in this The operation is only carried out over the sub-domains in this tuple. If None, it is carried out over all sub-domains. Default: None. tuple. If None, it is carried out over all sub-domains. Returns Returns ------- ------- ... ...
 ... @@ -15,11 +15,14 @@ ... @@ -15,11 +15,14 @@ # # # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik. # NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik. import numpy as np from .. import utilities from .. import utilities from ..domain_tuple import DomainTuple from ..domain_tuple import DomainTuple from ..field import Field from ..field import Field from ..linearization import Linearization from ..linearization import Linearization from ..sugar import makeOp, makeDomain from ..sugar import makeDomain, makeOp from .linear_operator import LinearOperator from .operator import Operator from .operator import Operator from .sampling_enabler import SamplingEnabler from .sampling_enabler import SamplingEnabler from .sandwich_operator import SandwichOperator from .sandwich_operator import SandwichOperator ... @@ -27,11 +30,28 @@ from .simple_linear_operators import VdotOperator ... @@ -27,11 +30,28 @@ from .simple_linear_operators import VdotOperator class EnergyOperator(Operator): class EnergyOperator(Operator): """Operator which has a scalar domain as target domain.""" """Operator which has a scalar domain as target domain. It is intended as an objective function for field inference. Examples -------- - Information Hamiltonian, i.e. negative-log-probabilities. - Gibbs free energy, i.e. an averaged Hamiltonian, aka Kullbach-Leibler divergence. """ _target = DomainTuple.scalar_domain() _target = DomainTuple.scalar_domain() class SquaredNormOperator(EnergyOperator): class SquaredNormOperator(EnergyOperator): """Computes the L2-norm of the output of an operator. Parameters ---------- domain : Domain, DomainTuple or tuple of Domain Target domain of the operator in which the L2-norm shall be computed. """ def __init__(self, domain): def __init__(self, domain): self._domain = domain self._domain = domain ... @@ -45,12 +65,24 @@ class SquaredNormOperator(EnergyOperator): ... 
@@ -45,12 +65,24 @@ class SquaredNormOperator(EnergyOperator): class QuadraticFormOperator(EnergyOperator): class QuadraticFormOperator(EnergyOperator): def __init__(self, op): """Computes the L2-norm of a Field or MultiField with respect to a specific metric endo. .. math :: E(f) = \\frac12 f^\\dagger \\text{endo}(f) Parameters ---------- endo : EndomorphicOperator Kernel of quadratic form. """ def __init__(self, endo): from .endomorphic_operator import EndomorphicOperator from .endomorphic_operator import EndomorphicOperator if not isinstance(op, EndomorphicOperator): if not isinstance(endo, EndomorphicOperator): raise TypeError("op must be an EndomorphicOperator") raise TypeError("op must be an EndomorphicOperator") self._op = op self._op = endo self._domain = op.domain self._domain = endo.domain def apply(self, x): def apply(self, x): self._check_input(x) self._check_input(x) ... @@ -63,7 +95,38 @@ class QuadraticFormOperator(EnergyOperator): ... @@ -63,7 +95,38 @@ class QuadraticFormOperator(EnergyOperator): class GaussianEnergy(EnergyOperator): class GaussianEnergy(EnergyOperator): """Class for energies of fields with Gaussian probability distribution. Represents up to constants in :math:m: .. math :: E(f) = - \\log G(f-m, D) = 0.5 (f-m)^\\dagger D^{-1} (f-m), an information energy for a Gaussian distribution with mean m and covariance D. Parameters ---------- mean : Field Mean of the Gaussian. Default is 0. covariance : LinearOperator Covariance of the Gaussian. Default is the identity operator. domain : Domain, DomainTuple, tuple of Domain or MultiDomain Operator domain. By default it is inferred from mean or covariance if specified Note ---- At least one of the arguments has to be provided. 
""" def __init__(self, mean=None, covariance=None, domain=None): def __init__(self, mean=None, covariance=None, domain=None): if mean is not None and not isinstance(mean, Field): raise TypeError if covariance is not None and not isinstance(covariance, LinearOperator): raise TypeError self._domain = None self._domain = None if mean is not None: if mean is not None: self._checkEquivalence(mean.domain) self._checkEquivalence(mean.domain) ... @@ -90,7 +153,7 @@ class GaussianEnergy(EnergyOperator): ... @@ -90,7 +153,7 @@ class GaussianEnergy(EnergyOperator): def apply(self, x): def apply(self, x): self._check_input(x) self._check_input(x) residual = x if self._mean is None else x-self._mean residual = x if self._mean is None else x - self._mean res = self._op(residual).real res = self._op(residual).real if not isinstance(x, Linearization) or not x.want_metric: if not isinstance(x, Linearization) or not x.want_metric: return res return res ... @@ -99,7 +162,29 @@ class GaussianEnergy(EnergyOperator): ... @@ -99,7 +162,29 @@ class GaussianEnergy(EnergyOperator): class PoissonianEnergy(EnergyOperator): class PoissonianEnergy(EnergyOperator): """Class for likelihood Hamiltonians of expected count field constrained by Poissonian count data. Represents up to an f-independent term :math:log(d!): .. math :: E(f) = -\\log \\text{Poisson}(d|f) = \\sum f - d^\\dagger \\log(f), where f is a :class:Field in data space with the expectation values for the counts. Parameters ---------- d : Field Data field with counts. Needs to have integer dtype and all field values need to be non-negative. """ def __init__(self, d): def __init__(self, d): if not isinstance(d, Field) or not np.issubdtype(d.dtype, np.integer): raise TypeError if np.any(d.local_data < 0): raise ValueError self._d = d self._d = d self._domain = DomainTuple.make(d.domain) self._domain = DomainTuple.make(d.domain) ... @@ -115,7 +200,13 @@ class PoissonianEnergy(EnergyOperator): ... 
@@ -115,7 +200,13 @@ class PoissonianEnergy(EnergyOperator): class InverseGammaLikelihood(EnergyOperator): class InverseGammaLikelihood(EnergyOperator): """ FIXME """ def __init__(self, d): def __init__(self, d): if not isinstance(d, Field): raise TypeError self._d = d self._d = d self._domain = DomainTuple.make(d.domain) self._domain = DomainTuple.make(d.domain) ... @@ -131,23 +222,78 @@ class InverseGammaLikelihood(EnergyOperator): ... @@ -131,23 +222,78 @@ class InverseGammaLikelihood(EnergyOperator): class BernoulliEnergy(EnergyOperator): class BernoulliEnergy(EnergyOperator): """Computes likelihood energy of expected event frequency constrained by event data. .. math :: E(f) = -\\log \\text{Bernoulli}(d|f) = -d^\\dagger \\log f - (1-d)^\\dagger \\log(1-f), where f is a field defined on d.domain with the expected frequencies of events. Parameters ---------- d : Field Data field with events (1) or non-events (0). """ def __init__(self, d): def __init__(self, d): print(d.dtype) if not isinstance(d, Field) or not np.issubdtype(d.dtype, np.integer): raise TypeError if not np.all(np.logical_or(d.local_data == 0, d.local_data == 1)): raise ValueError self._d = d self._d = d self._domain = DomainTuple.make(d.domain) self._domain = DomainTuple.make(d.domain) def apply(self, x): def apply(self, x): self._check_input(x) self._check_input(x) v = x.log().vdot(-self._d) - (1.-x).log().vdot(1.-self._d) v = -(x.log().vdot(self._d) + (1. - x).log().vdot(1. - self._d)) if not isinstance(x, Linearization): if not isinstance(x, Linearization): return Field.scalar(v) return Field.scalar(v) if not x.want_metric: if not x.want_metric: return v return v met = makeOp(1./(x.val*(1.-x.val))) met = makeOp(1./(x.val*(1. 
- x.val))) met = SandwichOperator.make(x.jac, met) met = SandwichOperator.make(x.jac, met) return v.add_metric(met) return v.add_metric(met) class Hamiltonian(EnergyOperator): class Hamiltonian(EnergyOperator): """Computes an information Hamiltonian in its standard form, i.e. with the prior being a Gaussian with unit covariance. Let the likelihood energy be :math:E_{lh}. Then this operator computes: .. math :: H(f) = 0.5 f^\\dagger f + E_{lh}(f): Other field priors can be represented via transformations of a white Gaussian field into a field with the desired prior probability structure. By implementing prior information this way, the field prior is represented by a generative model, from which NIFTy can draw samples and infer a field using the Maximum a Posteriori (MAP) or the Variational Bayes (VB) method. The metric of this operator can be used as covariance for drawing Gaussian samples. Parameters ---------- lh : EnergyOperator The likelihood energy. ic_samp : IterationController Tells an internal :class:SamplingEnabler which convergence criterium to use to draw Gaussian samples. See also -------- Encoding prior knowledge in the structure of the likelihood, Jakob Knollmüller, Torsten A. Ensslin, _ """ def __init__(self, lh, ic_samp=None): def __init__(self, lh, ic_samp=None): self._lh = lh self._lh = lh self._prior = GaussianEnergy(domain=lh.domain) self._prior = GaussianEnergy(domain=lh.domain) ... @@ -156,14 +302,14 @@ class Hamiltonian(EnergyOperator): ... 
@@ -156,14 +302,14 @@ class Hamiltonian(EnergyOperator): def apply(self, x): def apply(self, x): self._check_input(x) self._check_input(x) if (self._ic_samp is None or not isinstance(x, Linearization) or if (self._ic_samp is None or not isinstance(x, Linearization) not x.want_metric): or not x.want_metric): return self._lh(x)+self._prior(x) return self._lh(x) + self._prior(x) else: else: lhx, prx = self._lh(x), self._prior(x) lhx, prx = self._lh(x), self._prior(x) mtr = SamplingEnabler(lhx.metric, prx.metric.inverse, mtr = SamplingEnabler(lhx.metric, prx.metric.inverse, self._ic_samp, prx.metric.inverse) self._ic_samp, prx.metric.inverse) return (lhx+prx).add_metric(mtr) return (lhx + prx).add_metric(mtr) def __repr__(self): def __repr__(self): subs = 'Likelihood:\n{}'.format(utilities.indent(self._lh.__repr__())) subs = 'Likelihood:\n{}'.format(utilities.indent(self._lh.__repr__())) ... @@ -171,18 +317,62 @@ class Hamiltonian(EnergyOperator): ... @@ -171,18 +317,62 @@ class Hamiltonian(EnergyOperator): return 'Hamiltonian:\n' + utilities.indent(subs) return 'Hamiltonian:\n' + utilities.indent(subs) class SampledKullbachLeiblerDivergence(EnergyOperator): class AveragedEnergy(EnergyOperator): def __init__(self, h, res_samples): """Computes Kullbach-Leibler (KL) divergence or Gibbs free energies. """ # MR FIXME: does h have to be a Hamiltonian? Couldn't it be any energy? A sample-averaged energy, e.g. an Hamiltonian, approximates the relevant part of a KL to be used in Variational Bayes inference if the samples are drawn from the approximating Gaussian: .. math :: \\text{KL}(m) = \\frac1{\\#\{v_i\}} \\sum_{v_i} H(m+v_i), where :math:v_i are the residual samples and :math:m is the mean field around which the samples are drawn. Parameters ---------- h: Hamiltonian h: Hamiltonian N: Number of samples to be used The energy to be averaged. res_samples : iterable of Fields Set of residual sample points to be added to mean field for approximate estimation of the KL. 
Note ---- Having symmetrized residual samples, with both v_i and -v_i being present ensures that the distribution mean is exactly represented. This reduces sampling noise and helps the numerics of the KL minimization process in the variational Bayes inference. See also -------- Let :math:Q(f) = G(f-m,D) be the Gaussian distribution which is used to approximate the accurate posterior :math:P(f|d) with information Hamiltonian :math:H(d,f) = -\\log P(d,f) = -\\log P(f|d) + \\text{const}. In Variational Bayes one needs to optimize the KL divergence between those two distributions for m. It is: :math:KL(Q,P) = \\int Df Q(f) \\log Q(f)/P(f)\\\\ = \\left< \\log Q(f) \\right>_Q(f) - \\left< \\log P(f) \\right>_Q(f)\\\\ = \\text{const} + \\left< H(f) \\right>_G(f-m,D) in essence the information Hamiltonian averaged over a Gaussian distribution centered on the mean m. :class:AveragedEnergy(h) approximates :math:\\left< H(f) \\right>_{G(f-m,D)} if the residuals :math:f-m are drawn from a Gaussian distribution with covariance :math:D. """ """ def __init__(self, h, res_samples): self._h = h self._h = h self._domain = h.domain self._domain = h.domain self._res_samples = tuple(res_samples) self._res_samples = tuple(res_samples) def apply(self, x): def apply(self, x): self._check_input(x) self._check_input(x) mymap = map(lambda v: self._h(x+v), self._res_samples) mymap = map(lambda v: self._h(x + v), self._res_samples) return utilities.my_sum(mymap) * (1./len(self._res_samples)) return utilities.my_sum(mymap)*(1./len(self._res_samples))
 ... @@ -73,7 +73,7 @@ def test_hamiltonian_and_KL(field): ... @@ -73,7 +73,7 @@ def test_hamiltonian_and_KL(field): ift.extra.check_value_gradient_consistency(hamiltonian, field) ift.extra.check_value_gradient_consistency(hamiltonian, field) S = ift.ScalingOperator(1., space) S = ift.ScalingOperator(1., space) samps = [S.draw_sample() for i in range(3)] samps = [S.draw_sample() for i in range(3)] kl = ift.SampledKullbachLeiblerDivergence(hamiltonian, samps) kl = ift.AveragedEnergy(hamiltonian, samps) ift.extra.check_value_gradient_consistency(kl, field) ift.extra.check_value_gradient_consistency(kl, field) ... ...
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!