# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2018 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik
# and financially supported by the Studienstiftung des deutschen Volkes.

from ..sugar import exp
from ..minimization.energy import Energy
from ..operators.smoothness_operator import SmoothnessOperator
from ..operators.inversion_enabler import InversionEnabler
from ..utilities import memo


def _LinearizedPowerResponse(Instrument, nonlinearity, ht, Distributor, tau,
                             xi):
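    # Linearization of the response R(tau) = Instrument(nonlinearity(
    # ht(Distributor(exp(tau/2)) * xi))) around the current log power
    # spectrum tau. By the chain rule its derivative acts as
    #   0.5 * Instrument * f'(s) * ht * xi * Distributor * exp(tau/2),
    # with s = ht(Distributor(exp(tau/2)) * xi) and f = nonlinearity,
    # which is exactly the operator chain returned below.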
    power = exp(0.5*tau)
    position = ht(Distributor(power)*xi)
    linearization = nonlinearity.derivative(position)
    return 0.5*Instrument*linearization*ht*xi*Distributor*power


class NonlinearPowerEnergy(Energy):
    """The Energy of the power spectrum according to the critical filter.

    It describes the energy of the logarithmic amplitudes of the power
    spectrum of a Gaussian random field under reconstruction uncertainty,
    using a smoothness and an inverse-gamma prior. It is used to infer the
    correlation structure of a correlated signal. The smoothness prior
    operates on a logarithmic scale, i.e. it prefers power-law-like power
    spectra.

    Parameters
    ----------
    position : Field
        The current position of this energy.
    xi : Field
        The excitation field.
    D : EndomorphicOperator
        The curvature of the Gaussian encoding the posterior covariance of
        the excitation field; residual samples are drawn from it to estimate
        the uncertainty corrections.
    sigma : float
        The strength of the smoothness prior. Needs to be non-negative; a
        larger value means a stronger smoothness prior.
        default : 0
    samples : int
        Number of samples used for the estimation of the uncertainty
        corrections.
        default : 3
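    d : Field
        The data.
    N : EndomorphicOperator
        The noise covariance.
    ht : LinearOperator
        Harmonic transform from the harmonic domain to the signal domain.
    Instrument : LinearOperator
        The response of the measurement instrument.
    nonlinearity : callable
        Point-wise nonlinearity applied to the signal; must provide a
        `derivative` method.
    Distributor : LinearOperator
        Distributes the power spectrum values over the corresponding
        harmonic modes.
    xi_sample_list : list of Field
        Precomputed excitation samples. If not given, `samples` residual
        samples are drawn from `D` around `xi`.
        default : None
    inverter : Minimizer
        Inversion method used by the curvature via `InversionEnabler`.
        default : None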
    """
    # MR FIXME: docstring incomplete and outdated
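    # Sketch of what __init__ computes: with A = Distributor(exp(tau/2)),
    # s_i = ht(A * xi_i) and f = nonlinearity, the energy value is
    #   E(tau) = 1/#samples * sum_i 0.5 * (d - R f(s_i))^dagger N^-1 (d - R f(s_i))
    #            + 0.5 * tau^dagger T tau,
    # where tau is `position`, R is `Instrument` and T is the smoothness
    # operator. The gradient and curvature below follow from this expression.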
    def __init__(self, position, d, N, xi, D, ht, Instrument, nonlinearity,
                 Distributor, sigma=0., samples=3, xi_sample_list=None,
                 inverter=None):
        super(NonlinearPowerEnergy, self).__init__(position)
        self.xi = xi
        self.D = D
        self.d = d
        self.N = N
        self.T = SmoothnessOperator(domain=position.domain[0],
                                    strength=sigma, logarithmic=True)
        self.ht = ht
        self.Instrument = Instrument
        self.nonlinearity = nonlinearity
        self.Distributor = Distributor
        self.sigma = sigma
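        # Estimate uncertainty corrections by averaging over excitation
        # samples: residual samples with the posterior covariance (the
        # inverse of the curvature D) are added to the mean xi; with
        # samples=0 (or None) only the mean is used.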
        if xi_sample_list is None:
            if samples is None or samples == 0:
                xi_sample_list = [xi]
            else:
                xi_sample_list = [D.draw_sample(from_inverse=True) + xi
                                  for _ in range(samples)]
        self.xi_sample_list = xi_sample_list
        self.inverter = inverter
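
        # A is the amplitude field Distributor(exp(position/2)), i.e. the
        # square root of the power spectrum distributed over the harmonic
        # modes; A*xi_sample is the harmonic-space signal excitation.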
        A = Distributor(exp(.5 * position))

        self._gradient = None
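        # Accumulate the Gaussian likelihood value and its gradient for each
        # excitation sample; the results are averaged after the loop.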
        for xi_sample in self.xi_sample_list:
            map_s = ht(A*xi_sample)
            LinR = _LinearizedPowerResponse(Instrument, nonlinearity, ht,
                                            Distributor, position, xi_sample)

            residual = d - Instrument(nonlinearity(map_s))
            tmp = N.inverse_times(residual)
            lh = 0.5 * residual.vdot(tmp)
            grad = LinR.adjoint_times(tmp)

            if self._gradient is None:
                self._value = lh
                self._gradient = grad.copy()
            else:
                self._value += lh
                self._gradient += grad

        self._value *= 1. / len(self.xi_sample_list)
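        # Add the smoothness prior: 0.5 * tau^dagger T tau contributes Tpos
        # to the gradient; the likelihood gradient picks up a minus sign
        # because the residual is d - Instrument(nonlinearity(...)).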
        Tpos = self.T(position)
        self._value += 0.5 * position.vdot(Tpos)
        self._gradient *= -1. / len(self.xi_sample_list)
        self._gradient += Tpos
        self._gradient.lock()

    def at(self, position):
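        # Re-evaluate this energy at a new position, reusing the stored
        # excitation samples so that consecutive evaluations during a
        # minimization are consistent.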
        return self.__class__(position, self.d, self.N, self.xi, self.D,
                              self.ht, self.Instrument, self.nonlinearity,
                              self.Distributor, sigma=self.sigma,
                              samples=len(self.xi_sample_list),
                              xi_sample_list=self.xi_sample_list,
                              inverter=self.inverter)

    @property
    def value(self):
        return self._value

    @property
    def gradient(self):
        return self._gradient

    @property
    @memo
    def curvature(self):
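        # The curvature is approximated by the sample-averaged
        # LinR^dagger N^-1 LinR plus the smoothness operator T; the
        # InversionEnabler allows its inverse to be applied via `inverter`.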
        result = None
        for xi_sample in self.xi_sample_list:
            LinearizedResponse = _LinearizedPowerResponse(
                self.Instrument, self.nonlinearity, self.ht, self.Distributor,
                self.position, xi_sample)
            op = LinearizedResponse.adjoint*self.N.inverse*LinearizedResponse
            result = op if result is None else result + op
        result = result*(1./len(self.xi_sample_list)) + self.T
        return InversionEnabler(result, self.inverter)