# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2020 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.

import numpy as np

from .. import random, utilities
from ..field import Field
from ..linearization import Linearization
from ..multi_field import MultiField
from ..operators.endomorphic_operator import EndomorphicOperator
from ..operators.energy_operators import StandardHamiltonian
from ..probing import approximation2endo
from ..sugar import full, makeOp
from .energy import Energy


def _shareRange(nwork, nshares, myshare):
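    """Divides range(0, nwork) into `nshares` contiguous, almost equal parts
    and returns the half-open interval [lo, hi) belonging to share `myshare`,
    e.g. _shareRange(10, 3, 0) == (0, 4) and _shareRange(10, 3, 2) == (7, 10)."""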
    nbase = nwork//nshares
    additional = nwork % nshares
    lo = myshare*nbase + min(myshare, additional)
    hi = lo + nbase + int(myshare < additional)
    return lo, hi


def np_allreduce_sum(comm, arr):
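    """Sums the numpy array `arr` element-wise over all tasks of the MPI
    communicator `comm`; returns `arr` unchanged if `comm` is None."""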
    if comm is None:
        return arr
    from mpi4py import MPI
    arr = np.array(arr)
    res = np.empty_like(arr)
    comm.Allreduce(arr, res, MPI.SUM)
    return res


def allreduce_sum_field(comm, fld):
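    """Sums a Field or MultiField over all tasks of the MPI communicator
    `comm`; returns `fld` unchanged if `comm` is None."""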
    if comm is None:
        return fld
    if isinstance(fld, Field):
        return Field(fld.domain, np_allreduce_sum(comm, fld.val))
    res = tuple(
        Field(f.domain, np_allreduce_sum(comm, f.val))
        for f in fld.values())
    return MultiField(fld.domain, res)


class KLMetric(EndomorphicOperator):
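    """Represents the metric of a `MetricGaussianKL` as an
    `EndomorphicOperator`, delegating application and sampling to the KL."""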
    def __init__(self, KL):
        self._KL = KL
        self._capability = self.TIMES | self.ADJOINT_TIMES
        self._domain = KL.position.domain

    def apply(self, x, mode):
        self._check_input(x, mode)
        return self._KL.apply_metric(x)

    def draw_sample(self, from_inverse=False, dtype=np.float64):
        return self._KL.metric_sample(from_inverse, dtype)


class MetricGaussianKL(Energy):
    """Provides the sampled Kullback-Leibler divergence between a distribution
    and a Metric Gaussian.

    A Metric Gaussian is used to approximate another probability distribution.
    It is a Gaussian distribution that uses the Fisher information metric of
    the other distribution at the location of its mean to approximate the
    variance. In order to infer the mean, a stochastic estimate of the
    Kullback-Leibler divergence is minimized. This estimate is obtained by
    sampling the Metric Gaussian at the current mean. During minimization
    these samples are kept constant; only the mean is updated. Due to the
    typically nonlinear structure of the true distribution these samples have
    to be updated eventually by instantiating `MetricGaussianKL` again. For the
    true probability distribution the standard parametrization is assumed.
    The samples of this class can be distributed among MPI tasks.

    Parameters
    ----------
    mean : Field
        Mean of the Gaussian probability distribution.
    hamiltonian : StandardHamiltonian
        Hamiltonian of the approximated probability distribution.
    n_samples : integer
        Number of samples used to stochastically estimate the KL.
    constants : list
        List of parameter keys that are kept constant during optimization.
        Default is no constants.
    point_estimates : list
        List of parameter keys for which no samples are drawn, but that are
        (possibly) optimized for, corresponding to point estimates of these.
        Default is to draw samples for the complete domain.
    mirror_samples : boolean
        Whether the negative of the drawn samples are also used,
        as they are equally legitimate samples. If true, the number of used
        samples doubles. Mirroring samples stabilizes the KL estimate as
        extreme sample variation is counterbalanced. Default is False.
    napprox : int
        Number of samples for computing preconditioner for sampling. No
        preconditioning is done by default.
    use_mpi : bool
        Whether MPI should be used.
        If MPI is enabled, samples will be distributed as evenly as possible
        across MPI.COMM_WORLD. If `mirror_samples` is set, then a sample and
        its mirror image will always reside on the same task.
    lh_sampling_dtype : type
        Determines which dtype in data space shall be used for drawing samples
        from the metric. If the inference is based on complex data,
        lh_sampling_dtype shall be set to complex accordingly. The reason for
        the presence of this parameter is that the metric of the likelihood
        energy is just an `Operator` which does not know anything about the
        dtype of the fields on which it acts. Default is float64.
    _samples : None
        Only a parameter for internal use. Typically not to be set by users.

    Note
    ----
    The two lists `constants` and `point_estimates` are independent from each
    other. It is possible to sample along domains which are kept constant
    during minimization and vice versa.

    See also
    --------
    `Metric Gaussian Variational Inference`, Jakob Knollmüller,
    Torsten A. Enßlin, `<https://arxiv.org/abs/1901.11033>`_
    """

    def __init__(self, mean, hamiltonian, n_samples, constants=[],
                 point_estimates=[], mirror_samples=False,
                 napprox=0, use_mpi=False, _samples=None,
                 lh_sampling_dtype=np.float64):
        super(MetricGaussianKL, self).__init__(mean)

        if not isinstance(hamiltonian, StandardHamiltonian):
            raise TypeError
        if hamiltonian.domain is not mean.domain:
            raise ValueError
        if not isinstance(n_samples, int):
            raise TypeError
        self._constants = tuple(constants)
        self._point_estimates = tuple(point_estimates)
        if not isinstance(mirror_samples, bool):
            raise TypeError

        self._hamiltonian = hamiltonian

        self._use_mpi = bool(use_mpi)
        if self._use_mpi:
            from mpi4py import MPI
            self._comm = MPI.COMM_WORLD
            self._ntask = self._comm.Get_size()
            self._rank = self._comm.Get_rank()
            self._master = (self._rank == 0)
        else:
            self._comm, self._ntask, self._rank, self._master = None, 1, 0, True

        self._n_samples = int(n_samples)
        self._lo, self._hi = _shareRange(self._n_samples, self._ntask, self._rank)
        self._mirror_samples = bool(mirror_samples)
        self._n_eff_samples = self._n_samples
        if self._mirror_samples:
            self._n_eff_samples *= 2

        if _samples is None:
            met = hamiltonian(Linearization.make_partial_var(
                mean, self._point_estimates, True)).metric
            if napprox >= 1:
                met._approximation = makeOp(approximation2endo(met, napprox))
            _samples = []
            sseq = random.spawn_sseq(self._n_samples)
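            # Each task draws only its share [self._lo, self._hi) of the
            # samples; seeding by the global sample index keeps the result
            # independent of the number of MPI tasks.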
            for i in range(self._lo, self._hi):
                random.push_sseq(sseq[i])
                _samples.append(met.draw_sample(from_inverse=True,
                                                dtype=lh_sampling_dtype))
                random.pop_sseq()
            _samples = tuple(_samples)
        else:
            if len(_samples) != self._n_samples:
                raise ValueError("# of samples mismatch")
        self._samples = _samples
        self._lin = Linearization.make_partial_var(mean, self._constants)
        v, g = None, None
        if len(self._samples) == 0:  # hack if there are too many MPI tasks
            tmp = self._hamiltonian(self._lin)
            v = 0. * tmp.val.val
            g = 0. * tmp.gradient
        else:
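            # Accumulate value and gradient over this task's (possibly
            # mirrored) samples; the contributions of all tasks are summed
            # by the allreduce calls below.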
            for s in self._samples:
                tmp = self._hamiltonian(self._lin+s)
                if self._mirror_samples:
                    tmp = tmp + self._hamiltonian(self._lin-s)
                if v is None:
                    v = tmp.val.val_rw()
                    g = tmp.gradient
                else:
                    v += tmp.val.val
                    g = g + tmp.gradient
        self._val = np_allreduce_sum(self._comm, v)[()] / self._n_eff_samples
        self._grad = allreduce_sum_field(self._comm, g) / self._n_eff_samples
        self._metric = None
        self._sampdt = lh_sampling_dtype

    def at(self, position):
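        """Returns a `MetricGaussianKL` at the new `position`, reusing the
        already drawn samples."""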
        return MetricGaussianKL(
            position, self._hamiltonian, self._n_samples, self._constants,
            self._point_estimates, self._mirror_samples, use_mpi=self._use_mpi,
            _samples=self._samples, lh_sampling_dtype=self._sampdt)

    @property
    def value(self):
        return self._val

    @property
    def gradient(self):
        return self._grad

    def _get_metric(self):
        lin = self._lin.with_want_metric()
        if self._metric is None:
            if len(self._samples) == 0:  # hack if there are too many MPI tasks
                self._metric = self._hamiltonian(lin).metric.scale(0.)
            else:
                mymap = map(lambda v: self._hamiltonian(lin+v).metric,
                            self._samples)
                self.unscaled_metric = utilities.my_sum(mymap)
                self._metric = self.unscaled_metric.scale(1./self._n_eff_samples)

    def apply_metric(self, x):
        self._get_metric()
        return allreduce_sum_field(self._comm, self._metric(x))

    @property
    def metric(self):
        return KLMetric(self)

    @property
    def samples(self):
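        """Returns all drawn samples, gathered over all MPI tasks and
        including the mirrored ones if `mirror_samples` is set."""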
        if self._comm is not None:
            res = self._comm.allgather(self._samples)
            res = tuple(item for sublist in res for item in sublist)
        else:
            res = self._samples
        if self._mirror_samples:
            res = res + tuple(-item for item in res)
        return res

    def unscaled_metric_sample(self, from_inverse=False, dtype=np.float64):
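        """Draws a zero-mean sample whose covariance is the sum (not the
        mean) of the per-sample metrics; `metric_sample` rescales it."""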
        if from_inverse:
            raise NotImplementedError()
        lin = self._lin.with_want_metric()
        samp = full(self._hamiltonian.domain, 0.)
        sseq = random.spawn_sseq(self._n_samples)
        for i, v in enumerate(self._samples):
            random.push_sseq(sseq[self._lo+i])
            samp = samp + self._hamiltonian(lin+v).metric.draw_sample(
                from_inverse=False, dtype=dtype)
            if self._mirror_samples:
                samp = samp + self._hamiltonian(lin-v).metric.draw_sample(
                    from_inverse=False, dtype=dtype)
            random.pop_sseq()
        return allreduce_sum_field(self._comm, samp)

    def metric_sample(self, from_inverse=False, dtype=np.float64):
        return self.unscaled_metric_sample(from_inverse, dtype)/self._n_eff_samples