metric_gaussian_kl_mpi.py

# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.

from .. import utilities
from ..linearization import Linearization
from ..operators.energy_operators import StandardHamiltonian
from .energy import Energy
from mpi4py import MPI
import numpy as np
from ..field import Field
from ..multi_field import MultiField


# Module-level MPI state: all tasks in COMM_WORLD take part in sample
# generation and in the reductions below.
_comm = MPI.COMM_WORLD
ntask = _comm.Get_size()
rank = _comm.Get_rank()
master = (rank == 0)


def _shareRange(nwork, nshares, myshare):
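    # Split `nwork` items into `nshares` contiguous chunks and return the
    # half-open range [lo, hi) handled by `myshare`.  For example, nwork=10
    # and nshares=4 give (0, 3), (3, 6), (6, 8), (8, 10) for myshare 0..3.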
    nbase = nwork//nshares
    additional = nwork % nshares
    lo = myshare*nbase + min(myshare, additional)
    hi = lo + nbase + int(myshare < additional)
    return lo, hi


def np_allreduce_sum(arr):
    res = np.empty_like(arr)
    _comm.Allreduce(arr, res, MPI.SUM)
    return res


def allreduce_sum_field(fld):
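    # Elementwise sum of a Field or MultiField over all MPI tasks.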
    if isinstance(fld, Field):
        return Field.from_local_data(fld.domain,
                                     np_allreduce_sum(fld.local_data))
    res = tuple(
        Field.from_local_data(f.domain, np_allreduce_sum(f.local_data))
        for f in fld.values())
    return MultiField(fld.domain, res)


class MetricGaussianKL_MPI(Energy):
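    """Sampled Kullback-Leibler divergence between a Gaussian defined by
    `mean` and the metric of `hamiltonian`, and the distribution given by
    `hamiltonian`, with the samples distributed over MPI tasks.

    Each task draws and keeps only its share of the samples; the value, the
    gradient and metric applications are summed over all tasks via MPI
    reductions.
    """
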
    def __init__(self, mean, hamiltonian, n_samples, constants=[],
                 point_estimates=[], mirror_samples=False,
                 _samples=None):
        super(MetricGaussianKL_MPI, self).__init__(mean)

        if not isinstance(hamiltonian, StandardHamiltonian):
            raise TypeError
        if hamiltonian.domain is not mean.domain:
            raise ValueError
        if not isinstance(n_samples, int):
            raise TypeError
        self._constants = list(constants)
        self._point_estimates = list(point_estimates)
        if not isinstance(mirror_samples, bool):
            raise TypeError

        self._hamiltonian = hamiltonian

        if _samples is None:
            lo, hi = _shareRange(n_samples, ntask, rank)
            met = hamiltonian(Linearization.make_partial_var(
                mean, point_estimates, True)).metric
            _samples = []
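            # Sample indices are global (from _shareRange); seeding by the
            # index makes each sample depend only on that index, not on the
            # number of MPI tasks.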
            for i in range(lo, hi):
                np.random.seed(i)
                _samples.append(met.draw_sample(from_inverse=True))
            if mirror_samples:
                _samples += [-s for s in _samples]
                n_samples *= 2
            _samples = tuple(_samples)
        self._samples = _samples
        self._n_samples = n_samples
        self._lin = Linearization.make_partial_var(mean, constants)
        v, g = None, None
        if len(self._samples) == 0:  # hack if there are too many MPI tasks
            tmp = self._hamiltonian(self._lin)
            v = 0. * tmp.val.local_data
            g = 0. * tmp.gradient
        else:
            for s in self._samples:
                tmp = self._hamiltonian(self._lin+s)
                if v is None:
                    v = tmp.val.local_data.copy()
                    g = tmp.gradient
                else:
                    v += tmp.val.local_data
                    g = g + tmp.gradient
        self._val = np_allreduce_sum(v)[()] / self._n_samples
        self._grad = allreduce_sum_field(g) / self._n_samples
        self._metric = None

    def at(self, position):
        return MetricGaussianKL_MPI(
            position, self._hamiltonian, self._n_samples, self._constants,
            self._point_estimates, _samples=self._samples)

    @property
    def value(self):
        return self._val

    @property
    def gradient(self):
        return self._grad

    def _get_metric(self):
        lin = self._lin.with_want_metric()
        if self._metric is None:
            if len(self._samples) == 0:  # hack if there are too many MPI tasks
                self._metric = self._hamiltonian(lin).metric.scale(0.)
            else:
                mymap = map(lambda v: self._hamiltonian(lin+v).metric,
                            self._samples)
                self._metric = utilities.my_sum(mymap)
                self._metric = self._metric.scale(1./self._n_samples)

    def apply_metric(self, x):
        self._get_metric()
        return allreduce_sum_field(self._metric(x))

    @property
    def metric(self):
        if ntask > 1:
            raise ValueError("not supported when MPI is active")
        self._get_metric()  # lazily compute the metric if not done yet
        return self._metric

    @property
    def samples(self):
        res = _comm.allgather(self._samples)
        res = [item for sublist in res for item in sublist]
        return res
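
# Minimal usage sketch, not part of the module.  The Hamiltonian `ham`, the
# starting position `mean` and the Newton-type `minimizer` are placeholders:
#
#   kl = MetricGaussianKL_MPI(mean, ham, n_samples=5, mirror_samples=True)
#   kl, _ = minimizer(kl)
#   mean = kl.position
#
# Every MPI task must build the KL with identical arguments, since each task
# draws only its own share of the samples (run e.g. with `mpirun -np 4`).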