# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program.  If not, see <http://www.gnu.org/licenses/>.
#
# Copyright(C) 2013-2019 Max-Planck-Society
#
# NIFTy is being developed at the Max-Planck-Institut fuer Astrophysik.

import numpy as np

from ..logger import logger
from .line_search import LineSearch
from .minimizer import Minimizer


class DescentMinimizer(Minimizer):
    """ A base class used by gradient methods to find a local minimum.

    Descent minimization methods are used to find a local minimum of a scalar
    function by following a descent direction. This class implements the
    minimization procedure once a descent direction is known. The descent
    direction has to be implemented separately.

    Parameters
    ----------
    controller : IterationController
        Object that decides when to terminate the minimization.
    line_searcher : callable, optional
        Function which infers the step size in the descent direction
        (default: LineSearch()).
    """

    def __init__(self, controller, line_searcher=LineSearch()):
        self._controller = controller
        self.line_searcher = line_searcher

    def __call__(self, energy):
        """ Performs the minimization of the provided Energy functional.

        Parameters
        ----------
        energy : Energy
           Energy object which provides value, gradient and metric at a
           specific position in parameter space.

        Returns
        -------
        Energy
            Latest `energy` of the minimization.
        int
            Exit status; can be controller.CONVERGED or controller.ERROR.

        Notes
        -----
        The minimization is stopped if
            * the controller returns controller.CONVERGED or controller.ERROR,
            * a perfectly flat point is reached, or
            * the line search reports that the minimum has been found.
        """
        f_k_minus_1 = None
        controller = self._controller
        status = controller.start(energy)
        if status != controller.CONTINUE:
            return energy, status

        while True:
            # check if position is at a flat point
            if energy.gradient_norm == 0:
                return energy, controller.CONVERGED

            # compute a step length that reduces energy.value sufficiently
            new_energy, success = self.line_searcher.perform_line_search(
                energy=energy, pk=self.get_descent_direction(energy),
                f_k_minus_1=f_k_minus_1)
            if not success:
                self.reset()

            f_k_minus_1 = energy.value

            if new_energy.value > energy.value:
                logger.error("Error: Energy has increased")
                return energy, controller.ERROR

            if new_energy.value == energy.value:
                logger.warning(
                    "Warning: Energy has not changed. Assuming convergence...")
                return new_energy, controller.CONVERGED

            energy = new_energy
            status = self._controller.check(energy)
            if status != controller.CONTINUE:
                return energy, status

    def reset(self):
        pass

    def get_descent_direction(self, energy):
        """ Calculates the next descent direction.

        Parameters
        ----------
        energy : Energy
            An instance of the Energy class which shall be minimized. The
            position of `energy` is used as the starting point of minimization.

        Returns
        -------
        Field
           The descent direction.
        """
        raise NotImplementedError


class SteepestDescent(DescentMinimizer):
    """ Implementation of the steepest descent minimization scheme.

    Also known as 'gradient descent'. This algorithm simply follows the
    functional's gradient for minimization.
    """

    def get_descent_direction(self, energy):
        return -energy.gradient
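
# Usage sketch (illustrative only; `MyEnergy` and `my_controller` are
# hypothetical stand-ins for a concrete Energy implementation providing
# `value`, `gradient`, `gradient_norm` and `apply_metric`, and for an
# IterationController instance):
#
#     minimizer = SteepestDescent(controller=my_controller)
#     energy = MyEnergy(initial_position)
#     energy, status = minimizer(energy)
#     if status != my_controller.CONVERGED:
#         logger.warning("minimization did not converge")
#     optimum = energy.position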


class RelaxedNewton(DescentMinimizer):
    """ Calculates the descent direction according to a Newton scheme.

    The descent direction is determined by weighting the gradient at the
    current parameter position with the inverse local metric.
    """

    def __init__(self, controller, line_searcher=None):
        if line_searcher is None:
            line_searcher = LineSearch(preferred_initial_step_size=1.)
        super(RelaxedNewton, self).__init__(controller=controller,
                                            line_searcher=line_searcher)

    def get_descent_direction(self, energy):
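        # Newton step: p = -M^{-1} g, where M is the local metric and g the
        # gradient at the current position.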
        return -energy.metric.inverse_times(energy.gradient)


class NewtonCG(DescentMinimizer):
    """ Calculates the descent direction according to a Newton-CG scheme.

    Algorithm derived from SciPy sources.
    """

    def __init__(self, controller, line_searcher=None):
        if line_searcher is None:
            line_searcher = LineSearch(preferred_initial_step_size=1.)
        super(NewtonCG, self).__init__(controller=controller,
                                       line_searcher=line_searcher)

    def get_descent_direction(self, energy):
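        # Approximately solve the Newton system metric(p) = -gradient with a
        # conjugate-gradient iteration, truncated when the residual becomes
        # small compared to the initial gradient or when non-positive
        # curvature is encountered.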
        float64eps = np.finfo(np.float64).eps
        grad = energy.gradient
        maggrad = abs(grad).sum()
        termcond = np.min([0.5, np.sqrt(maggrad)]) * maggrad
        xsupi = energy.position*0
        ri = grad
        psupi = -ri
        dri0 = ri.vdot(ri)

        i = 0
        while True:
            if abs(ri).sum() <= termcond:
                return xsupi
            Ap = energy.apply_metric(psupi)
            # check curvature
            curv = psupi.vdot(Ap)
            if 0 <= curv <= 3*float64eps:
                return xsupi
            elif curv < 0:
                return xsupi if i > 0 else (dri0/curv) * grad
            alphai = dri0/curv
            xsupi = xsupi + alphai*psupi
            ri = ri + alphai*Ap
            dri1 = ri.vdot(ri)
            psupi = (dri1/dri0)*psupi - ri
            i += 1
            dri0 = dri1  # update numpy.dot(ri,ri) for next time.

        # curvature keeps increasing, bail out
        raise ValueError("Warning: CG iterations didn't converge. "
                         "The Hessian is not positive definite.")


class L_BFGS(DescentMinimizer):
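    """Limited-memory BFGS minimization scheme.

    The descent direction is computed with the standard L-BFGS two-loop
    recursion from the `max_history_length` most recent position and
    gradient differences, which serve as a low-rank approximation to the
    inverse Hessian.

    Parameters
    ----------
    controller : IterationController
        Object that decides when to terminate the minimization.
    line_searcher : callable, optional
        Function which infers the step size in the descent direction
        (default: LineSearch()).
    max_history_length : int, optional
        Maximum number of stored past updates (default: 5).
    """
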
    def __init__(self, controller, line_searcher=LineSearch(),
                 max_history_length=5):
        super(L_BFGS, self).__init__(controller=controller,
                                     line_searcher=line_searcher)
        self.max_history_length = max_history_length

    def __call__(self, energy):
        self.reset()
        return super(L_BFGS, self).__call__(energy)

    def reset(self):
        self._k = 0
        self._s = [None]*self.max_history_length
        self._y = [None]*self.max_history_length

    def get_descent_direction(self, energy):
        x = energy.position
        s = self._s
        y = self._y
        k = self._k
        maxhist = self.max_history_length
        gradient = energy.gradient

        nhist = min(k, maxhist)
        alpha = [None]*maxhist
        p = -gradient
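        # store the newest position and gradient differences in the circular
        # buffers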
        if k > 0:
            idx = (k-1) % maxhist
            s[idx] = x-self._lastx
            y[idx] = gradient-self._lastgrad
        if nhist > 0:
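            # first loop of the two-loop recursion: newest to oldest update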
            for i in range(k-1, k-nhist-1, -1):
                idx = i % maxhist
                alpha[idx] = s[idx].vdot(p)/s[idx].vdot(y[idx])
                p = p - alpha[idx]*y[idx]
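            # scale with gamma = s^T y / y^T y, the usual initial
            # approximation to the inverse Hessian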
            idx = (k-1) % maxhist
            fact = s[idx].vdot(y[idx]) / y[idx].vdot(y[idx])
            if fact <= 0.:
                logger.error("L-BFGS curvature not positive definite!")
            p = p*fact
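            # second loop of the two-loop recursion: oldest to newest update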
            for i in range(k-nhist, k):
                idx = i % maxhist
                beta = y[idx].vdot(p) / s[idx].vdot(y[idx])
                p = p + (alpha[idx]-beta)*s[idx]
        self._lastx = x
        self._lastgrad = gradient
        self._k += 1
        return p


class VL_BFGS(DescentMinimizer):
    """Implementation of the Vector-free L-BFGS minimization scheme.

    The descent direction is computed from an approximation to the inverse
    Hessian. Instead of storing the full matrix, only the last few updates
    are kept and used for all operations requiring the inverse Hessian
    product. The updates are represented in a new basis to optimize the
    algorithm.

    References
    ----------
    W. Chen, Z. Wang, J. Zhou, "Large-scale L-BFGS using MapReduce", 2014,
    Microsoft
    """

    def __init__(self, controller, line_searcher=LineSearch(),
                 max_history_length=5):
        super(VL_BFGS, self).__init__(controller=controller,
                                      line_searcher=line_searcher)
        self.max_history_length = max_history_length

    def __call__(self, energy):
        self._information_store = None
        return super(VL_BFGS, self).__call__(energy)

    def reset(self):
        self._information_store = None

    def get_descent_direction(self, energy):
        x = energy.position
        gradient = energy.gradient
        # initialize the information store if it doesn't already exist
        try:
            self._information_store.add_new_point(x, gradient)
        except AttributeError:
            self._information_store = _InformationStore(
                self.max_history_length, x0=x, gradient=gradient)

        b = self._information_store.b
        delta = self._information_store.delta
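        # The descent direction is the linear combination sum_i delta[i]*b[i]
        # of the basis vectors b = [s-history, y-history, gradient]
        # (vector-free L-BFGS, cf. Chen et al. 2014).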

        descent_direction = delta[0] * b[0]
        for i in range(1, len(delta)):
            descent_direction = descent_direction + delta[i]*b[i]

        return descent_direction


class _InformationStore(object):
    """Class for storing a list of past updates.

    Parameters
    ----------
    max_history_length : int
        Maximum number of stored past updates.
    x0 : Field
        Initial position in variable space.
    gradient : Field
        Gradient at position x0.

    Attributes
    ----------
    max_history_length : int
        Maximum number of stored past updates.
    s : List
        Circular buffer of past position differences, which are Fields.
    y : List
        Circular buffer of past gradient differences, which are Fields.
    last_x : Field
        Latest position in variable space.
    last_gradient : Field
        Gradient at latest position.
    k : int
        Number of updates that have taken place.
    ss : numpy.ndarray
        2D circular buffer of scalar products between different elements of s.
    sy : numpy.ndarray
        2D circular buffer of scalar products between elements of s and y.
    yy : numpy.ndarray
        2D circular buffer of scalar products between different elements of y.
    """

    def __init__(self, max_history_length, x0, gradient):
        self.max_history_length = max_history_length
        self.s = [None]*max_history_length
        self.y = [None]*max_history_length
        self.last_x = x0
        self.last_gradient = gradient
        self.k = 0

        mmax = max_history_length
        self.ss = np.empty((mmax, mmax), dtype=np.float64)
        self.sy = np.empty((mmax, mmax), dtype=np.float64)
        self.yy = np.empty((mmax, mmax), dtype=np.float64)

    @property
    def history_length(self):
        """Returns the number of currently stored updates."""
        return min(self.k, self.max_history_length)

    @property
    def b(self):
        """Combines s, y and gradient to form the new base vectors b.

        Returns
        -------
        List
            List of new basis vectors.
        """
        result = []
        m = self.history_length
        mmax = self.max_history_length

        for i in range(m):
            result.append(self.s[(self.k-m+i) % mmax])

        for i in range(m):
            result.append(self.y[(self.k-m+i) % mmax])

        result.append(self.last_gradient)

        return result

    @property
    def b_dot_b(self):
        """Generates the (2m+1) x (2m+1) matrix of scalar products.

        The i,j-th element of the matrix is the scalar product between the
        i-th and j-th base vector.

        Returns
        -------
        numpy.ndarray
            Matrix of scalar products.
        """
        m = self.history_length
        mmax = self.max_history_length
        k = self.k
        result = np.empty((2*m+1, 2*m+1), dtype=np.float64)

        # update the stores
        k1 = (k-1) % mmax
        for i in range(m):
            kmi = (k-m+i) % mmax
            self.ss[kmi, k1] = self.ss[k1, kmi] = self.s[kmi].vdot(self.s[k1])
            self.yy[kmi, k1] = self.yy[k1, kmi] = self.y[kmi].vdot(self.y[k1])
            self.sy[kmi, k1] = self.s[kmi].vdot(self.y[k1])
        for j in range(m-1):
            kmj = (k-m+j) % mmax
            self.sy[k1, kmj] = self.s[k1].vdot(self.y[kmj])

        for i in range(m):
            kmi = (k-m+i) % mmax
            for j in range(m):
                kmj = (k-m+j) % mmax
                result[i, j] = self.ss[kmi, kmj]
                result[i, m+j] = result[m+j, i] = self.sy[kmi, kmj]
                result[m+i, m+j] = self.yy[kmi, kmj]

            sgrad_i = self.s[kmi].vdot(self.last_gradient)
            result[2*m, i] = result[i, 2*m] = sgrad_i

            ygrad_i = self.y[kmi].vdot(self.last_gradient)
            result[2*m, m+i] = result[m+i, 2*m] = ygrad_i

        result[2*m, 2*m] = self.last_gradient.norm()
        return result

    @property
    def delta(self):
        """Calculates the new scalar coefficients (deltas).

        Returns
        -------
        List
            List of the new scalar coefficients (deltas).
        """
        m = self.history_length
        b_dot_b = self.b_dot_b
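        # Express the direction in the basis b = [s-history, y-history,
        # gradient]: start from -gradient (coefficient -1 on the last basis
        # vector) and run the L-BFGS two-loop recursion using only the
        # precomputed scalar products in b_dot_b.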

        delta = np.zeros(2*m+1, dtype=np.float64)
        delta[2*m] = -1

        alpha = np.empty(m, dtype=np.float64)

        for j in range(m-1, -1, -1):
            delta_b_b = sum([delta[l] * b_dot_b[l, j] for l in range(2*m+1)])
            alpha[j] = delta_b_b/b_dot_b[j, m+j]
            delta[m+j] -= alpha[j]

        for i in range(2*m+1):
            delta[i] *= b_dot_b[m-1, 2*m-1]/b_dot_b[2*m-1, 2*m-1]

        for j in range(m):
            delta_b_b = sum([delta[l]*b_dot_b[m+j, l] for l in range(2*m+1)])
            beta = delta_b_b/b_dot_b[j, m+j]
            delta[j] += (alpha[j] - beta)

        return delta

    def add_new_point(self, x, gradient):
        """Updates the s list and y list.

        Calculates the new position and gradient differences and enters them
        into the respective lists.
        """
        mmax = self.max_history_length
        self.s[self.k % mmax] = x - self.last_x
        self.y[self.k % mmax] = gradient - self.last_gradient

        self.last_x = x
        self.last_gradient = gradient

        self.k += 1