Commit 38d3a559 authored by Martin Reinecke's avatar Martin Reinecke

cleanups

parent eadb48d6
Pipeline #21616 passed with stage in 4 minutes and 22 seconds
......@@ -40,7 +40,6 @@ class ConjugateGradient(Minimizer):
----------
Jorge Nocedal & Stephen Wright, "Numerical Optimization", Second Edition,
2006, Springer-Verlag New York
"""
def __init__(self, controller, preconditioner=None):
......@@ -62,9 +61,7 @@ class ConjugateGradient(Minimizer):
state at last point of the iteration
status : integer
Can be controller.CONVERGED or controller.ERROR
"""
if preconditioner is None:
preconditioner = self._preconditioner
......
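(Aside for readers of this hunk: the recurrence ConjugateGradient implements is the standard preconditioned CG scheme from the Nocedal & Wright reference above. A minimal sketch over plain numpy arrays; `A`, `b` and the `preconditioner` callable are illustrative stand-ins for NIFTy's energy and operator objects, not its API.)

```python
import numpy as np

def cg_sketch(A, b, x, preconditioner=None, tol=1e-6, maxiter=100):
    # Preconditioned CG for A x = b, A symmetric positive definite
    # (equivalently: minimize the quadratic energy 0.5 x^T A x - b^T x).
    apply_m = preconditioner if preconditioner is not None else (lambda r: r)
    r = b - A @ x                    # residual = negative energy gradient
    d = apply_m(r)                   # initial (preconditioned) direction
    q = r @ d
    for _ in range(maxiter):
        Ad = A @ d
        alpha = q / (d @ Ad)         # exact minimizer along d for a quadratic
        x = x + alpha * d
        r = r - alpha * Ad
        if np.linalg.norm(r) < tol:  # the IterationController's job in NIFTy
            break
        s = apply_m(r)
        q_new = r @ s
        d = s + (q_new / q) * d      # conjugate next direction
        q = q_new
    return x
```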
......@@ -18,7 +18,6 @@
from __future__ import division
import abc
from .minimizer import Minimizer
from .line_searching import LineSearchStrongWolfe
......@@ -38,7 +37,6 @@ class DescentMinimizer(Minimizer):
line_searcher : callable *optional*
Function which infers the step size in the descent direction
(default : LineSearchStrongWolfe()).
"""
def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
......@@ -68,9 +66,7 @@ class DescentMinimizer(Minimizer):
* the controller returns controller.CONVERGED or controller.ERROR,
* a perfectly flat point is reached,
* according to the line search, the minimum has been found.
"""
f_k_minus_1 = None
controller = self._controller
status = controller.start(energy)
......@@ -82,22 +78,17 @@ class DescentMinimizer(Minimizer):
if energy.gradient_norm == 0:
return energy, controller.CONVERGED
# current position is encoded in energy object
- descent_direction = self.get_descent_direction(energy)
- # compute the step length, which minimizes energy.value along the
- # search direction
+ # compute a step length that reduces energy.value sufficiently
try:
- new_energy = \
-     self.line_searcher.perform_line_search(
-         energy=energy,
-         pk=descent_direction,
-         f_k_minus_1=f_k_minus_1)
+ new_energy = self.line_searcher.perform_line_search(
+     energy=energy, pk=self.get_descent_direction(energy),
+     f_k_minus_1=f_k_minus_1)
except RuntimeError:
return energy, controller.ERROR
f_k_minus_1 = energy.value
- # check if new energy value is bigger than old energy value
- if (new_energy.value - energy.value) > 0:
+ if new_energy.value > energy.value:
return energy, controller.ERROR
energy = new_energy
......
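(The loop this hunk rewrites, in isolation: ask for a descent direction, delegate the step length to a line search, and refuse any step that raises the energy. A hedged sketch with plain callables; `f`, `grad` and the backtracking loop stand in for NIFTy's Energy and LineSearchStrongWolfe objects.)

```python
import numpy as np

def descent_sketch(f, grad, x, maxiter=100, tol=1e-8):
    # Mirrors the shape of DescentMinimizer.__call__: direction,
    # line search, then the monotonicity check rewritten above.
    fx = f(x)
    for _ in range(maxiter):              # the IterationController's role
        g = grad(x)
        if np.linalg.norm(g) < tol:       # (nearly) flat point: converged
            return x, "CONVERGED"
        p = -g                            # placeholder direction (steepest descent)
        alpha = 1.0                       # crude backtracking in place of strong Wolfe
        while True:
            x_new = x + alpha * p
            fx_new = f(x_new)
            if fx_new <= fx + 1e-4 * alpha * (g @ p):
                break                     # Armijo sufficient decrease holds
            alpha *= 0.5
            if alpha < 1e-12:
                return x, "ERROR"         # line search failed, as the except branch
        if fx_new > fx:                   # the check rewritten in this hunk
            return x, "ERROR"
        x, fx = x_new, fx_new
    return x, "CONVERGED"
```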
......@@ -55,7 +55,6 @@ class IterationController(with_metaclass(NiftyMeta, type('NewBase',
-------
status : integer status, can be CONVERGED, CONTINUE or ERROR
"""
raise NotImplementedError
@abc.abstractmethod
......@@ -70,5 +69,4 @@ class IterationController(with_metaclass(NiftyMeta, type('NewBase',
-------
status : integer status, can be CONVERGED, CONTINUE or ERROR
"""
raise NotImplementedError
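(The two abstract hooks above, `start` plus a per-iteration check whose name is not visible in this excerpt, return one of the integer codes CONVERGED, CONTINUE or ERROR; the minimizers branch only on those codes. A toy concrete controller to make the contract tangible; a sketch only, NIFTy's real controllers also watch gradient norms and the like.)

```python
class MaxIterController:
    # Minimal controller: CONTINUE for a fixed number of iterations,
    # then report CONVERGED. Illustrative, not NIFTy code.
    CONVERGED, CONTINUE, ERROR = 0, 1, 2

    def __init__(self, maxiter):
        self._maxiter = maxiter
        self._count = 0

    def start(self, energy):
        # called once before the first iteration
        self._count = 0
        return self.check(energy)

    def check(self, energy):
        # called once per iteration with the current Energy object
        self._count += 1
        return self.CONVERGED if self._count > self._maxiter else self.CONTINUE
```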
......@@ -36,7 +36,6 @@ class LineSearch(with_metaclass(abc.ABCMeta,
LineEnergy object from which we can extract energy at a specific point.
preferred_initial_step_size : float
Initial guess for the step length.
"""
def __init__(self):
......
......@@ -19,7 +19,6 @@
from __future__ import division
from builtins import range
import numpy as np
from .line_search import LineSearch
from ...energies import LineEnergy
from ... import dobj
......@@ -62,7 +61,6 @@ class LineSearchStrongWolfe(LineSearch):
Maximum number of iterations performed by the line search algorithm.
max_zoom_iterations : integer
Maximum number of iterations performed by the zoom algorithm.
"""
def __init__(self, c1=1e-4, c2=0.9,
......@@ -99,9 +97,7 @@ class LineSearchStrongWolfe(LineSearch):
-------
energy_star : Energy object
The new Energy object on the new position.
"""
le_0 = LineEnergy(0., energy, pk, 0.)
# initialize the zero phis
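(For orientation before the zoom hunks below: `c1` and `c2` in the constructor above parametrize the two strong Wolfe conditions that `perform_line_search` enforces along the line φ(α) = f(x + α·pk), the restriction that LineEnergy represents. Written out as a small predicate; `phi` and `dphi` are illustrative callables, not NIFTy API.)

```python
def satisfies_strong_wolfe(phi, dphi, alpha, c1=1e-4, c2=0.9):
    # phi(a)  = energy along the search line, f(x + a*pk)
    # dphi(a) = directional derivative, grad f(x + a*pk) . pk
    sufficient_decrease = phi(alpha) <= phi(0.) + c1 * alpha * dphi(0.)
    curvature = abs(dphi(alpha)) <= c2 * abs(dphi(0.))
    return sufficient_decrease and curvature
```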
......@@ -203,7 +199,6 @@ class LineSearchStrongWolfe(LineSearch):
-------
energy_star : Energy object
The new Energy object on the new position.
"""
cubic_delta = 0.2 # cubic interpolant checks
quad_delta = 0.1 # quadratic interpolant checks
......@@ -285,7 +280,6 @@ class LineSearchStrongWolfe(LineSearch):
xmin : float
Position of the approximated minimum.
"""
with np.errstate(divide='raise', over='raise', invalid='raise'):
try:
C = fpa
......
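(The "interpolant checks" above guard the zoom phase's trial steps. The quadratic case is compact enough to sketch: fit a parabola to φ(a), φ'(a) and φ(b) and take its vertex, returning None when there is no interior minimum. Illustrative only; the cubic routine that the `np.errstate` block above belongs to fits a third point as well.)

```python
import numpy as np

def quadmin_sketch(a, fa, fpa, b, fb):
    # Vertex of the parabola p(x) = fa + fpa*(x - a) + B*(x - a)^2
    # that matches phi(a), phi'(a) and phi(b).
    db = b - a
    B = (fb - fa - fpa * db) / db ** 2   # curvature of the interpolant
    if not np.isfinite(B) or B <= 0.:
        return None                       # no interior minimum; caller falls back
    return a - fpa / (2. * B)
```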
......@@ -22,8 +22,7 @@ from future.utils import with_metaclass
class Minimizer(with_metaclass(NiftyMeta, type('NewBase', (object,), {}))):
""" A base class used by all minimizers.
"""
""" A base class used by all minimizers."""
@abc.abstractmethod
def __call__(self, energy, preconditioner=None):
......@@ -44,5 +43,4 @@ class Minimizer(with_metaclass(NiftyMeta, type('NewBase', (object,), {}))):
Latest `energy` of the minimization.
status : integer
"""
raise NotImplementedError
......@@ -34,7 +34,6 @@ class NonlinearCG(Minimizer):
----------
Jorge Nocedal & Stephen Wright, "Numerical Optimization", Second Edition,
2006, Springer-Verlag New York
"""
def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
......@@ -56,9 +55,7 @@ class NonlinearCG(Minimizer):
state at last point of the iteration
status : integer
Can be controller.CONVERGED or controller.ERROR
"""
controller = self._controller
status = controller.start(energy)
if status != controller.CONTINUE:
......
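(Unlike plain CG on a quadratic, NonlinearCG has to rebuild conjugacy from gradients alone; the core of each iteration is a direction update p ← −g + β·p. A sketch of the Fletcher–Reeves choice of β; Polak–Ribière would use (g_new − g_old)·g_new in the numerator, and which variant this class uses is not visible in the excerpt.)

```python
def nonlinear_cg_direction(grad_new, grad_old, p_old):
    # Fletcher-Reeves update: beta = |g_new|^2 / |g_old|^2,
    # then p_new = -g_new + beta * p_old. Vectors are numpy arrays here.
    beta = (grad_new @ grad_new) / (grad_old @ grad_old)
    return -grad_new + beta * p_old
```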
......@@ -24,7 +24,7 @@ class RelaxedNewton(DescentMinimizer):
def __init__(self, controller, line_searcher=LineSearchStrongWolfe()):
super(RelaxedNewton, self).__init__(controller=controller,
line_searcher=line_searcher)
+ # FIXME: this does not look idiomatic
self.line_searcher.preferred_initial_step_size = 1.
def get_descent_direction(self, energy):
......@@ -46,6 +46,5 @@ class RelaxedNewton(DescentMinimizer):
descent_direction : Field
Returns the descent direction with proposed step length. In a
quadratic potential this corresponds to the optimal step.
"""
return -energy.curvature.inverse_times(energy.gradient)
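(This one-liner is the full Newton step. For a quadratic energy f(x) = ½ xᵀA x − bᵀx with curvature A, the step −A⁻¹∇f(x) lands exactly on the minimum, which is why the hunk above pins preferred_initial_step_size to 1. A quick numerical check in plain numpy, with an arbitrary made-up A and b.)

```python
import numpy as np

A = np.array([[3., 1.], [1., 2.]])        # curvature of a toy quadratic energy
b = np.array([1., -1.])
x = np.array([5., 5.])                     # arbitrary starting point
grad = A @ x - b                           # gradient of 0.5 x^T A x - b^T x
x_new = x - np.linalg.solve(A, grad)       # Newton step with unit step length
assert np.allclose(A @ x_new, b)           # lands on the exact minimum A x = b
```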
......@@ -26,7 +26,7 @@ class SteepestDescent(DescentMinimizer):
""" Implementation of the steepest descent minimization scheme.
Also known as 'gradient descent'. This algorithm simply follows the
- functionals gradient for minization.
+ functional's gradient for minimization.
Parameters
----------
......@@ -37,8 +37,6 @@ class SteepestDescent(DescentMinimizer):
Returns
-------
descent_direction : Field
- Returns the descent direction.
+ the descent direction.
"""
return -energy.gradient
......@@ -20,7 +20,6 @@ from __future__ import division
from builtins import range
from builtins import object
import numpy as np
from .descent_minimizer import DescentMinimizer
from .line_searching import LineSearchStrongWolfe
......@@ -28,10 +27,8 @@ from .line_searching import LineSearchStrongWolfe
class VL_BFGS(DescentMinimizer):
def __init__(self, controller, line_searcher=LineSearchStrongWolfe(),
max_history_length=5):
super(VL_BFGS, self).__init__(controller=controller,
line_searcher=line_searcher)
self.max_history_length = max_history_length
def __call__(self, energy):
......@@ -117,7 +114,6 @@ class InformationStore(object):
2D circular buffer of scalar products between elements of s and y.
yy : numpy.ndarray
2D circular buffer of scalar products between different elements of y.
"""
def __init__(self, max_history_length, x0, gradient):
self.max_history_length = max_history_length
......@@ -134,9 +130,7 @@ class InformationStore(object):
@property
def history_length(self):
"""Returns the number of currently stored updates.
"""
"""Returns the number of currently stored updates."""
return min(self.k, self.max_history_length)
@property
......@@ -173,7 +167,6 @@ class InformationStore(object):
-------
result : numpy.ndarray
Scalar matrix.
"""
m = self.history_length
mmax = self.max_history_length
......@@ -216,7 +209,6 @@ class InformationStore(object):
-------
delta : List
List of the new scalar coefficients (deltas).
"""
m = self.history_length
b_dot_b = self.b_dot_b
......@@ -246,7 +238,6 @@ class InformationStore(object):
Calculates the new position and gradient differences and enters them
into the respective list.
"""
mmax = self.max_history_length
self.s[self.k % mmax] = x - self.last_x
......
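(The `s`/`y` circular buffers updated here, `self.s[self.k % mmax] = x - self.last_x` and its analogue for gradient differences, feed a limited-memory inverse-Hessian estimate. The textbook form is the L-BFGS two-loop recursion from Nocedal & Wright, ch. 7, sketched below over plain numpy vectors; this is the classical recursion, not the vector-free VL-BFGS reformulation whose precomputed scalar products (`b_dot_b`, the deltas) InformationStore caches.)

```python
import numpy as np

def lbfgs_two_loop(grad, s_list, y_list):
    # s_list[i] = x_{i+1} - x_i, y_list[i] = grad_{i+1} - grad_i, oldest first.
    # Returns an approximation of the quasi-Newton direction -H_k^{-1} grad.
    q = grad.copy()
    rhos = [1. / (y @ s) for s, y in zip(s_list, y_list)]
    alphas = []
    # first loop: newest update pair back to oldest
    for s, y, rho in reversed(list(zip(s_list, y_list, rhos))):
        alpha = rho * (s @ q)
        alphas.append(alpha)
        q = q - alpha * y
    # scale by an initial inverse-Hessian guess gamma * I
    if s_list:
        gamma = (s_list[-1] @ y_list[-1]) / (y_list[-1] @ y_list[-1])
        q = gamma * q
    # second loop: oldest update pair forward to newest
    for (s, y, rho), alpha in zip(zip(s_list, y_list, rhos), reversed(alphas)):
        beta = rho * (y @ q)
        q = q + (alpha - beta) * s
    return -q
```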