Commit 8fcb0150 authored by Theo Steininger

Fixed minimizers and their tests.

parent 668c15f7
Pipeline #14513 passed in 5 minutes and 53 seconds
@@ -156,24 +156,20 @@ class DescentMinimizer(Loggable, object):
pk=descend_direction,
f_k_minus_1=f_k_minus_1)
f_k_minus_1 = energy.value
# check if new energy value is bigger than old energy value
if (new_energy.value - energy.value) > 0:
self.logger.info("Line search algorithm was unsuccessful. "
"Stopping.")
new_energy = energy.at( position=energy.position +
np.random.rand()/10*(new_energy.position-energy.position) )
energy = new_energy
convergence = self.convergence_level + 2
self.logger.info("Line search algorithm returned a new energy "
"that was larger than the old one. Stopping.")
break
energy = new_energy
# check convergence
delta = abs(gradient).max() * (step_length/gradient_norm)
self.logger.debug("Iteration : %08u step_length = %3.1E "
"delta = %3.1E" %
(iteration_number, step_length, delta))
self.logger.debug("Iteration:%08u step_length=%3.1E "
"delta=%3.1E energy=%3.1E" %
(iteration_number, step_length, delta,
energy.value))
if delta == 0:
convergence = self.convergence_level + 2
self.logger.info("Found minimum according to line-search. "
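The behavioural change in this hunk: when the line search returns an energy larger than the current one, the minimizer now logs and stops instead of retrying from a randomly perturbed position, and the per-iteration debug line now also reports the energy value. A minimal, self-contained sketch of the new stopping policy in plain NumPy (a toy loop with illustrative names, not the NIFTY classes):

```python
import numpy as np

def toy_descent(f, grad, x, step=0.1, maxiter=100):
    """Toy gradient descent mirroring the new policy: stop as soon as a
    step would increase the objective, instead of perturbing the position."""
    fx = f(x)
    for _ in range(maxiter):
        x_new = x - step * grad(x)
        fx_new = f(x_new)
        if fx_new > fx:   # uphill step -> stop (the old code retried from a random perturbation)
            break
        x, fx = x_new, fx_new
    return x, fx

# usage: a 1-D parabola with minimum at x = 3
x_min, f_min = toy_descent(lambda x: (x - 3.0) ** 2,
                           lambda x: 2.0 * (x - 3.0),
                           x=np.array(0.0))
```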
@@ -40,8 +40,4 @@ class SteepestDescent(DescentMinimizer):
"""
descend_direction = energy.gradient
norm = descend_direction.norm()
if norm != 1:
return descend_direction / -norm
else:
return descend_direction * -1
return descend_direction * -1
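After this change the steepest-descent direction is simply the negative gradient at its natural length; the removed `norm != 1` branch no longer rescales it, so the step size is left entirely to the line search. A plain-NumPy illustration of the new rule (not the NIFTY Field API):

```python
import numpy as np

def steepest_descent_direction(gradient):
    # new behaviour: return -gradient unscaled, with no division by its norm
    return -1 * gradient

g = np.array([3.0, 4.0])
print(steepest_descent_direction(g))                   # [-3. -4.]
print(np.linalg.norm(steepest_descent_direction(g)))   # 5.0, length preserved
```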
@@ -25,7 +25,7 @@ from .line_searching import LineSearchStrongWolfe
class VL_BFGS(DescentMinimizer):
def __init__(self, line_searcher=LineSearchStrongWolfe(), callback=None,
convergence_tolerance=1E-4, convergence_level=3,
iteration_limit=None, max_history_length=10):
iteration_limit=None, max_history_length=5):
super(VL_BFGS, self).__init__(
line_searcher=line_searcher,
@@ -84,9 +84,6 @@ class VL_BFGS(DescentMinimizer):
for i in xrange(1, len(delta)):
descend_direction += delta[i] * b[i]
norm = descend_direction.norm()
if norm != 1:
descend_direction /= norm
return descend_direction
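Two VL_BFGS changes are visible above: the default `max_history_length` drops from 10 to 5, and the computed descent direction is no longer normalized before being handed to the line search. Callers who want the previous memory length can request it explicitly; a hedged sketch using only keyword arguments that appear in the hunks (the import matches the new tests below):

```python
from nifty import VL_BFGS

# keep the old history length explicitly instead of relying on the new default of 5
minimizer = VL_BFGS(convergence_tolerance=1E-4,
                    convergence_level=3,
                    iteration_limit=30,
                    max_history_length=10)
```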
# -*- coding: utf-8 -*-
from nifty import Energy
class QuadraticPotential(Energy):
def __init__(self, position, eigenvalues):
super(QuadraticPotential, self).__init__(position)
self.eigenvalues = eigenvalues
def at(self, position):
return self.__class__(position,
eigenvalues=self.eigenvalues)
@property
def value(self):
H = 0.5 * self.position.vdot(
self.eigenvalues(self.position))
return H.real
@property
def gradient(self):
g = self.eigenvalues(self.position)
return g
@property
def curvature(self):
return self.eigenvalues
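QuadraticPotential models the energy H(x) = 0.5 * <x, A x> for a self-adjoint operator A (passed in as `eigenvalues`), so its gradient is A x, its curvature is A, and its unique minimum is the zero field with value 0, which is what the descent-minimizer tests below assert. A quick plain-NumPy sanity check of that gradient convention via finite differences (a diagonal A stands in for the operator):

```python
import numpy as np

np.random.seed(42)
d = np.random.uniform(size=5) + 0.5      # diagonal of a self-adjoint, positive A
x = np.random.normal(size=5)

H = lambda v: 0.5 * v @ (d * v)          # value:    0.5 * <v, A v>
grad = d * x                             # gradient: A x

# central finite-difference check of the gradient
eps = 1e-6
fd = np.array([(H(x + eps * e) - H(x - eps * e)) / (2 * eps)
               for e in np.eye(5)])
np.testing.assert_almost_equal(fd, grad, decimal=5)
```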
import unittest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from nifty import *
from nifty import Field, DiagonalOperator, RGSpace, HPSpace
from nifty import ConjugateGradient
from itertools import product
from test.common import expand
from test.common import generate_spaces
np.random.seed(42)
class ConjugateGradient_Tests(unittest.TestCase):
spaces = generate_spaces()
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x0 = Field.from_random('normal', domain=space)
A = DiagonalOperator(space, diagonal = 1.)
b = Field(space, val=0.)
minimizer = ConjugateGradient(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(position, convergence) = minimizer(A=A, x0=x0, b=b)
if position.domain[0] != space:
raise TypeError
if type(convergence) != int:
raise TypeError
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x0 = Field.from_random('normal', domain=space)
test_x = Field(space, val = 1.)
A = DiagonalOperator(space, diagonal = 1.)
b = Field(space, val=1.)
minimizer = ConjugateGradient(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(position, convergence) = minimizer(A=A, x0=x0, b=b)
assert_almost_equal(position.val.get_full_data(),
test_x.val.get_full_data(), decimal=3)
assert_equal(convergence, convergence_level+1)
spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
class Test_ConjugateGradient(unittest.TestCase):
def test_interface(self):
iteration_limit = 100
convergence_level = 4
convergence_tolerance = 1E-6
callback = lambda z: z
minimizer = ConjugateGradient(
iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level,
callback=callback)
assert_equal(minimizer.iteration_limit, iteration_limit)
assert_equal(minimizer.convergence_level, convergence_level)
assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
assert(minimizer.callback is callback)
@expand([[space] for space in spaces])
def test_minimization(self, space):
np.random.seed(42)
starting_point = Field.from_random('normal', domain=space)*10
covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
required_result = Field(space, val=1.)
minimizer = ConjugateGradient()
(position, convergence) = minimizer(A=covariance, x0=starting_point,
b=required_result)
assert_almost_equal(position.val.get_full_data(),
1./covariance_diagonal.val.get_full_data(),
decimal=3)
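The new `test_minimization` drives ConjugateGradient on a diagonal system diag(d) * x = 1 with a positive random diagonal d, so the expected solution is x = 1/d elementwise. The same expectation, checked against a textbook CG written in plain NumPy (not the NIFTY implementation):

```python
import numpy as np

def cg(apply_A, b, x0, tol=1e-9, maxiter=100):
    """Textbook conjugate gradient for a symmetric positive-definite operator."""
    x, r = x0.copy(), b - apply_A(x0)
    p, rs = r.copy(), r @ r
    for _ in range(maxiter):
        Ap = apply_A(p)
        alpha = rs / (p @ Ap)
        x = x + alpha * p
        r = r - alpha * Ap
        rs_new = r @ r
        if np.sqrt(rs_new) < tol:
            break
        p = r + (rs_new / rs) * p
        rs = rs_new
    return x

np.random.seed(42)
d = np.random.uniform(size=8) + 0.5                    # mimics the uniform + 0.5 diagonal
x = cg(lambda v: d * v, b=np.ones(8), x0=np.random.normal(size=8) * 10)
np.testing.assert_almost_equal(x, 1.0 / d, decimal=6)  # x -> 1/d, as in the test
```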
import unittest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from nifty import Field, DiagonalOperator, RGSpace, HPSpace
from nifty import SteepestDescent, RelaxedNewton, VL_BFGS
from itertools import product
from test.common import expand
from quadratic_potential import QuadraticPotential
from nifty import logger
minimizers = [SteepestDescent, RelaxedNewton, VL_BFGS]
spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
class Test_DescentMinimizers(unittest.TestCase):
@expand([[minimizer] for minimizer in minimizers])
def test_interface(self, minimizer):
iteration_limit = 100
convergence_level = 4
convergence_tolerance = 1E-6
callback = lambda z: z
minimizer = minimizer(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level,
callback=callback)
assert_equal(minimizer.iteration_limit, iteration_limit)
assert_equal(minimizer.convergence_level, convergence_level)
assert_equal(minimizer.convergence_tolerance, convergence_tolerance)
assert(minimizer.callback is callback)
@expand(product(minimizers, spaces))
def test_minimization(self, minimizer_class, space):
np.random.seed(42)
starting_point = Field.from_random('normal', domain=space)*10
covariance_diagonal = Field.from_random('uniform', domain=space) + 0.5
covariance = DiagonalOperator(space, diagonal=covariance_diagonal)
energy = QuadraticPotential(position=starting_point,
eigenvalues=covariance)
minimizer = minimizer_class(iteration_limit=30)
(energy, convergence) = minimizer(energy)
assert_almost_equal(energy.value, 0, decimal=5)
assert_almost_equal(energy.position.val.get_full_data(), 0., decimal=5)
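One way to read the generous `iteration_limit=30` in `test_minimization`: for a quadratic potential the Newton step is exact, x - curvature^{-1}(gradient) = x - A^{-1}(A x) = 0, so RelaxedNewton lands on the minimum essentially immediately, while SteepestDescent and VL_BFGS need a handful of line-searched steps. A plain-NumPy check of that one-step property (a diagonal A stands in for the covariance operator):

```python
import numpy as np

np.random.seed(42)
d = np.random.uniform(size=4) + 0.5       # eigenvalues of a diagonal covariance A
x = np.random.normal(size=4) * 10         # starting point, as in the test

gradient = d * x                          # A x
newton_step = x - (1.0 / d) * gradient    # x - A^{-1}(A x)
print(newton_step)                        # -> [0. 0. 0. 0.] up to rounding
```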
import unittest
from numpy.testing import assert_equal, assert_almost_equal
from nifty import *
from itertools import product
from test.common import expand
from test.common import generate_spaces
np.random.seed(42)
class QuadraticPot(Energy):
def __init__(self, position, N):
super(QuadraticPot, self).__init__(position)
self.N = N
def at(self, position):
return self.__class__(position, N = self.N)
@property
def value(self):
H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
return H.real
@property
def gradient(self):
g = self.N.inverse_times(self.position)
return_g = g.copy_empty(dtype=np.float)
return_g.val = g.val.real
return return_g
@property
def curvature(self):
return self.N
class RelaxedNewton_Tests(unittest.TestCase):
spaces = generate_spaces()
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = RelaxedNewton(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
if energy.position.domain[0] != space:
raise TypeError
if type(convergence) != int:
raise TypeError
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
test_x = Field(space, val = 0.)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = RelaxedNewton(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
assert_almost_equal(energy.value, 0, decimal=3)
assert_almost_equal(energy.position.val.get_full_data(),
test_x.val.get_full_data(), decimal=3)
assert_equal(convergence, convergence_level+2)
import unittest
from numpy.testing import assert_equal, assert_almost_equal
from nifty import *
from itertools import product
from test.common import expand
from test.common import generate_spaces
np.random.seed(42)
class QuadraticPot(Energy):
def __init__(self, position, N):
super(QuadraticPot, self).__init__(position)
self.N = N
def at(self, position):
return self.__class__(position, N = self.N)
@property
def value(self):
H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
return H.real
@property
def gradient(self):
g = self.N.inverse_times(self.position)
return_g = g.copy_empty(dtype=np.float)
return_g.val = g.val.real
return return_g
@property
def curvature(self):
return self.N
class SteepestDescent_Tests(unittest.TestCase):
spaces = generate_spaces()
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = SteepestDescent(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
if energy.position.domain[0] != space:
raise TypeError
if type(convergence) != int:
raise TypeError
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
test_x = Field(space, val = 0.)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = SteepestDescent(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
assert_almost_equal(energy.value, 0, decimal=3)
assert_almost_equal(energy.position.val.get_full_data(),
test_x.val.get_full_data(), decimal=3)
assert_equal(convergence, convergence_level+2)
import unittest
from numpy.testing import assert_equal, assert_almost_equal
from nifty import *
from itertools import product
from test.common import expand
from test.common import generate_spaces
np.random.seed(42)
class QuadraticPot(Energy):
def __init__(self, position, N):
super(QuadraticPot, self).__init__(position)
self.N = N
def at(self, position):
return self.__class__(position, N = self.N)
@property
def value(self):
H = 0.5 *self.position.dot(self.N.inverse_times(self.position))
return H.real
@property
def gradient(self):
g = self.N.inverse_times(self.position)
return_g = g.copy_empty(dtype=np.float)
return_g.val = g.val.real
return return_g
@property
def curvature(self):
return self.N
class VL_BFGS_Tests(unittest.TestCase):
spaces = generate_spaces()
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = VL_BFGS(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
if energy.position.domain[0] != space:
raise TypeError
if type(convergence) != int:
raise TypeError
@expand(product(spaces, [10, 100, 1000], [1E-3, 1E-4, 1E-5], [2, 3, 4] ))
def test_property(self, space, iteration_limit, convergence_tolerance,
convergence_level):
x = Field.from_random('normal', domain=space)
test_x = Field(space, val = 0.)
N = DiagonalOperator(space, diagonal = 1.)
energy = QuadraticPot(position=x , N=N)
minimizer = VL_BFGS(iteration_limit=iteration_limit,
convergence_tolerance=convergence_tolerance,
convergence_level=convergence_level)
(energy, convergence) = minimizer(energy)
assert_almost_equal(energy.value, 0, decimal=3)
assert_almost_equal(energy.position.val.get_full_data(),
test_x.val.get_full_data(), decimal=3)
assert_equal(convergence, convergence_level+2)