Commit 4a36f41c authored by Theo Steininger's avatar Theo Steininger

Merge branch 'master' into fix_hermitianizer

parents d3181f8d 50d6a969
Pipeline #14516 failed with stage
in 6 minutes and 8 seconds
......@@ -156,13 +156,20 @@ class DescentMinimizer(Loggable, object):
pk=descend_direction,
f_k_minus_1=f_k_minus_1)
f_k_minus_1 = energy.value
energy = new_energy
# check if new energy value is bigger than old energy value
if (new_energy.value - energy.value) > 0:
self.logger.info("Line search algorithm returned a new energy "
"that was larger than the old one. Stopping.")
break
energy = new_energy
# check convergence
delta = abs(gradient).max() * (step_length/gradient_norm)
self.logger.debug("Iteration : %08u step_length = %3.1E "
"delta = %3.1E" %
(iteration_number, step_length, delta))
self.logger.debug("Iteration:%08u step_length=%3.1E "
"delta=%3.1E energy=%3.1E" %
(iteration_number, step_length, delta,
energy.value))
if delta == 0:
convergence = self.convergence_level + 2
self.logger.info("Found minimum according to line-search. "
......
......@@ -40,8 +40,4 @@ class SteepestDescent(DescentMinimizer):
"""
descend_direction = energy.gradient
norm = descend_direction.norm()
if norm != 1:
return descend_direction / -norm
else:
return descend_direction * -1
return descend_direction * -1
......@@ -25,7 +25,7 @@ from .line_searching import LineSearchStrongWolfe
class VL_BFGS(DescentMinimizer):
def __init__(self, line_searcher=LineSearchStrongWolfe(), callback=None,
convergence_tolerance=1E-4, convergence_level=3,
iteration_limit=None, max_history_length=10):
iteration_limit=None, max_history_length=5):
super(VL_BFGS, self).__init__(
line_searcher=line_searcher,
......@@ -84,9 +84,6 @@ class VL_BFGS(DescentMinimizer):
for i in xrange(1, len(delta)):
descend_direction += delta[i] * b[i]
norm = descend_direction.norm()
if norm != 1:
descend_direction /= norm
return descend_direction
......
......@@ -21,7 +21,6 @@ import numpy as np
from d2o import distributed_data_object,\
STRATEGIES as DISTRIBUTION_STRATEGIES
from nifty.basic_arithmetics import log as nifty_log
from nifty.config import nifty_configuration as gc
from nifty.field import Field
from nifty.operators.endomorphic_operator import EndomorphicOperator
......
......@@ -73,7 +73,7 @@ class LinearOperator(Loggable, object):
__metaclass__ = NiftyMeta
def __init__(self, default_spaces=None):
self.default_spaces = default_spaces
self._default_spaces = default_spaces
@staticmethod
def _parse_domain(domain):
......@@ -119,10 +119,6 @@ class LinearOperator(Loggable, object):
def default_spaces(self):
return self._default_spaces
@default_spaces.setter
def default_spaces(self, spaces):
self._default_spaces = utilities.cast_axis_to_tuple(spaces)
def __call__(self, *args, **kwargs):
return self.times(*args, **kwargs)
......
......@@ -163,3 +163,9 @@ class ProjectionOperator(EndomorphicOperator):
@property
def self_adjoint(self):
return True
# ---Added properties and methods---
@property
def projection_field(self):
return self._projection_field
......@@ -135,8 +135,8 @@ class SmoothingOperator(EndomorphicOperator):
# "space as input domain.")
self._domain = self._parse_domain(domain)
self.sigma = sigma
self.log_distances = log_distances
self._sigma = sigma
self._log_distances = log_distances
def _inverse_times(self, x, spaces):
if self.sigma == 0:
......@@ -183,18 +183,10 @@ class SmoothingOperator(EndomorphicOperator):
def sigma(self):
return self._sigma
@sigma.setter
def sigma(self, sigma):
self._sigma = np.float(sigma)
@property
def log_distances(self):
return self._log_distances
@log_distances.setter
def log_distances(self, log_distances):
self._log_distances = bool(log_distances)
@abc.abstractmethod
def _smooth(self, x, spaces, inverse):
raise NotImplementedError
# -*- coding: utf-8 -*-
from nifty import Energy
class QuadraticPotential(Energy):
    """Toy energy H(x) = 0.5 * x^dagger(A x) used to exercise minimizers.

    `eigenvalues` is a linear operator A applied via its __call__;
    assumes A is Hermitian and positive definite so the minimum of H
    is 0 at x = 0 — TODO confirm against the operators the tests pass in.
    """

    def __init__(self, position, eigenvalues):
        super(QuadraticPotential, self).__init__(position)
        self.eigenvalues = eigenvalues

    def at(self, position):
        # Same potential, re-anchored at a new position.
        return self.__class__(position, eigenvalues=self.eigenvalues)

    @property
    def value(self):
        # 0.5 * <x, A x>; keep only the real part of the inner product.
        applied = self.eigenvalues(self.position)
        return (0.5 * self.position.vdot(applied)).real

    @property
    def gradient(self):
        # Gradient of 0.5 * x^dagger A x is A x (Hermitian A assumed).
        return self.eigenvalues(self.position)

    @property
    def curvature(self):
        # The curvature (Hessian) of a quadratic form is the operator itself.
        return self.eigenvalues
import unittest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from nifty import Field, DiagonalOperator, RGSpace, HPSpace
from nifty import ConjugateGradient
from test.common import expand
# Domains the minimization tests run over: a 2D regular grid and a
# HEALPix sphere.
spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
class Test_ConjugateGradient(unittest.TestCase):
    """Unit tests for the ConjugateGradient linear-system solver."""

    def test_interface(self):
        # Constructor arguments must be exposed unchanged as attributes.
        def callback(z):
            return z

        limit = 100
        level = 4
        tolerance = 1E-6
        minimizer = ConjugateGradient(iteration_limit=limit,
                                      convergence_tolerance=tolerance,
                                      convergence_level=level,
                                      callback=callback)
        assert_equal(minimizer.iteration_limit, limit)
        assert_equal(minimizer.convergence_level, level)
        assert_equal(minimizer.convergence_tolerance, tolerance)
        assert(minimizer.callback is callback)

    @expand([[space] for space in spaces])
    def test_minimization(self, space):
        # Solve A x = b with diagonal A; CG must recover x = b / diag(A).
        np.random.seed(42)
        x0 = Field.from_random('normal', domain=space) * 10
        diagonal = Field.from_random('uniform', domain=space) + 0.5
        operator = DiagonalOperator(space, diagonal=diagonal)
        rhs = Field(space, val=1.)
        (position, convergence) = ConjugateGradient()(A=operator,
                                                      x0=x0,
                                                      b=rhs)
        assert_almost_equal(position.val.get_full_data(),
                            1. / diagonal.val.get_full_data(),
                            decimal=3)
import unittest
import numpy as np
from numpy.testing import assert_equal, assert_almost_equal
from nifty import Field, DiagonalOperator, RGSpace, HPSpace
from nifty import SteepestDescent, RelaxedNewton, VL_BFGS
from itertools import product
from test.common import expand
from quadratic_potential import QuadraticPotential
from nifty import logger
# Minimizer classes under test; all are exercised through the same
# DescentMinimizer-style constructor interface below.
minimizers = [SteepestDescent, RelaxedNewton, VL_BFGS]
# Domains the minimization tests run over: a 2D regular grid and a
# HEALPix sphere.
spaces = [RGSpace([1024, 1024], distances=0.123), HPSpace(32)]
class Test_DescentMinimizers(unittest.TestCase):
    """Shared tests run against every descent-type minimizer."""

    @expand([[minimizer] for minimizer in minimizers])
    def test_interface(self, minimizer):
        # Constructor arguments must round-trip to attributes unchanged.
        def callback(z):
            return z

        limit = 100
        level = 4
        tolerance = 1E-6
        minimizer = minimizer(iteration_limit=limit,
                              convergence_tolerance=tolerance,
                              convergence_level=level,
                              callback=callback)
        assert_equal(minimizer.iteration_limit, limit)
        assert_equal(minimizer.convergence_level, level)
        assert_equal(minimizer.convergence_tolerance, tolerance)
        assert(minimizer.callback is callback)

    @expand(product(minimizers, spaces))
    def test_minimization(self, minimizer_class, space):
        # A quadratic potential with positive-definite (diagonal) curvature
        # has its minimum, value 0, at the origin; every minimizer must
        # find it within the iteration budget.
        np.random.seed(42)
        start = Field.from_random('normal', domain=space) * 10
        diagonal = Field.from_random('uniform', domain=space) + 0.5
        covariance = DiagonalOperator(space, diagonal=diagonal)
        energy = QuadraticPotential(position=start,
                                    eigenvalues=covariance)
        (energy, convergence) = minimizer_class(iteration_limit=30)(energy)
        assert_almost_equal(energy.value, 0, decimal=5)
        assert_almost_equal(energy.position.val.get_full_data(), 0.,
                            decimal=5)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment