Commit ecd9d323 authored by theos
Many bugfixes in the minimization classes.

parent a648ea49
bare_dot.py (new file)

+# -*- coding: utf-8 -*-
+
+import numpy as np
+
+
+def bare_dot(a, b):
+    try:
+        return a.dot(b, bare=True)
+    except (AttributeError, TypeError):
+        pass
+
+    try:
+        return a.vdot(b)
+    except AttributeError:
+        pass
+
+    return np.vdot(a, b)
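The new helper duck-types its way through three dot-product conventions: objects exposing a dot(..., bare=True) method (field-like objects), objects with a vdot method, and plain numpy arrays via np.vdot. A minimal usage sketch, with FieldLike as a hypothetical stand-in for the first case and assuming bare_dot.py is importable:

import numpy as np
from bare_dot import bare_dot

# Plain numpy arrays: the first two branches raise and are caught,
# so the call falls through to np.vdot.
a = np.array([1.0, 2.0, 3.0])
print(bare_dot(a, a))  # 14.0

# Hypothetical stand-in for an object exposing the 'bare' dot product
# that the first branch targets:
class FieldLike(object):
    def __init__(self, values):
        self.values = np.asarray(values)

    def dot(self, other, bare=False):
        return np.vdot(self.values, other.values)

f = FieldLike([1.0, 2.0, 3.0])
print(bare_dot(f, f))  # 14.0, via the first branch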
line_searching/line_search.py

 import abc

 import numpy as np

 from keepers import Loggable

-def bare_dot(a, b):
-    try:
-        return a.dot(b, bare=True)
-    except (AttributeError, TypeError):
-        pass
-
-    try:
-        return a.vdot(b)
-    except AttributeError:
-        pass
-
-    return np.vdot(a, b)
+from ..bare_dot import bare_dot


 class LineSearch(object, Loggable):
@@ -78,8 +64,8 @@ class LineSearch(object, Loggable):
         """
-        self.xk = xk
-        self.pk = pk
+        self.xk = xk.copy()
+        self.pk = pk.copy()

         if f_k is None:
             self.f_k = self.f(xk)
@@ -91,6 +77,8 @@ class LineSearch(object, Loggable):
         else:
             self.fprime_k = fprime_k

+        if f_k_minus_1 is not None:
+            f_k_minus_1 = f_k_minus_1.copy()
         self.f_k_minus_1 = f_k_minus_1

     def _phi(self, alpha):
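The switch to copies guards against aliasing: the minimizer later updates x in place (x += descend_direction*step_length), which would silently mutate an aliased xk held by the line searcher. A minimal numpy sketch of that failure mode, with hypothetical names:

import numpy as np

class Holder(object):
    def set_state(self, xk):
        self.xk = xk              # buggy: stores a reference
        self.xk_safe = xk.copy()  # fixed: stores an independent snapshot

x = np.array([1.0, 2.0])
h = Holder()
h.set_state(x)
x += 10.0  # in-place update, as in 'x += descend_direction*step_length'

print(h.xk)       # [ 11.  12.]  (mutated through the alias)
print(h.xk_safe)  # [ 1.  2.]    (unaffected)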
line_searching/line_search_strong_wolfe.py

@@ -10,7 +10,7 @@ class LineSearchStrongWolfe(LineSearch):
     def __init__(self, c1=1e-4, c2=0.9,
                  max_step_size=50, max_iterations=10,
-                 max_zoom_iterations=100):
+                 max_zoom_iterations=10):
         """
         Parameters
@@ -82,7 +82,6 @@ class LineSearchStrongWolfe(LineSearch):
         # start the minimization loop
         for i in xrange(max_iterations):
-            phi_alpha1 = self._phi(alpha1)
             if alpha1 == 0:
                 self.logger.warn("Increment size became 0.")
                 alpha_star = 0.
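For reference, c1 and c2 parameterize the two strong Wolfe conditions this search enforces: sufficient decrease, phi(alpha) <= phi(0) + c1*alpha*phi'(0), and curvature, |phi'(alpha)| <= c2*|phi'(0)|. A minimal sketch of those tests in standard textbook form (names hypothetical):

def satisfies_strong_wolfe(phi_alpha, phi_0, derphi_alpha, derphi_0,
                           alpha, c1=1e-4, c2=0.9):
    # sufficient decrease (Armijo) condition
    armijo = phi_alpha <= phi_0 + c1 * alpha * derphi_0
    # strong curvature condition
    curvature = abs(derphi_alpha) <= c2 * abs(derphi_0)
    return armijo and curvature

# Example with the quadratic phi(a) = (a - 1)**2, a descent direction at a=0:
phi = lambda a: (a - 1.0)**2
dphi = lambda a: 2.0 * (a - 1.0)
print(satisfies_strong_wolfe(phi(0.9), phi(0.0), dphi(0.9), dphi(0.0), 0.9))
# True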
@@ -137,6 +136,10 @@ class LineSearchStrongWolfe(LineSearch):
-        cubic_delta = 0.2  # cubic
-        quad_delta = 0.1  # quadratic
+        cubic_delta = 0.0  # cubic
+        quad_delta = 0.0  # quadratic
+
+        # initialize the most recent versions (j-1) of phi and alpha
+        alpha_recent = 0
+        phi_recent = phi_0
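In the zoom step, cubic_delta and quad_delta act as safeguard margins: an interpolated trial point landing within delta times the bracket width of either endpoint is rejected in favour of the next fallback, ending in bisection. Setting both to 0.0 accepts any interpolant strictly inside the bracket. A sketch of that safeguard logic, assuming a scipy-style zoom with a_lo < a_hi (names hypothetical):

def safeguarded_trial(a_lo, a_hi, a_cubic, a_quad, cubic_delta, quad_delta):
    interval = a_hi - a_lo
    cchk = cubic_delta * interval
    if a_cubic is not None and a_lo + cchk < a_cubic < a_hi - cchk:
        return a_cubic                # accept the cubic interpolant
    qchk = quad_delta * interval
    if a_quad is not None and a_lo + qchk < a_quad < a_hi - qchk:
        return a_quad                 # fall back to the quadratic one
    return a_lo + 0.5 * interval      # final fallback: bisection

print(safeguarded_trial(0.0, 1.0, 0.98, None, 0.2, 0.1))  # 0.5, cubic rejected
print(safeguarded_trial(0.0, 1.0, 0.98, None, 0.0, 0.0))  # 0.98, accepted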
quasi_newton_minimizer.py

@@ -88,13 +88,14 @@ class QuasiNewtonMinimizer(object, Loggable):
             # compute the step length, which minimizes f_k along the
             # search direction = the gradient
-            step_length, f_k = self.line_searcher.perform_line_search(
+            step_length, new_f_k = self.line_searcher.perform_line_search(
                 xk=x,
                 pk=descend_direction,
                 f_k=f_k,
                 fprime_k=gradient,
                 f_k_minus_1=f_k_minus_1)
+            f_k_minus_1 = f_k
+            f_k = new_f_k

             # update x
             x += descend_direction*step_length
@@ -106,7 +107,8 @@ class QuasiNewtonMinimizer(object, Loggable):
                               (iteration_number, step_length, delta))
             if delta == 0:
                 convergence = self.convergence_level + 2
-                self.logger.info("Found minimum. Stopping.")
+                self.logger.info("Found minimum according to line-search. "
+                                 "Stopping.")
                 break
             elif delta < self.convergence_tolerance:
                 convergence += 1
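Renaming the line-search result to new_f_k makes the history update unambiguous: f_k_minus_1 must receive the pre-line-search f_k, not the freshly returned value. A minimal sketch of the hazard the renaming avoids (hypothetical names):

def buggy_update(f_k, f_k_minus_1, line_search_result):
    f_k = line_search_result   # old f_k is already gone here...
    f_k_minus_1 = f_k          # ...so this stores the *new* value
    return f_k, f_k_minus_1

def fixed_update(f_k, f_k_minus_1, line_search_result):
    new_f_k = line_search_result   # keep the result under a fresh name
    f_k_minus_1 = f_k              # the previous value survives
    f_k = new_f_k
    return f_k, f_k_minus_1

print(buggy_update(3.0, None, 2.0))  # (2.0, 2.0), history corrupted
print(fixed_update(3.0, None, 2.0))  # (2.0, 3.0), history correct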
vl_bfgs.py

@@ -5,6 +5,8 @@
 import numpy as np

 from .quasi_newton_minimizer import QuasiNewtonMinimizer
 from .line_searching import LineSearchStrongWolfe
+from .bare_dot import bare_dot
+

 class VL_BFGS(QuasiNewtonMinimizer):
     def __init__(self, line_searcher=LineSearchStrongWolfe(), callback=None,
@@ -20,6 +22,10 @@ class VL_BFGS(QuasiNewtonMinimizer):
         self.max_history_length = max_history_length

+    def __call__(self, x0, f, fprime, f_args=()):
+        self._information_store = None
+        return super(VL_BFGS, self).__call__(x0, f, fprime, f_args=())
+
     def _get_descend_direction(self, x, gradient):
         # initialize the information store if it doesn't already exist
         try:
@@ -36,6 +42,9 @@ class VL_BFGS(QuasiNewtonMinimizer):
         for i in xrange(1, len(delta)):
             descend_direction += delta[i] * b[i]

+        norm = np.sqrt(bare_dot(descend_direction, descend_direction))
+        if norm != 1:
+            descend_direction /= norm
+
         return descend_direction
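The new normalization rescales the VL-BFGS direction to unit length, so the step length returned by the line search is measured directly as a distance in parameter space, independent of the magnitude of the accumulated history terms. A plain-numpy sketch of the effect:

import numpy as np

descend_direction = np.array([3.0, 4.0])
norm = np.sqrt(np.vdot(descend_direction, descend_direction))  # 5.0
if norm != 1:
    descend_direction = descend_direction / norm

print(descend_direction)                  # [ 0.6  0.8]
print(np.linalg.norm(descend_direction))  # 1.0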
@@ -44,8 +53,8 @@ class InformationStore(object):
         self.max_history_length = max_history_length
         self.s = LimitedList(max_history_length)
         self.y = LimitedList(max_history_length)
-        self.last_x = x0
-        self.last_gradient = gradient
+        self.last_x = x0.copy()
+        self.last_gradient = gradient.copy()
         self.k = 0

         self._ss_store = {}
@@ -168,8 +177,8 @@ class InformationStore(object):
         new_y = gradient - self.last_gradient
         self.y.add(new_y)

-        self.last_x = x
-        self.last_gradient = gradient
+        self.last_x = x.copy()
+        self.last_gradient = gradient.copy()

 #
 #        k = self.k
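As in LineSearch, the copies matter because the minimizer advances x in place: an aliased last_x would track the live x, and every curvature pair s_k = x_new - last_x would collapse to zero. A minimal numpy sketch (hypothetical names):

import numpy as np

x = np.array([1.0, 2.0])
last_x_alias = x        # buggy: alias follows every in-place update of x
last_x_copy = x.copy()  # fixed: independent snapshot of the previous point

x += 0.5  # in-place step, as the minimizer performs

print(x - last_x_alias)  # [ 0.  0.], s_k degenerates
print(x - last_x_copy)   # [ 0.5  0.5], correct s_k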