Commit 068e855d authored by Reimar H Leike

added one more choice for beta heuristics, found and fixed a bug where beta has to be clipped at zero according to the reference, and added some more comments to the code
parent a10d47fd
Pipeline #25480 passed in 5 minutes and 28 seconds
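The "clipped at zero" fix in the message is the PR+ variant cited in the diff below (eq. (5.44) in Nocedal & Wright): whenever the Polak-Ribiere coefficient would go negative, it is reset to zero, which restarts the iteration with a steepest-descent step. As a one-line sketch, with `beta_pr` an illustrative name for the raw coefficient:

    beta = max(0.0, beta_pr)  # eq. (5.44): restart with steepest descent when beta_pr < 0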
@@ -22,11 +22,12 @@ from .line_search_strong_wolfe import LineSearchStrongWolfe
 class NonlinearCG(Minimizer):
+    # RL FIXME: referenced equations in docstring seem broken for the version I found.
     """ Nonlinear Conjugate Gradient scheme according to Polak-Ribiere.

     Algorithm 5.4 from Nocedal & Wright.
     Eq. (5.41a) has been replaced by eq. (5.49)

     Parameters
     ----------
     controller : IterationController
@@ -41,8 +42,10 @@ class NonlinearCG(Minimizer):
"""
def __init__(self, controller, line_searcher=LineSearchStrongWolfe(c2=0.1), beta_heuristics = 'Polak-Ribiere'):
if (beta_heuristics != 'Polak-Ribiere') and (beta_heuristics != 'Polak-Ribiere'):
raise ValueError("beta heuristics must be either 'Polak-Ribiere' or 'Hestenes-Stiefel'")
valid_beta_heuristics = ['Polak-Ribiere', 'Fletcher-Reeves', 'Polak-Ribiere']
if not (beta_heuristics in valid_beta_heuristics):
raise ValueError("beta heuristics must be either 'Polak-Ribiere', "\
+"'Fletcher-Reeves', or 'Hestenes-Stiefel'")
self._beta_heuristic = beta_heuristics
self._controller = controller
self._line_searcher = line_searcher
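A hypothetical use of the extended constructor (a sketch; `ic` stands in for an IterationController instance and is not part of the diff):

    # Assumes `ic` is an IterationController; only the keyword arguments
    # come from the diff above, the rest is illustrative.
    minimizer = NonlinearCG(controller=ic, beta_heuristics='Fletcher-Reeves')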
@@ -66,8 +69,14 @@ class NonlinearCG(Minimizer):
         if status != controller.CONTINUE:
             return energy, status
         grad_new = energy.gradient
+        # print(grad_old.val, grad_new.val, p.val, energy.position.val)
         if self._beta_heuristic == 'Hestenes-Stiefel':
-            beta = grad_new.vdot(grad_new-grad_old)/(grad_new-grad_old).vdot(p).real
+            # Eq. (5.45) in Nocedal & Wright, clipped at zero.
+            beta = max(0.0, (grad_new.vdot(grad_new-grad_old)/(grad_new-grad_old).vdot(p)).real)
+        elif self._beta_heuristic == 'Polak-Ribiere':
+            # Eq. (5.43) in Nocedal & Wright, clipped at zero as in eq. (5.44).
+            beta = max(0.0, (grad_new.vdot(grad_new-grad_old)/grad_old.vdot(grad_old)).real)
         else:
-            beta = grad_new.vdot(grad_new-grad_old)/(grad_old.vdot(grad_old)).real
+            # Fletcher-Reeves, eq. (5.40a) in Nocedal & Wright.
+            beta = (grad_new.vdot(grad_new)/grad_old.vdot(grad_old)).real
         p = beta*p - grad_new
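For context, here is a minimal self-contained sketch of the same scheme in plain numpy (not NIFTy code: an Armijo backtracking search stands in for LineSearchStrongWolfe, and all names are illustrative):

    import numpy as np

    def nonlinear_cg(f, grad, x0, heuristic='Polak-Ribiere', maxiter=2000, tol=1e-6):
        # Toy nonlinear CG mirroring the three beta heuristics above.
        x = np.asarray(x0, dtype=float)
        g_old = grad(x)
        p = -g_old                                   # start with steepest descent
        for _ in range(maxiter):
            # Armijo backtracking (the real code uses a strong-Wolfe search).
            alpha, fx, slope = 1.0, f(x), np.dot(g_old, p)
            while f(x + alpha*p) > fx + 1e-4*alpha*slope:
                alpha *= 0.5
            x = x + alpha*p
            g_new = grad(x)
            if np.linalg.norm(g_new) < tol:
                break
            y = g_new - g_old
            if heuristic == 'Hestenes-Stiefel':
                beta = max(0.0, np.dot(g_new, y) / np.dot(y, p))          # eq. (5.45), clipped
            elif heuristic == 'Polak-Ribiere':
                beta = max(0.0, np.dot(g_new, y) / np.dot(g_old, g_old))  # eqs. (5.43)+(5.44)
            else:                                                         # Fletcher-Reeves
                beta = np.dot(g_new, g_new) / np.dot(g_old, g_old)        # eq. (5.40a)
            p = beta*p - g_new                       # same update as p = beta*p - grad_new
            if np.dot(g_new, p) >= 0:                # safeguard: restart on a non-descent direction
                p = -g_new
            g_old = g_new
        return x

    # Quick check on the Rosenbrock function.
    rosen = lambda z: (1 - z[0])**2 + 100*(z[1] - z[0]**2)**2
    rosen_grad = lambda z: np.array([-2*(1 - z[0]) - 400*z[0]*(z[1] - z[0]**2),
                                     200*(z[1] - z[0]**2)])
    print(nonlinear_cg(rosen, rosen_grad, [-1.2, 1.0]))  # approaches [1., 1.]

When beta is clipped at zero, the next direction degenerates to -g_new, i.e. a plain gradient step; that restart is exactly the behaviour the commit message describes.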