Commit b92b75fb authored by Martin Reinecke

CG improvements

parent 0c8a2610
1 merge request: !155 Tweak CG
@@ -20,6 +20,7 @@ from __future__ import division
 import numpy as np
 from keepers import Loggable
+from nifty import Field


 class ConjugateGradient(Loggable, object):
@@ -75,7 +76,7 @@ class ConjugateGradient(Loggable, object):
     """
-    def __init__(self, convergence_tolerance=1E-4, convergence_level=3,
+    def __init__(self, convergence_tolerance=1E-8, convergence_level=3,
                  iteration_limit=None, reset_count=None,
                  preconditioner=None, callback=None):
@@ -121,13 +122,13 @@ class ConjugateGradient(Loggable, object):
         r = b - A(x0)
         d = self.preconditioner(r)
-        previous_gamma = r.vdot(d)
+        previous_gamma = (r.vdot(d)).real
         if previous_gamma == 0:
             self.logger.info("The starting guess is already perfect solution "
                              "for the inverse problem.")
             return x0, self.convergence_level+1
-        norm_b = np.sqrt(b.vdot(b))
-        x = x0
+        norm_b = np.sqrt((b.vdot(b)).real)
+        x = x0.copy()
         convergence = 0
         iteration_number = 1
         self.logger.info("Starting conjugate gradient.")
@@ -137,7 +138,7 @@ class ConjugateGradient(Loggable, object):
                 self.callback(x, iteration_number)
             q = A(d)
-            alpha = previous_gamma/d.vdot(q)
+            alpha = previous_gamma/d.vdot(q).real
             if not np.isfinite(alpha):
                 self.logger.error("Alpha became infinite! Stopping.")
@@ -146,7 +147,7 @@ class ConjugateGradient(Loggable, object):
             x += d * alpha
             reset = False
-            if alpha.real < 0:
+            if alpha < 0:
                 self.logger.warn("Positive definiteness of A violated!")
                 reset = True
             if self.reset_count is not None:
@@ -156,24 +157,26 @@ class ConjugateGradient(Loggable, object):
                 r = b - A(x)
             else:
                 r -= q * alpha
+            #tmp=r.val.get_full_data()
+            #tmp.imag=0.
+            #r=Field(r.domain,val=tmp)
             s = self.preconditioner(r)
-            gamma = r.vdot(s)
-            if gamma.real < 0:
+            gamma = r.vdot(s).real
+            if gamma < 0:
                 self.logger.warn("Positive definitness of preconditioner "
                                  "violated!")
             beta = max(0, gamma/previous_gamma)
+            print "beta:",beta
             delta = np.sqrt(gamma)/norm_b
+            print "delta:",delta
             self.logger.debug("Iteration : %08u alpha = %3.1E "
                               "beta = %3.1E delta = %3.1E" %
-                              (iteration_number,
-                               np.real(alpha),
-                               np.real(beta),
-                               np.real(delta)))
+                              (iteration_number, alpha, beta, delta))
             if gamma == 0:
                 convergence = self.convergence_level+1
@@ -196,6 +199,7 @@ class ConjugateGradient(Loggable, object):
             d = s + d * beta
+            print "iter:",iteration_number
             iteration_number += 1
             previous_gamma = gamma
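A note on the change set (not part of the commit): for a Hermitian, positive-definite operator A and a Hermitian preconditioner, the inner products r.vdot(d), d.vdot(q), b.vdot(b) and r.vdot(s) are real up to floating-point rounding, so casting them with .real only discards numerical noise while making alpha, beta and delta plain floats that can be compared and formatted directly. Likewise, x = x0.copy() keeps the solver from mutating the caller's starting guess in place. The standalone sketch below mirrors that logic with plain NumPy arrays and an identity preconditioner; it is an illustration under those assumptions, not the NIFTY API.

import numpy as np

def cg_solve(A, b, x0, tol=1e-8, maxiter=1000):
    # Unpreconditioned CG for a Hermitian positive-definite matrix A.
    # Mirrors the commit: inner products are cast to their real part and
    # the starting guess is copied instead of aliased.
    x = x0.copy()                          # cf. "x = x0.copy()" in the diff
    r = b - A @ x
    d = r.copy()
    previous_gamma = np.vdot(r, d).real    # real for Hermitian A, up to rounding
    if previous_gamma == 0:                # x0 already solves the system
        return x
    norm_b = np.sqrt(np.vdot(b, b).real)
    for _ in range(maxiter):
        q = A @ d
        alpha = previous_gamma / np.vdot(d, q).real
        if alpha < 0:                      # A is not positive definite
            break
        x += alpha * d
        r -= alpha * q
        gamma = np.vdot(r, r).real         # identity preconditioner: s = r
        if np.sqrt(gamma) / norm_b < tol:  # delta as defined in the diff
            break
        beta = max(0., gamma / previous_gamma)
        d = r + beta * d
        previous_gamma = gamma
    return x

For example, cg_solve(np.array([[4., 1.], [1., 3.]]), np.array([1., 2.]), np.zeros(2)) reaches the exact solution (1/11, 7/11) within two iterations, as CG does in exact arithmetic for a 2x2 system.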