Commit f64657e5 authored by Theo Steininger's avatar Theo Steininger

Extended logging.

parent f0888388
Pipeline #17415 passed with stages
in 25 minutes and 44 seconds
......@@ -120,9 +120,11 @@ class CriticalPowerEnergy(Energy):
@property
def w(self):
if self._w is None:
self.logger.info("Initializing w")
w = Field(domain=self.position.domain, val=0., dtype=self.m.dtype)
if self.D is not None:
for i in range(self.samples):
self.logger.info("Drawing sample %i" % i)
posterior_sample = generate_posterior_sample(
self.m, self.D)
projected_sample = posterior_sample.power_analyze(
......
......@@ -132,6 +132,9 @@ class ConjugateGradient(Loggable, object):
iteration_number = 1
self.logger.info("Starting conjugate gradient.")
beta = np.inf
delta = np.inf
while True:
if self.callback is not None:
self.callback(x, iteration_number)
......@@ -140,7 +143,10 @@ class ConjugateGradient(Loggable, object):
alpha = previous_gamma/d.vdot(q).real
if not np.isfinite(alpha):
self.logger.error("Alpha became infinite! Stopping.")
self.logger.error(
"Alpha became infinite! Stopping. Iteration : %08u "
"alpha = %3.1E beta = %3.1E delta = %3.1E" %
(iteration_number, alpha, beta, delta))
return x0, 0
x += d * alpha
......@@ -174,21 +180,30 @@ class ConjugateGradient(Loggable, object):
if gamma == 0:
convergence = self.convergence_level+1
self.logger.info("Reached infinite convergence.")
self.logger.info(
"Reached infinite convergence. Iteration : %08u "
"alpha = %3.1E beta = %3.1E delta = %3.1E" %
(iteration_number, alpha, beta, delta))
break
elif abs(delta) < self.convergence_tolerance:
convergence += 1
self.logger.info("Updated convergence level to: %u" %
convergence)
if convergence == self.convergence_level:
self.logger.info("Reached target convergence level.")
self.logger.info(
"Reached target convergence level. Iteration : %08u "
"alpha = %3.1E beta = %3.1E delta = %3.1E" %
(iteration_number, alpha, beta, delta))
break
else:
convergence = max(0, convergence-1)
if self.iteration_limit is not None:
if iteration_number == self.iteration_limit:
self.logger.warn("Reached iteration limit. Stopping.")
self.logger.info(
"Reached iteration limit. Iteration : %08u "
"alpha = %3.1E beta = %3.1E delta = %3.1E" %
(iteration_number, alpha, beta, delta))
break
d = s + d * beta
......
......@@ -127,10 +127,13 @@ class LineSearchStrongWolfe(LineSearch):
alpha1 = 1.0/pk.norm()
# start the minimization loop
for i in range(self.max_iterations):
iteration_number = 0
while iteration_number < self.max_iterations:
iteration_number += 1
if alpha1 == 0:
self.logger.warn("Increment size became 0.")
return le_0.energy
result_energy = le_0.energy
break
le_alpha1 = le_0.at(alpha1)
phi_alpha1 = le_alpha1.value
......@@ -140,31 +143,37 @@ class LineSearchStrongWolfe(LineSearch):
le_star = self._zoom(alpha0, alpha1, phi_0, phiprime_0,
phi_alpha0, phiprime_alpha0, phi_alpha1,
le_0)
return le_star.energy
result_energy = le_star.energy
break
phiprime_alpha1 = le_alpha1.directional_derivative
if abs(phiprime_alpha1) <= -self.c2*phiprime_0:
return le_alpha1.energy
result_energy = le_alpha1.energy
break
if phiprime_alpha1 >= 0:
le_star = self._zoom(alpha1, alpha0, phi_0, phiprime_0,
phi_alpha1, phiprime_alpha1, phi_alpha0,
le_0)
return le_star.energy
result_energy = le_star.energy
break
# update alphas
alpha0, alpha1 = alpha1, min(2*alpha1, self.max_step_size)
if alpha1 == self.max_step_size:
print ("reached max step size, bailing out")
self.logger.info("Reached max step size, bailing out")
return le_alpha1.energy
phi_alpha0 = phi_alpha1
phiprime_alpha0 = phiprime_alpha1
else:
# max_iterations was reached
self.logger.error("The line search algorithm did not converge.")
return le_alpha1.energy
if iteration_number > 1:
self.logger.debug("Finished line-search after %08u steps" %
iteration_number)
return result_energy
def _zoom(self, alpha_lo, alpha_hi, phi_0, phiprime_0,
phi_lo, phiprime_lo, phi_hi, le_0):
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment