Commit ed80f6d7 authored by Marco Selig

SD glitch fixed.

steepest_descent now clamps clevel to at least 1, returns immediately with convergence level clevel+2 when the initial gradient norm is zero, and reports full convergence when the step delta becomes exactly zero. The power-spectrum convergence measure dtau is now a logarithm taken to the base of the current spectrum. Version bumped to 0.7.7.

parent 7d9aaa5e
@@ -167,8 +167,9 @@ class problem(object):
         power = numerator / denominator
         ## check convergence
-        dtau = np.log(power / self.S.get_power())
+        dtau = log(power / self.S.get_power(), base=self.S.get_power())
         iterating = (np.max(np.abs(dtau)) > 2E-2)
+        print max(np.abs(dtau))
         ## update signal covariance
         self.S.set_power(power, bare=False) ## auto-updates D
...
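For orientation, here is a minimal NumPy sketch of the convergence test that the hunk above changes: instead of the natural logarithm of the spectrum ratio, dtau is now the logarithm taken to the base of the current power spectrum. The sketch assumes NIFTY's field-aware log(x, base=b) reduces elementwise to ln(x)/ln(b); the log_base helper and the two spectra are illustrative stand-ins, not NIFTY code.

import numpy as np

def log_base(x, base):
    ## elementwise logarithm of x to the given base: ln(x) / ln(base)
    return np.log(x) / np.log(base)

old_power = np.array([2.0, 1.5, 1.1])  ## hypothetical current spectrum
new_power = np.array([2.1, 1.4, 1.1])  ## hypothetical reestimated spectrum

## old criterion: natural log of the spectrum ratio
dtau_old = np.log(new_power / old_power)
## new criterion: log of the ratio to the base of the current spectrum,
## i.e. the change measured relative to the magnitude of the spectrum itself
dtau_new = log_base(new_power / old_power, old_power)

## iteration continues while the largest change exceeds the 2E-2 threshold
iterating = np.max(np.abs(dtau_new)) > 2E-2
print(iterating)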
@@ -514,7 +514,7 @@ class _about(object): ## nifty support class for global settings
         """
         ## version
-        self._version = "0.7.6"
+        self._version = "0.7.7"
         ## switches and notifications
         self._errors = notification(default=True,ccode=notification._code)
...
@@ -1035,11 +1035,14 @@ class steepest_descent(object):
             raise TypeError(about._errors.cstring("ERROR: invalid input."))
         self.x = x0
-        clevel = int(clevel)
+        clevel = max(1,int(clevel))
         limii = int(limii)
         E,g = self.eggs(self.x) ## energy and gradient
         norm = g.norm() ## gradient norm
+        if(norm==0):
+            self.note.cprint("\niteration : 00000000 alpha = 0.0E+00 delta = 0.0E+00\n... done.")
+            return self.x,clevel+2
         convergence = 0
         ii = 1
...
@@ -1060,6 +1063,10 @@
             if(ii==limii):
                 self.note.cprint("\n... quit.")
                 break
+            elif(delta==0):
+                convergence = clevel+2
+                self.note.cprint(" convergence level : %u\n... done."%convergence)
+                break
             elif(delta<tol):
                 convergence += 1
                 self.note.cflush(" convergence level : %u"%convergence)
...
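Taken together, the steepest_descent hunks handle two degenerate cases: a vanishing gradient at the starting point (return immediately with convergence level clevel+2) and a step delta of exactly zero during iteration (report full convergence and stop). Below is a compact, self-contained sketch of that termination logic on a plain quadratic objective; the function name, the fixed step size alpha, and the bookkeeping outside the branches shown in the diff (the convergence >= clevel break and the decay on large steps) are assumptions, not NIFTY's implementation.

import numpy as np

def steepest_descent_sketch(x0, grad, alpha=0.1, tol=1E-4, clevel=8, limii=1000):
    ## clamp clevel to at least 1, mirroring clevel = max(1,int(clevel))
    clevel = max(1, int(clevel))
    limii = int(limii)
    x = np.asarray(x0, dtype=float)
    g = grad(x)
    ## guard from the diff: a vanishing gradient means x0 is already
    ## stationary, so report convergence immediately instead of looping
    if np.linalg.norm(g) == 0:
        return x, clevel + 2
    convergence = 0
    for ii in range(1, limii + 1):
        x_new = x - alpha * g
        delta = np.linalg.norm(x_new - x)
        x, g = x_new, grad(x_new)
        if ii == limii:
            break                       ## iteration limit reached: quit
        elif delta == 0:
            convergence = clevel + 2    ## exact standstill: fully converged
            break
        elif delta < tol:
            convergence += 1            ## small step: raise convergence level
            if convergence >= clevel:   ## assumed stopping rule, not in diff
                break
        else:
            convergence = max(0, convergence - 1)  ## assumed decay, not in diff
    return x, convergence

## usage on f(x) = |x - 1|^2, whose gradient is 2*(x - 1)
x_min, conv = steepest_descent_sketch([0.0, 0.0], lambda x: 2.0 * (x - 1.0))
print(x_min, conv)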