Commit 74237b2f authored by Marco Selig

several minor fixes; version updated.

parent 2395b49a
@@ -401,7 +401,7 @@ def infer_power(m,domain=None,Sk=None,D=None,pindex=None,pundex=None,kindex=None
     derived, and the implications of a certain choise of the perception
     tuple (delta,epsilon) are discussed.
     The further incorporation of a smoothness prior as detailed in [#]_,
-    where the underlying formula(s), Eq.(27), of this implementation are
+    where the underlying formula(s), Eq.(26), of this implementation are
     derived and discussed in terms of their applicability.

     References
...
@@ -185,13 +185,14 @@ class invertible_operator(operator):
         """
         x_,convergence = conjugate_gradient(self.inverse_times,x,W=W,spam=spam,reset=reset,note=note)(x0=x0,tol=tol,clevel=clevel,limii=limii)
-        if(not self.imp): ## continiuos domain/target
-            x_.weight(power=-1,overwrite=True)
         ## check convergence
         if(not convergence):
-            if(not force):
+            if(not force)or(x_ is None):
                 return None
             about.warnings.cprint("WARNING: conjugate gradient failed.")
+        ## weight if ...
+        if(not self.imp): ## continiuos domain/target
+            x_.weight(power=-1,overwrite=True)
         return x_

     def _inverse_multiply(self,x,force=False,W=None,spam=None,reset=None,note=False,x0=None,tol=1E-4,clevel=1,limii=None,**kwargs):
@@ -242,13 +243,14 @@ class invertible_operator(operator):
         """
         x_,convergence = conjugate_gradient(self.times,x,W=W,spam=spam,reset=reset,note=note)(x0=x0,tol=tol,clevel=clevel,limii=limii)
-        if(not self.imp): ## continiuos domain/target
-            x_.weight(power=1,overwrite=True)
         ## check convergence
         if(not convergence):
-            if(not force):
+            if(not force)or(x_ is None):
                 return None
             about.warnings.cprint("WARNING: conjugate gradient failed.")
+        ## weight if ...
+        if(not self.imp): ## continiuos domain/target
+            x_.weight(power=1,overwrite=True)
         return x_

 ##-----------------------------------------------------------------------------
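Both hunks above make the same fix in invertible_operator: the result of the conjugate-gradient run is now checked for convergence (and for being None) before any volume weighting is applied, rather than weighting a result that may then be discarded; in the old ordering a run that returned no field would have hit x_.weight() before the check. A minimal standalone sketch of the new control flow, with solve and weight as illustrative placeholders rather than NIFTY API:

    def guarded_solve(solve, x, force=False, weight=None):
        ## run the iterative solver, e.g. a conjugate-gradient inversion
        x_, convergence = solve(x)
        ## check convergence first: without force, or without any result
        ## at all, give up before touching x_
        if not convergence:
            if (not force) or (x_ is None):
                return None
            print("WARNING: conjugate gradient failed.")
        ## weight only a result that will actually be returned
        if weight is not None:
            x_ = weight(x_)
        return x_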
@@ -578,6 +580,10 @@ class conjugate_gradient(object):
     note : bool, *optional*
         Indicates whether notes are printed or not (default: False).

+    See Also
+    --------
+    scipy.sparse.linalg.cg
+
     Notes
     -----
     After initialization by `__init__`, the minimizer is started by calling
@@ -585,10 +591,11 @@ class conjugate_gradient(object):
     if enabled, will state the iteration number, current step widths
     `alpha` and `beta`, the current relative residual `delta` that is
     compared to the tolerance, and the convergence level if changed.
-    The minimizer will exit in two states: QUIT if the maximum number of
-    iterations is reached, or DONE if convergence is achieved. Returned
-    will be the latest `x` and the latest convergence level, which can
-    evaluate ``True`` for all exit states.
+    The minimizer will exit in three states: DEAD if alpha becomes
+    infinite, QUIT if the maximum number of iterations is reached, or DONE
+    if convergence is achieved. Returned will be the latest `x` and the
+    latest convergence level, which can evaluate ``True`` for the exit
+    states QUIT and DONE.

     References
     ----------
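The updated docstring adds a third exit state: DEAD, reached when the step width alpha becomes non-finite (see the guards added below); in that case the returned convergence level evaluates False, while QUIT and DONE remain truthy. A hedged usage sketch of how a caller might act on the returned pair; the operator A and right-hand side b are assumed to be set up elsewhere, only the call pattern follows the one visible in invertible_operator above:

    ## assumed: A applies a positive definite operator to a field, b is the right-hand side
    x_, convergence = conjugate_gradient(A, b, note=True)(tol=1E-4, clevel=1, limii=1000)
    if convergence:
        result = x_        ## DONE or QUIT: the level evaluates True
    else:
        result = None      ## DEAD: alpha became non-finite, the run was aborted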
@@ -714,12 +721,17 @@ class conjugate_gradient(object):
     def _calc_without(self,tol=1E-4,clevel=1,limii=None): ## > runs cg without preconditioner
+        clevel = int(clevel)
         if(limii is None):
             limii = 10*self.b.domain.dim(split=False)
+        else:
+            limii = int(limii)

         r = self.b-self.A(self.x)
         d = field(self.b.domain,val=np.copy(r.val),target=self.b.target)
         gamma = r.dot(d)
+        if(gamma==0):
+            return self.x,clevel+1
         delta_ = np.absolute(gamma)**(-0.5)

         convergence = 0
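Two guards are added to _calc_without before the iteration starts: an explicitly passed iteration limit is coerced to an integer, and a zero initial residual (gamma == 0, i.e. x0 already solves the system) returns immediately instead of proceeding to the 1/sqrt(gamma) scale factor and the loop. A rough NumPy sketch of the same checks on plain arrays rather than the NIFTY field class; the function name is illustrative:

    import numpy as np

    def cg_initial_checks(A, b, x0=None, clevel=1, limii=None):
        x = np.zeros_like(b) if x0 is None else x0.copy()
        clevel = int(clevel)
        ## default limit scales with the problem size, an explicit one is coerced to int
        limii = 10 * b.size if limii is None else int(limii)
        r = b - A(x)
        gamma = r.dot(r)
        if gamma == 0:
            ## x0 already solves A(x) = b; skip the loop and the 1/sqrt(gamma) factor
            return x, clevel + 1
        delta_ = np.abs(gamma) ** (-0.5)
        ## ... the conjugate-gradient loop would start here ...
        return x, 0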
@@ -727,6 +739,9 @@ class conjugate_gradient(object):
         while(True):
             q = self.A(d)
             alpha = gamma/d.dot(q) ## positive definite
+            if(not np.isfinite(alpha)):
+                self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
+                return self.x,0
             self.x += alpha*d
             if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
                 r = self.b-self.A(self.x)
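The DEAD exit added inside the loop protects the step width: if d.dot(q) vanishes or the operator produces non-finite values, alpha becomes inf or NaN and every subsequent update of x would be meaningless, so the run aborts with convergence level 0. A plain-NumPy sketch of one iteration with that guard (names are illustrative, not NIFTY API):

    import numpy as np

    def cg_step(A, x, d, gamma, ii):
        q = A(d)
        with np.errstate(divide="ignore", invalid="ignore"):
            alpha = gamma / d.dot(q)   ## finite for a positive definite A and d != 0
        if not np.isfinite(alpha):
            print("\niteration : %08u alpha = NAN\n... dead." % ii)
            return x, False            ## abort; the caller reports convergence level 0
        x = x + alpha * d              ## update
        return x, True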
@@ -769,8 +784,11 @@ class conjugate_gradient(object):
     def _calc_with(self,tol=1E-4,clevel=1,limii=None): ## > runs cg with preconditioner
+        clevel = int(clevel)
         if(limii is None):
             limii = 10*self.b.domain.dim(split=False)
+        else:
+            limii = int(limii)

         r = self.b-self.A(self.x)
         d = self.W(r)
@@ -782,6 +800,9 @@ class conjugate_gradient(object):
         while(True):
             q = self.A(d)
             alpha = gamma/d.dot(q) ## positive definite
+            if(not np.isfinite(alpha)):
+                self.note.cprint("\niteration : %08u alpha = NAN\n... dead."%ii)
+                return self.x,0
             self.x += alpha*d ## update
             if(ii%self.reset==0)or(np.signbit(np.real(alpha))):
                 r = self.b-self.A(self.x)
@@ -861,6 +882,11 @@ class steepest_descent(object):
     note : bool, *optional*
         Indicates whether notes are printed or not (default: False).

+    See Also
+    --------
+    scipy.optimize.fmin_cg, scipy.optimize.fmin_ncg,
+    scipy.optimize.fmin_l_bfgs_b
+
     Notes
     -----
     After initialization by `__init__`, the minimizer is started by calling
@@ -982,6 +1008,10 @@ class steepest_descent(object):
         if(not isinstance(x0,field)):
             raise TypeError(about._errors.cstring("ERROR: invalid input."))
         self.x = x0

+        clevel = int(clevel)
+        limii = int(limii)
+
         E,g = self.eggs(self.x) ## energy and gradient
         norm = g.norm() ## gradient norm
...
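steepest_descent gets the same input sanitisation as the conjugate-gradient routines: once the starting field has been validated, the convergence level and iteration limit are cast to int, so that e.g. a float passed for limii cannot upset the loop bookkeeping. A small sketch of that entry check; the default values and the validation step are illustrative, not taken from the diff:

    def descent_entry(x0, clevel=8, limii=100000):
        ## reject an unusable starting point (stands in for the isinstance(x0, field) check)
        if x0 is None:
            raise TypeError("ERROR: invalid input.")
        ## coerce loop-control parameters so e.g. clevel=8.0 or limii=1e5 still work
        clevel = int(clevel)
        limii = int(limii)
        return x0, clevel, limii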
@@ -23,7 +23,7 @@ from distutils.core import setup
 import os

 setup(name="nifty",
-      version="0.5.0",
+      version="0.5.5",
       description="Numerical Information Field Theory",
       author="Marco Selig",
       author_email="mselig@mpa-garching.mpg.de",
...